diff --git a/octodns/provider/dyn.py b/octodns/provider/dyn.py index f47f77b..09969fc 100644 --- a/octodns/provider/dyn.py +++ b/octodns/provider/dyn.py @@ -7,9 +7,9 @@ from __future__ import absolute_import, division, print_function, \ from collections import defaultdict from dyn.tm.errors import DynectGetError -from dyn.tm.services.dsf import DSFARecord, DSFAAAARecord, DSFFailoverChain, \ - DSFMonitor, DSFNode, DSFRecordSet, DSFResponsePool, DSFRuleset, \ - TrafficDirector, get_all_dsf_monitors, get_all_dsf_services, \ +from dyn.tm.services.dsf import DSFARecord, DSFAAAARecord, DSFCNAMERecord, \ + DSFFailoverChain, DSFMonitor, DSFNode, DSFRecordSet, DSFResponsePool, \ + DSFRuleset, TrafficDirector, get_all_dsf_monitors, get_all_dsf_services, \ get_response_pool from dyn.tm.session import DynectSession from dyn.tm.zones import Zone as DynZone @@ -18,6 +18,7 @@ from threading import Lock from uuid import uuid4 from ..record import Record, Update +from ..record.geo import GeoCodes from .base import BaseProvider @@ -178,6 +179,10 @@ class _CachingDynZone(DynZone): self.flush_cache() +def _dynamic_value_sort_key(value): + return value['value'] + + class DynProvider(BaseProvider): ''' Dynect Managed DNS provider @@ -232,6 +237,8 @@ class DynProvider(BaseProvider): 'OC': 16, # Continental Australia/Oceania 'AN': 17, # Continental Antarctica } + # Reverse of ^ + REGION_CODES_LOOKUP = {code: geo for geo, code in REGION_CODES.items()} MONITOR_HEADER = 'User-Agent: Dyn Monitor' MONITOR_TIMEOUT = 10 @@ -261,8 +268,7 @@ class DynProvider(BaseProvider): @property def SUPPORTS_DYNAMIC(self): - # TODO: dynamic - return False + return True def _check_dyn_sess(self): # We don't have to worry about locking for the check since the @@ -395,61 +401,246 @@ class DynProvider(BaseProvider): for td in get_all_dsf_services(): try: fqdn, _type = td.label.split(':', 1) - except ValueError as e: - self.log.warn("Failed to load TrafficDirector '%s': %s", - td.label, e.message) + except ValueError: + self.log.warn("Unsupported TrafficDirector '%s'", td.label) continue tds[fqdn][_type] = td self._traffic_directors = dict(tds) return self._traffic_directors + def _populate_geo_traffic_director(self, zone, fqdn, _type, td, rulesets, + lenient): + # We start out with something that will always show change in case this + # is a busted TD. This will prevent us from creating a duplicate td. 
+        # We'll overwrite this with real data provided we have it
+        geo = {}
+        data = {
+            'geo': geo,
+            'type': _type,
+            'ttl': td.ttl,
+            'values': ['0.0.0.0']
+        }
+        for ruleset in rulesets:
+            try:
+                record_set = ruleset.response_pools[0].rs_chains[0] \
+                    .record_sets[0]
+            except IndexError:
+                # problems indicate a malformed ruleset, ignore it
+                continue
+            if ruleset.label.startswith('default:'):
+                data_for = getattr(self, '_data_for_{}'.format(_type))
+                data.update(data_for(_type, record_set.records))
+            else:
+                # We've stored the geo in label
+                try:
+                    code, _ = ruleset.label.split(':', 1)
+                except ValueError:
+                    continue
+                values = [r.address for r in record_set.records]
+                geo[code] = values
+
+        name = zone.hostname_from_fqdn(fqdn)
+        record = Record.new(zone, name, data, source=self)
+        zone.add_record(record, lenient=lenient)
+
+        return record
+
+    def _value_for_address(self, _type, record):
+        return {
+            'value': record.address,
+            'weight': record.weight,
+        }
+
+    _value_for_A = _value_for_address
+    _value_for_AAAA = _value_for_address
+
+    def _value_for_CNAME(self, _type, record):
+        return {
+            'value': record.cname,
+            'weight': record.weight,
+        }
+
+    def _populate_dynamic_pools(self, _type, rulesets, response_pools):
+        default = {}
+        pools = {}
+
+        data_for = getattr(self, '_data_for_{}'.format(_type))
+        value_for = getattr(self, '_value_for_{}'.format(_type))
+
+        # Build the list of pools, we can't just read them off of rules b/c we
+        # won't see unused pools there. If/when we dis-allow unused pools we
+        # could probably change that and avoid the refresh
+        for response_pool in response_pools:
+            # We have to refresh the response pool to have access to its
+            # rs_chains and thus records, yeah... :-(
+            # TODO: look at rulesets first b/c they won't need a refresh...
+            response_pool.refresh()
+            try:
+                record_set = response_pool.rs_chains[0] \
+                    .record_sets[0]
+            except IndexError:
+                # problems indicate a malformed ruleset, ignore it
+                self.log.warn('_populate_dynamic_pools: '
+                              'malformed response_pool "%s" ignoring',
+                              response_pool.label)
+                continue
+
+            label = response_pool.label
+
+            if label == 'default':
+                # The default pool has the base record values
+                default = data_for(_type, record_set.records)
+            else:
+                if label not in pools:
+                    # First time we've seen it, get its data
+                    # Note we'll have to set fallbacks as we go through rules
+                    # b/c we can't determine them here
+                    values = [value_for(_type, r) for r in record_set.records]
+                    # Sort to ensure consistent ordering so we can compare them
+                    values.sort(key=_dynamic_value_sort_key)
+                    pools[label] = {
+                        'values': values,
+                    }
+
+        return default, pools
+
+    def _populate_dynamic_rules(self, rulesets, pools):
+        rules = []
+
+        # Build the list of rules based on the rulesets
+        for ruleset in rulesets:
+            if ruleset.label.startswith('default:'):
+                # Ignore the default, it's implicit in our model
+                continue
+
+            num_pools = len(ruleset.response_pools)
+            if num_pools > 0:
+                # Find the primary pool for this rule
+                pool = ruleset.response_pools[0].label
+                # TODO: verify pool exists
+                if num_pools > 1:
+                    # We have a fallback, record it in the appropriate pool.
+                    # Note we didn't have fallback info when we populated the
+                    # pools above so we're filling that info in here. It's
+                    # possible that rules will have disagreeing values for the
+                    # fallbacks. That's annoying but a sync should fix it and
+                    # match stuff up with the config.
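+                    # (response_pools[0] is the rule's primary pool;
+                    # response_pools[1], when present, is its fallback.)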
+ fallback = ruleset.response_pools[1].label + # TODO: verify fallback exists + if fallback != 'default': + pools[pool]['fallback'] = fallback + else: + self.log.warn('_populate_dynamic_pools: ' + 'ruleset "%s" has no response_pools', + ruleset.label) + continue + + # OK we have the rule's pool info, record it and work on the rule's + # matching criteria + rule = { + 'pool': pool, + } + + criteria_type = ruleset.criteria_type + if criteria_type == 'geoip': + # Geo + geo = ruleset.criteria['geoip'] + geos = [] + # Dyn uses the same 2-letter codes as octoDNS (except for + # continents) but it doesn't have the hierary, e.g. US is + # just US, not NA-US. We'll have to map these things back + for code in geo['country']: + geos.append(GeoCodes.country_to_code(code)) + for code in geo['province']: + geos.append(GeoCodes.province_to_code(code.upper())) + for code in geo['region']: + geos.append(self.REGION_CODES_LOOKUP[int(code)]) + geos.sort() + rule['geos'] = geos + elif criteria_type == 'always': + pass + else: + self.log.warn('_populate_dynamic_rules: ' + 'unsupported criteria_type "%s", ignoring', + criteria_type) + continue + + rules.append(rule) + + return rules + + def _populate_dynamic_traffic_director(self, zone, fqdn, _type, td, + rulesets, lenient): + # We'll go ahead and grab pools too, using all will include unref'd + # pools + response_pools = td.all_response_pools + + # Populate pools + default, pools = self._populate_dynamic_pools(_type, rulesets, + response_pools) + + # Populate rules + rules = self._populate_dynamic_rules(rulesets, pools) + + # We start out with something that will always show + # change in case this is a busted TD. This will prevent us from + # creating a duplicate td. We'll overwrite this with real data + # provide we have it + data = { + 'dynamic': { + 'pools': pools, + 'rules': rules, + }, + 'type': _type, + 'ttl': td.ttl, + } + # Include default's information in data + data.update(default) + + name = zone.hostname_from_fqdn(fqdn) + record = Record.new(zone, name, data, source=self, lenient=lenient) + zone.add_record(record, lenient=lenient) + + return record + + def _is_traffic_director_dyanmic(self, td, rulesets): + for ruleset in rulesets: + try: + pieces = ruleset.label.split(':') + if len(pieces) == 2: + # It matches octoDNS's format + int(pieces[0]) + # It's an integer, so probably rule_num, thus dynamic + return True + except (IndexError, ValueError): + pass + # We didn't see any rulesets that look like a dynamic record so maybe + # geo... + return False + def _populate_traffic_directors(self, zone, lenient): - self.log.debug('_populate_traffic_directors: zone=%s', zone.name) + self.log.debug('_populate_traffic_directors: zone=%s, lenient=%s', + zone.name, lenient) td_records = set() for fqdn, types in self.traffic_directors.items(): # TODO: skip subzones if not fqdn.endswith(zone.name): continue - for _type, td in types.items(): # critical to call rulesets once, each call loads them :-( rulesets = td.rulesets - - # We start out with something that will always change show - # change in case this is a busted TD. This will prevent us from - # creating a duplicate td. 
We'll overwrite this with real data - # provide we have it - geo = {} - data = { - 'geo': geo, - 'type': _type, - 'ttl': td.ttl, - 'values': ['0.0.0.0'] - } - for ruleset in rulesets: - try: - record_set = ruleset.response_pools[0].rs_chains[0] \ - .record_sets[0] - except IndexError: - # problems indicate a malformed ruleset, ignore it - continue - _type = record_set.rdata_class - if ruleset.label.startswith('default:'): - data_for = getattr(self, '_data_for_{}'.format(_type)) - data.update(data_for(_type, record_set.records)) - else: - # We've stored the geo in label - try: - code, _ = ruleset.label.split(':', 1) - except ValueError: - continue - values = [r.address for r in record_set.records] - geo[code] = values - - name = zone.hostname_from_fqdn(fqdn) - record = Record.new(zone, name, data, source=self) - zone.add_record(record, lenient=lenient) + if self._is_traffic_director_dyanmic(td, rulesets): + record = \ + self._populate_dynamic_traffic_director(zone, fqdn, + _type, td, + rulesets, + lenient) + else: + record = \ + self._populate_geo_traffic_director(zone, fqdn, _type, + td, rulesets, + lenient) td_records.add(record) return td_records @@ -659,8 +850,8 @@ class DynProvider(BaseProvider): self._traffic_director_monitors[label] = monitor return monitor - def _find_or_create_pool(self, td, pools, label, _type, values, - monitor_id=None): + def _find_or_create_geo_pool(self, td, pools, label, _type, values, + monitor_id=None): for pool in pools: if pool.label != label: continue @@ -680,9 +871,78 @@ class DynProvider(BaseProvider): chain = DSFFailoverChain(label, record_sets=[record_set]) pool = DSFResponsePool(label, rs_chains=[chain]) pool.create(td) + + # We need to store the newly created pool in the pools list since the + # caller won't know if it was newly created or not. 
This will allow us + # to find this pool again if another rule references it and avoid + # creating duplicates + pools.append(pool) + return pool - def _mod_rulesets(self, td, change): + def _dynamic_records_for_A(self, values, record_extras): + return [DSFARecord(v['value'], weight=v.get('weight', 1), + **record_extras) + for v in values] + + def _dynamic_records_for_AAAA(self, values, record_extras): + return [DSFAAAARecord(v['value'], weight=v.get('weight', 1), + **record_extras) + for v in values] + + def _dynamic_records_for_CNAME(self, values, record_extras): + return [DSFCNAMERecord(v['value'], weight=v.get('weight', 1), + **record_extras) + for v in values] + + def _find_or_create_dynamic_pool(self, td, pools, label, _type, values, + monitor_id=None, record_extras={}): + + # Sort the values for consistent ordering so that we can compare + values = sorted(values, key=_dynamic_value_sort_key) + # Ensure that weight is included and if not use the default + values = map(lambda v: { + 'value': v['value'], + 'weight': v.get('weight', 1), + }, values) + + # Walk through our existing pools looking for a match we can use + for pool in pools: + # It must have the same label + if pool.label != label: + continue + try: + records = pool.rs_chains[0].record_sets[0].records + except IndexError: + # No values, can't match + continue + # And the (sorted) values must match once converted for comparison + # purposes + value_for = getattr(self, '_value_for_{}'.format(_type)) + record_values = [value_for(_type, r) for r in records] + if record_values == values: + # it's a match + return pool + + # We don't have this pool and thus need to create it + records_for = getattr(self, '_dynamic_records_for_{}'.format(_type)) + records = records_for(values, record_extras) + record_set = DSFRecordSet(_type, label, + serve_count=min(len(records), 2), + records=records, dsf_monitor_id=monitor_id) + chain = DSFFailoverChain(label, record_sets=[record_set]) + pool = DSFResponsePool(label, rs_chains=[chain]) + pool.create(td) + + # We need to store the newly created pool in the pools list since the + # caller won't know if it was newly created or not. This will allow us + # to find this pool again if another rule references it and avoid + # creating duplicates + pools.append(pool) + + return pool + + def _mod_geo_rulesets(self, td, change): new = change.new # Response Pools @@ -732,14 +992,14 @@ class DynProvider(BaseProvider): int(r._ordering) for r in existing_rulesets ] + [-1]) + 1 - self.log.debug('_mod_rulesets: insert_at=%d', insert_at) + self.log.debug('_mod_geo_rulesets: insert_at=%d', insert_at) # add the default label = 'default:{}'.format(uuid4().hex) ruleset = DSFRuleset(label, 'always', []) ruleset.create(td, index=insert_at) - pool = self._find_or_create_pool(td, pools, 'default', new._type, - new.values) + pool = self._find_or_create_geo_pool(td, pools, 'default', new._type, + new.values) # There's no way in the client lib to create a ruleset with an existing # pool (ref'd by id) so we have to do this round-a-bout. 
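+        # active_pools maps each geo code (or 'default') to the id of the
+        # response pool that serves it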
active_pools = { @@ -773,8 +1033,8 @@ class DynProvider(BaseProvider): ruleset.create(td, index=insert_at) first = geo.values[0] - pool = self._find_or_create_pool(td, pools, first, new._type, - geo.values, monitor_id) + pool = self._find_or_create_geo_pool(td, pools, first, new._type, + geo.values, monitor_id) active_pools[geo.code] = pool.response_pool_id ruleset.add_response_pool(pool.response_pool_id) @@ -811,7 +1071,7 @@ class DynProvider(BaseProvider): node = DSFNode(new.zone.name, fqdn) td = TrafficDirector(label, ttl=new.ttl, nodes=[node], publish='Y') self.log.debug('_mod_geo_Create: td=%s', td.service_id) - self._mod_rulesets(td, change) + self._mod_geo_rulesets(td, change) self.traffic_directors[fqdn] = { _type: td } @@ -832,7 +1092,7 @@ class DynProvider(BaseProvider): self._mod_geo_Create(dyn_zone, change) self._mod_Delete(dyn_zone, change) return - self._mod_rulesets(td, change) + self._mod_geo_rulesets(td, change) def _mod_geo_Delete(self, dyn_zone, change): existing = change.existing @@ -841,6 +1101,237 @@ class DynProvider(BaseProvider): fqdn_tds[_type].delete() del fqdn_tds[_type] + def _mod_dynamic_rulesets(self, td, change): + new = change.new + + # TODO: make sure we can update TTLs + if td.ttl != new.ttl: + td.ttl = new.ttl + + # Get existing pools. This should be simple, but it's not b/c the dyn + # api is a POS. We need all response pools so we can GC and check to + # make sure that what we're after doesn't already exist. + # td.all_response_pools just returns thin objects that don't include + # their rs_chains (and children down to actual records.) We could just + # foreach over those turning them into full DSFResponsePool objects + # with get_response_pool, but that'd be N round-trips. We can avoid + # those round trips in cases where the pools are in use in rules where + # they're already full objects. + + # First up populate all the pools we have under rules, the _ prevents a + # td.refresh we don't need :-( seriously? + existing_rulesets = td._rulesets + pools = {} + for ruleset in existing_rulesets: + for pool in ruleset.response_pools: + pools[pool.response_pool_id] = pool + + # Reverse sort the existing_rulesets by _ordering so that we'll remove + # them in that order later, this will ensure that we remove the old + # default before any of the old geo rules preventing it from catching + # everything. + existing_rulesets.sort(key=lambda r: r._ordering, reverse=True) + + # Add in any pools that aren't currently referenced by rules + for pool in td.all_response_pools: + rpid = pool.response_pool_id + if rpid not in pools: + # we want this one, but it's thin, inflate it + pools[rpid] = get_response_pool(rpid, td) + # now that we have full objects for the complete set of existing pools, + # a list will be more useful + pools = pools.values() + + # Rulesets + + # We need to make sure and insert the new rules after any existing + # rules so they won't take effect before we've had a chance to add + # response pools to them. I've tried both publish=False (which is + # completely broken in the client) and creating the rulesets with + # response_pool_ids neither of which appear to work from the client + # library. 
If there are no existing rulesets fallback to 0 + insert_at = max([ + int(r._ordering) + for r in existing_rulesets + ] + [-1]) + 1 + self.log.debug('_mod_dynamic_rulesets: insert_at=%d', insert_at) + + # Add the base record values as the ultimate/unhealthchecked default + label = 'default:{}'.format(uuid4().hex) + ruleset = DSFRuleset(label, 'always', []) + ruleset.create(td, index=insert_at) + # If/when we go beyond A, AAAA, and CNAME this will have to get + # more intelligent, probably a weighted_values method on Record objects + # or something like that? + try: + values = new.values + except AttributeError: + values = [new.value] + values = [{ + 'value': v, + 'weight': 1, + } for v in values] + # For these defaults we need to set them to always be served and to + # ignore any health checking (since they won't have one) + pool = self._find_or_create_dynamic_pool(td, pools, 'default', + new._type, values, + record_extras={ + 'automation': 'manual', + 'eligible': True, + }) + # There's no way in the client lib to create a ruleset with an existing + # pool (ref'd by id) so we have to do this round-a-bout. + active_pools = { + # TODO: disallow default as a pool id + 'default': pool.response_pool_id + } + ruleset.add_response_pool(pool.response_pool_id) + + # Get our monitor + monitor_id = self._traffic_director_monitor(new).dsf_monitor_id + + # Make sure we have all the pools we're going to need + for _id, pool in sorted(new.dynamic.pools.items()): + values = [{ + 'weight': v.get('weight', 1), + 'value': v['value'], + } for v in pool.data['values']] + pool = self._find_or_create_dynamic_pool(td, pools, _id, + new._type, values, + monitor_id) + active_pools[_id] = pool.response_pool_id + + # Run through and configure our rules + for rule_num, rule in enumerate(reversed(new.dynamic.rules)): + criteria = defaultdict(lambda: defaultdict(list)) + criteria_type = 'always' + try: + geos = rule.data['geos'] + criteria_type = 'geoip' + except KeyError: + geos = [] + + for geo in geos: + geo = GeoCodes.parse(geo) + if geo['province_code']: + criteria['geoip']['province'] \ + .append(geo['province_code'].lower()) + elif geo['country_code']: + criteria['geoip']['country'] \ + .append(geo['country_code']) + else: + criteria['geoip']['region'] \ + .append(self.REGION_CODES[geo['continent_code']]) + + label = '{}:{}'.format(rule_num, uuid4().hex) + ruleset = DSFRuleset(label, criteria_type, [], criteria) + # Something you have to call create others the constructor does it + ruleset.create(td, index=insert_at) + + # Add the primary pool for this rule + rule_pool = rule.data['pool'] + ruleset.add_response_pool(active_pools[rule_pool]) + + # OK, we have the rule and its primary pool setup, now look to see + # if there's a fallback chain that needs to be configured + fallback = new.dynamic.pools[rule_pool].data.get('fallback', None) + seen = set([rule_pool]) + while fallback and fallback not in seen: + seen.add(fallback) + # looking at client lib code, index > exists appends + ruleset.add_response_pool(active_pools[fallback], index=999) + fallback = new.dynamic.pools[fallback].data.get('fallback', + None) + if fallback is not None: + # If we're out of the while and fallback is not None that means + # there was a loop. This generally shouldn't happen since + # Record validations test for it, but this is a + # belt-and-suspenders setup. Excepting here would put things + # into a partially configured state which would be bad. 
We'll + # just break at the point where the loop was going to happen + # and log about it. Note that any time we hit this we're likely + # to hit it multiple times as we configure the other pools + self.log.warn('_mod_dynamic_rulesets: loop detected in ' + 'fallback chain, fallback=%s, seen=%s', fallback, + seen) + + # and always add default as the last + ruleset.add_response_pool(active_pools['default'], index=999) + + # we're done with active_pools as a lookup, convert it in to a set of + # the ids in use + active_pools = set(active_pools.values()) + # Clean up unused response_pools + for pool in pools: + if pool.response_pool_id in active_pools: + continue + pool.delete() + + # Clean out the old rulesets + for ruleset in existing_rulesets: + ruleset.delete() + + def _mod_dynamic_Create(self, dyn_zone, change): + new = change.new + fqdn = new.fqdn + _type = new._type + # Create a new traffic director + label = '{}:{}'.format(fqdn, _type) + node = DSFNode(new.zone.name, fqdn) + td = TrafficDirector(label, ttl=new.ttl, nodes=[node], publish='Y') + self.log.debug('_mod_dynamic_Create: td=%s', td.service_id) + # Sync up it's pools & rules + self._mod_dynamic_rulesets(td, change) + # Store it for future reference + self.traffic_directors[fqdn] = { + _type: td + } + + def _mod_dynamic_Update(self, dyn_zone, change): + new = change.new + if not new.dynamic: + if new.geo: + # New record is a geo record + self.log.info('_mod_dynamic_Update: %s to geo', new.fqdn) + # Convert the TD over to a geo and we're done + self._mod_geo_Update(dyn_zone, change) + else: + # New record doesn't have dynamic, we're going from a TD to a + # regular record + self.log.info('_mod_dynamic_Update: %s to plain', new.fqdn) + # Create the regular record + self._mod_Create(dyn_zone, change) + # Delete the dynamic + self._mod_dynamic_Delete(dyn_zone, change) + return + try: + # We'll be dynamic going forward, see if we have one already + td = self.traffic_directors[new.fqdn][new._type] + if change.existing.geo: + self.log.info('_mod_dynamic_Update: %s from geo', new.fqdn) + else: + self.log.debug('_mod_dynamic_Update: %s existing', new.fqdn) + # If we're here we do, we'll just update it down below + except KeyError: + # There's no td, this is actually a create, we must be going from a + # non-dynamic to dynamic record + # First create the dynamic record + self.log.info('_mod_dynamic_Update: %s from regular', new.fqdn) + self._mod_dynamic_Create(dyn_zone, change) + # From a generic so remove the old generic + self._mod_Delete(dyn_zone, change) + return + + # IF we're here it's actually an update, sync up rules + self._mod_dynamic_rulesets(td, change) + + def _mod_dynamic_Delete(self, dyn_zone, change): + existing = change.existing + fqdn_tds = self.traffic_directors[existing.fqdn] + _type = existing._type + fqdn_tds[_type].delete() + del fqdn_tds[_type] + def _mod_Create(self, dyn_zone, change): new = change.new kwargs_for = getattr(self, '_kwargs_for_{}'.format(new._type)) @@ -867,8 +1358,13 @@ class DynProvider(BaseProvider): unhandled_changes = [] for c in changes: # we only mess with changes that have geo info somewhere - if getattr(c.new, 'geo', False) or getattr(c.existing, 'geo', - False): + if getattr(c.new, 'dynamic', False) or getattr(c.existing, + 'dynamic', False): + mod = getattr(self, '_mod_dynamic_{}' + .format(c.__class__.__name__)) + mod(dyn_zone, c) + elif getattr(c.new, 'geo', False) or getattr(c.existing, 'geo', + False): mod = getattr(self, '_mod_geo_{}'.format(c.__class__.__name__)) mod(dyn_zone, c) 
else: diff --git a/octodns/record/__init__.py b/octodns/record/__init__.py index a198cc5..ba0ab98 100644 --- a/octodns/record/__init__.py +++ b/octodns/record/__init__.py @@ -268,6 +268,7 @@ class _ValuesMixin(object): values = data['values'] except KeyError: values = [data['value']] + # TODO: should we natsort values? self.values = sorted(self._value_type.process(values)) def changes(self, other, target): @@ -384,12 +385,27 @@ class _DynamicPool(object): def __init__(self, _id, data): self._id = _id - self.data = data + + values = [ + { + 'value': d['value'], + 'weight': d.get('weight', 1), + } for d in data['values'] + ] + values.sort(key=lambda d: d['value']) + + fallback = data.get('fallback', None) + self.data = { + 'fallback': fallback if fallback != 'default' else None, + 'values': values, + } def _data(self): return self.data def __eq__(self, other): + if not isinstance(other, _DynamicPool): + return False return self.data == other.data def __ne__(self, other): @@ -403,12 +419,23 @@ class _DynamicRule(object): def __init__(self, i, data): self.i = i - self.data = data + + self.data = {} + try: + self.data['pool'] = data['pool'] + except KeyError: + pass + try: + self.data['geos'] = sorted(data['geos']) + except KeyError: + pass def _data(self): return self.data def __eq__(self, other): + if not isinstance(other, _DynamicRule): + return False return self.data == other.data def __ne__(self, other): @@ -437,6 +464,8 @@ class _Dynamic(object): } def __eq__(self, other): + if not isinstance(other, _Dynamic): + return False ret = self.pools == other.pools and self.rules == other.rules return ret @@ -457,6 +486,8 @@ class _DynamicMixin(object): if 'dynamic' not in data: return reasons + elif 'geo' in data: + reasons.append('"dynamic" record with "geo" content') try: pools = data['dynamic']['pools'] @@ -533,6 +564,8 @@ class _DynamicMixin(object): else: seen_default = False + # TODO: don't allow 'default' as a pool name, reserved + # TODO: warn or error on unused pools? 
for i, rule in enumerate(rules): rule_num = i + 1 try: diff --git a/octodns/record/geo.py b/octodns/record/geo.py index c7b5468..ed54194 100644 --- a/octodns/record/geo.py +++ b/octodns/record/geo.py @@ -2,11 +2,13 @@ # # +from logging import getLogger + from .geo_data import geo_data class GeoCodes(object): - __COUNTRIES = None + log = getLogger('GeoCodes') @classmethod def validate(cls, code, prefix): @@ -33,3 +35,37 @@ class GeoCodes(object): reasons.append('{}unknown province code "{}"'.format(prefix, code)) return reasons + + @classmethod + def parse(cls, code): + pieces = code.split('-') + try: + country_code = pieces[1] + except IndexError: + country_code = None + try: + province_code = pieces[2] + except IndexError: + province_code = None + return { + 'continent_code': pieces[0], + 'country_code': country_code, + 'province_code': province_code, + } + + @classmethod + def country_to_code(cls, country): + for continent, countries in geo_data.items(): + if country in countries: + return '{}-{}'.format(continent, country) + cls.log.warn('country_to_code: unrecognized country "%s"', country) + return + + @classmethod + def province_to_code(cls, province): + # We get to cheat on this one since we only support provinces in NA-US + if province not in geo_data['NA']['US']['provinces']: + cls.log.warn('country_to_code: unrecognized province "%s"', + province) + return + return 'NA-US-{}'.format(province) diff --git a/tests/test_octodns_provider_dyn.py b/tests/test_octodns_provider_dyn.py index ac56477..79d764d 100644 --- a/tests/test_octodns_provider_dyn.py +++ b/tests/test_octodns_provider_dyn.py @@ -13,7 +13,8 @@ from unittest import TestCase from octodns.record import Create, Delete, Record, Update from octodns.provider.base import Plan -from octodns.provider.dyn import DynProvider, _CachingDynZone, DSFMonitor +from octodns.provider.dyn import DynProvider, _CachingDynZone, DSFMonitor, \ + _dynamic_value_sort_key from octodns.zone import Zone from helpers import SimpleProvider @@ -671,10 +672,8 @@ class TestDynProviderGeo(TestCase): set(tds.keys())) self.assertEquals(['A'], tds['unit.tests.'].keys()) self.assertEquals(['A'], tds['geo.unit.tests.'].keys()) - provider.log.warn.assert_called_with("Failed to load TrafficDirector " - "'%s': %s", 'something else', - 'need more than 1 value to ' - 'unpack') + provider.log.warn.assert_called_with("Unsupported TrafficDirector " + "'%s'", 'something else') @patch('dyn.core.SessionEngine.execute') def test_traffic_director_monitor(self, mock): @@ -1159,7 +1158,7 @@ class TestDynProviderGeo(TestCase): traffic_directors_enabled=True) # will be tested separately - provider._mod_rulesets = MagicMock() + provider._mod_geo_rulesets = MagicMock() mock.side_effect = [ # create traffic director @@ -1171,7 +1170,7 @@ class TestDynProviderGeo(TestCase): # td now lives in cache self.assertTrue('A' in provider.traffic_directors['unit.tests.']) # should have seen 1 gen call - provider._mod_rulesets.assert_called_once() + provider._mod_geo_rulesets.assert_called_once() def test_mod_geo_update_geo_geo(self): provider = DynProvider('test', 'cust', 'user', 'pass', @@ -1185,8 +1184,8 @@ class TestDynProviderGeo(TestCase): 'A': 42, } } - # mock _mod_rulesets - provider._mod_rulesets = MagicMock() + # mock _mod_geo_rulesets + provider._mod_geo_rulesets = MagicMock() geo = self.geo_record change = Update(geo, geo) @@ -1194,7 +1193,7 @@ class TestDynProviderGeo(TestCase): # still in cache self.assertTrue('A' in provider.traffic_directors['unit.tests.']) # should have seen 
1 gen call - provider._mod_rulesets.assert_called_once_with(42, change) + provider._mod_geo_rulesets.assert_called_once_with(42, change) @patch('dyn.core.SessionEngine.execute') def test_mod_geo_update_geo_regular(self, _): @@ -1248,7 +1247,7 @@ class TestDynProviderGeo(TestCase): self.assertFalse('A' in provider.traffic_directors['unit.tests.']) @patch('dyn.tm.services.DSFResponsePool.create') - def test_find_or_create_pool(self, mock): + def test_find_or_create_geo_pool(self, mock): provider = DynProvider('test', 'cust', 'user', 'pass', traffic_directors_enabled=True) @@ -1256,7 +1255,8 @@ class TestDynProviderGeo(TestCase): # no candidates cache miss, so create values = ['1.2.3.4', '1.2.3.5'] - pool = provider._find_or_create_pool(td, [], 'default', 'A', values) + pool = provider._find_or_create_geo_pool(td, [], 'default', 'A', + values) self.assertIsInstance(pool, DSFResponsePool) self.assertEquals(1, len(pool.rs_chains)) records = pool.rs_chains[0].record_sets[0].records @@ -1266,23 +1266,24 @@ class TestDynProviderGeo(TestCase): # cache hit, use the one we just created mock.reset_mock() pools = [pool] - cached = provider._find_or_create_pool(td, pools, 'default', 'A', - values) + cached = provider._find_or_create_geo_pool(td, pools, 'default', 'A', + values) self.assertEquals(pool, cached) mock.assert_not_called() # cache miss, non-matching label mock.reset_mock() - miss = provider._find_or_create_pool(td, pools, 'NA-US-CA', 'A', - values) + miss = provider._find_or_create_geo_pool(td, pools, 'NA-US-CA', 'A', + values) self.assertNotEquals(pool, miss) self.assertEquals('NA-US-CA', miss.label) mock.assert_called_once_with(td) - # cache miss, non-matching label + # cache miss, matching label, mis-matching values mock.reset_mock() values = ['2.2.3.4.', '2.2.3.5'] - miss = provider._find_or_create_pool(td, pools, 'default', 'A', values) + miss = provider._find_or_create_geo_pool(td, pools, 'default', 'A', + values) self.assertNotEquals(pool, miss) mock.assert_called_once_with(td) @@ -1290,19 +1291,19 @@ class TestDynProviderGeo(TestCase): @patch('dyn.tm.services.DSFRuleset.create') # just lets us ignore the pool.create calls @patch('dyn.tm.services.DSFResponsePool.create') - def test_mod_rulesets_create(self, _, ruleset_create_mock, - add_response_pool_mock): + def test_mod_geo_rulesets_create(self, _, ruleset_create_mock, + add_response_pool_mock): provider = DynProvider('test', 'cust', 'user', 'pass', traffic_directors_enabled=True) td_mock = MagicMock() td_mock._rulesets = [] provider._traffic_director_monitor = MagicMock() - provider._find_or_create_pool = MagicMock() + provider._find_or_create_geo_pool = MagicMock() td_mock.all_response_pools = [] - provider._find_or_create_pool.side_effect = [ + provider._find_or_create_geo_pool.side_effect = [ _DummyPool('default'), _DummyPool(1), _DummyPool(2), @@ -1311,7 +1312,7 @@ class TestDynProviderGeo(TestCase): ] change = Create(self.geo_record) - provider._mod_rulesets(td_mock, change) + provider._mod_geo_rulesets(td_mock, change) ruleset_create_mock.assert_has_calls(( call(td_mock, index=0), call(td_mock, index=0), @@ -1343,9 +1344,9 @@ class TestDynProviderGeo(TestCase): @patch('dyn.tm.services.DSFRuleset.create') # just lets us ignore the pool.create calls @patch('dyn.tm.services.DSFResponsePool.create') - def test_mod_rulesets_existing(self, _, ruleset_create_mock, - add_response_pool_mock, - get_response_pool_mock): + def test_mod_geo_rulesets_existing(self, _, ruleset_create_mock, + add_response_pool_mock, + get_response_pool_mock): 
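+        # The TD starts out with an existing ruleset plus an unreferenced
+        # response pool, so the existing-pool handling is exercised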
provider = DynProvider('test', 'cust', 'user', 'pass', traffic_directors_enabled=True) @@ -1357,14 +1358,14 @@ class TestDynProviderGeo(TestCase): ruleset_mock, ] provider._traffic_director_monitor = MagicMock() - provider._find_or_create_pool = MagicMock() + provider._find_or_create_geo_pool = MagicMock() unused_pool = _DummyPool('unused') td_mock.all_response_pools = \ ruleset_mock.response_pools + [unused_pool] get_response_pool_mock.return_value = unused_pool - provider._find_or_create_pool.side_effect = [ + provider._find_or_create_geo_pool.side_effect = [ _DummyPool('default'), _DummyPool(1), _DummyPool(2), @@ -1373,7 +1374,7 @@ class TestDynProviderGeo(TestCase): ] change = Create(self.geo_record) - provider._mod_rulesets(td_mock, change) + provider._mod_geo_rulesets(td_mock, change) ruleset_create_mock.assert_has_calls(( call(td_mock, index=2), call(td_mock, index=2), @@ -1577,3 +1578,1054 @@ class TestDSFMonitorMonkeyPatching(TestCase): monitor = DummyDSFMonitor() monitor.port = 8080 self.assertEquals(8080, monitor.port) + + +class DummyRecord(object): + + def __init__(self, address, weight, ttl): + self.address = address + self.weight = weight + self.ttl = ttl + + +class DummyRecordSets(object): + + def __init__(self, records): + self.records = records + + +class DummyRsChains(object): + + def __init__(self, records): + self.record_sets = [DummyRecordSets(records)] + + +class DummyResponsePool(object): + + def __init__(self, label, records=[]): + self.label = label + if records: + self.rs_chains = [DummyRsChains(records)] + else: + self.rs_chains = [] + + def refresh(self): + pass + + +class DummyRuleset(object): + + def __init__(self, label, response_pools=[], + criteria_type='always', criteria={}): + self.label = label + self.response_pools = response_pools + self.criteria_type = criteria_type + self.criteria = criteria + + +class DummyTrafficDirector(object): + + def __init__(self, rulesets=[], response_pools=[], ttl=42): + self.label = 'dummy:abcdef1234567890' + self.rulesets = rulesets + self.all_response_pools = response_pools + self.ttl = ttl + + +class TestDynProviderDynamic(TestCase): + + def test_value_for_address(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + class DummyRecord(object): + + def __init__(self, address, weight): + self.address = address + self.weight = weight + + record = DummyRecord('1.2.3.4', 32) + self.assertEquals({ + 'value': record.address, + 'weight': record.weight, + }, provider._value_for_A('A', record)) + + record = DummyRecord('2601:644:500:e210:62f8:1dff:feb8:947a', 32) + self.assertEquals({ + 'value': record.address, + 'weight': record.weight, + }, provider._value_for_AAAA('AAAA', record)) + + def test_value_for_CNAME(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + class DummyRecord(object): + + def __init__(self, cname, weight): + self.cname = cname + self.weight = weight + + record = DummyRecord('foo.unit.tests.', 32) + self.assertEquals({ + 'value': record.cname, + 'weight': record.weight, + }, provider._value_for_CNAME('CNAME', record)) + + def test_populate_dynamic_pools(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + # Empty data, empty returns + default, pools = provider._populate_dynamic_pools('A', [], []) + self.assertEquals({}, default) + self.assertEquals({}, pools) + + records_a = [DummyRecord('1.2.3.4', 32, 60)] + default_a = DummyResponsePool('default', records_a) + + # Just a default A + response_pools = [default_a] + default, pools = 
provider._populate_dynamic_pools('A', [], + response_pools) + self.assertEquals({ + 'ttl': 60, + 'type': 'A', + 'values': ['1.2.3.4'], + }, default) + self.assertEquals({}, pools) + + multi_a = [ + DummyRecord('1.2.3.5', 42, 90), + DummyRecord('1.2.3.6', 43, 90), + DummyRecord('1.2.3.7', 44, 90), + ] + example_a = DummyResponsePool('example', multi_a) + + # Just a named pool + response_pools = [example_a] + default, pools = provider._populate_dynamic_pools('A', [], + response_pools) + self.assertEquals({}, default) + self.assertEquals({ + 'example': { + 'values': [{ + 'value': '1.2.3.5', + 'weight': 42, + }, { + 'value': '1.2.3.6', + 'weight': 43, + }, { + 'value': '1.2.3.7', + 'weight': 44, + }], + }, + }, pools) + + # Named pool that shows up twice + response_pools = [example_a, example_a] + default, pools = provider._populate_dynamic_pools('A', [], + response_pools) + self.assertEquals({}, default) + self.assertEquals({ + 'example': { + 'values': [{ + 'value': '1.2.3.5', + 'weight': 42, + }, { + 'value': '1.2.3.6', + 'weight': 43, + }, { + 'value': '1.2.3.7', + 'weight': 44, + }], + }, + }, pools) + + # Default & named + response_pools = [example_a, default_a, example_a] + default, pools = provider._populate_dynamic_pools('A', [], + response_pools) + self.assertEquals({ + 'ttl': 60, + 'type': 'A', + 'values': ['1.2.3.4'], + }, default) + self.assertEquals({ + 'example': { + 'values': [{ + 'value': '1.2.3.5', + 'weight': 42, + }, { + 'value': '1.2.3.6', + 'weight': 43, + }, { + 'value': '1.2.3.7', + 'weight': 44, + }], + }, + }, pools) + + # empty rs_chains doesn't cause an example, just ignores + empty_a = DummyResponsePool('empty') + response_pools = [empty_a] + default, pools = provider._populate_dynamic_pools('A', [], + response_pools) + self.assertEquals({}, default) + self.assertEquals({}, pools) + + def test_populate_dynamic_rules(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + # Empty + rulesets = [] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([], rules) + + # default: is ignored + rulesets = [DummyRuleset('default:')] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([], rules) + + # No ResponsePools in RuleSet, ignored + rulesets = [DummyRuleset('0:abcdefg')] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([], rules) + + # ResponsePool, no fallback + rulesets = [DummyRuleset('0:abcdefg', [ + DummyResponsePool('some-pool') + ])] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([{ + 'pool': 'some-pool', + }], rules) + + # ResponsePool, with dfault fallback (ignored) + rulesets = [DummyRuleset('0:abcdefg', [ + DummyResponsePool('some-pool'), + DummyResponsePool('default'), + ])] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([{ + 'pool': 'some-pool', + }], rules) + + # ResponsePool, with fallback + rulesets = [DummyRuleset('0:abcdefg', [ + DummyResponsePool('some-pool'), + DummyResponsePool('some-fallback'), + ])] + pools = { + 'some-pool': {}, + } + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([{ + 'pool': 'some-pool', + }], rules) + # fallback has been installed + self.assertEquals({ + 'some-pool': { + 'fallback': 'some-fallback', + } + }, pools) + + # Unsupported criteria_type (ignored) + rulesets = [DummyRuleset('0:abcdefg', [ + DummyResponsePool('some-pool') + ], 'unsupported')] + pools = {} + rules = 
provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([], rules) + + # Geo Continent/Region + response_pools = [DummyResponsePool('some-pool')] + criteria = { + 'geoip': { + 'country': ['US'], + 'province': ['or'], + 'region': [14], + }, + } + ruleset = DummyRuleset('0:abcdefg', response_pools, + 'geoip', criteria) + rulesets = [ruleset] + pools = {} + rules = provider._populate_dynamic_rules(rulesets, pools) + self.assertEquals([{ + 'geos': ['AF', 'NA-US', 'NA-US-OR'], + 'pool': 'some-pool', + }], rules) + + def test_populate_dynamic_traffic_director(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + fqdn = 'dynamic.unit.tests.' + + multi_a = [ + DummyRecord('1.2.3.5', 1, 90), + DummyRecord('1.2.3.6', 1, 90), + DummyRecord('1.2.3.7', 1, 90), + ] + default_response_pool = DummyResponsePool('default', multi_a) + pool1_response_pool = DummyResponsePool('pool1', multi_a) + rulesets = [ + DummyRuleset('default', [default_response_pool]), + DummyRuleset('0:abcdef', [pool1_response_pool], 'geoip', { + 'geoip': { + 'country': ['US'], + 'province': ['or'], + 'region': [14], + }, + }), + ] + td = DummyTrafficDirector(rulesets, [default_response_pool, + pool1_response_pool]) + zone = Zone('unit.tests.', []) + record = provider._populate_dynamic_traffic_director(zone, fqdn, 'A', + td, rulesets, + True) + self.assertTrue(record) + self.assertEquals('A', record._type) + self.assertEquals(90, record.ttl) + self.assertEquals([ + '1.2.3.5', + '1.2.3.6', + '1.2.3.7', + ], record.values) + self.assertTrue('pool1' in record.dynamic.pools) + self.assertEquals({ + 'fallback': None, + 'values': [{ + 'value': '1.2.3.5', + 'weight': 1, + }, { + 'value': '1.2.3.6', + 'weight': 1, + }, { + 'value': '1.2.3.7', + 'weight': 1, + }] + }, record.dynamic.pools['pool1'].data) + self.assertEquals(2, len(record.dynamic.rules)) + self.assertEquals({ + 'pool': 'default', + }, record.dynamic.rules[0].data) + self.assertEquals({ + 'pool': 'pool1', + 'geos': ['AF', 'NA-US', 'NA-US-OR'], + }, record.dynamic.rules[1].data) + + # Hack into the provider and create a fake list of traffic directors + provider._traffic_directors = { + 'dynamic.unit.tests.': { + 'A': td, + } + } + zone = Zone('unit.tests.', []) + records = provider._populate_traffic_directors(zone, lenient=True) + self.assertEquals(1, len(records)) + + def test_dynamic_records_for_A(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + # Empty + records = provider._dynamic_records_for_A([], {}) + self.assertEquals([], records) + + # Basic + values = [{ + 'value': '1.2.3.4', + }, { + 'value': '1.2.3.5', + 'weight': 42, + }] + records = provider._dynamic_records_for_A(values, {}) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('1.2.3.4', record.address) + self.assertEquals(1, record.weight) + record = records[1] + self.assertEquals('1.2.3.5', record.address) + self.assertEquals(42, record.weight) + + # With extras + records = provider._dynamic_records_for_A(values, { + 'automation': 'manual', + 'eligible': True, + }) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('1.2.3.4', record.address) + self.assertEquals(1, record.weight) + self.assertEquals('manual', record._automation) + self.assertTrue(record.eligible) + + def test_dynamic_records_for_AAAA(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + # Empty + records = provider._dynamic_records_for_AAAA([], {}) + self.assertEquals([], records) + + # Basic + values = [{ + 'value': 
'2601:644:500:e210:62f8:1dff:feb8:947a', + }, { + 'value': '2601:644:500:e210:62f8:1dff:feb8:947b', + 'weight': 42, + }] + records = provider._dynamic_records_for_AAAA(values, {}) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('2601:644:500:e210:62f8:1dff:feb8:947a', + record.address) + self.assertEquals(1, record.weight) + record = records[1] + self.assertEquals('2601:644:500:e210:62f8:1dff:feb8:947b', + record.address) + self.assertEquals(42, record.weight) + + # With extras + records = provider._dynamic_records_for_AAAA(values, { + 'automation': 'manual', + 'eligible': True, + }) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('2601:644:500:e210:62f8:1dff:feb8:947a', + record.address) + self.assertEquals(1, record.weight) + self.assertEquals('manual', record._automation) + self.assertTrue(record.eligible) + + def test_dynamic_records_for_CNAME(self): + provider = DynProvider('test', 'cust', 'user', 'pass') + + # Empty + records = provider._dynamic_records_for_CNAME([], {}) + self.assertEquals([], records) + + # Basic + values = [{ + 'value': 'target-1.unit.tests.', + }, { + 'value': 'target-2.unit.tests.', + 'weight': 42, + }] + records = provider._dynamic_records_for_CNAME(values, {}) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('target-1.unit.tests.', record.cname) + self.assertEquals(1, record.weight) + record = records[1] + self.assertEquals('target-2.unit.tests.', record.cname) + self.assertEquals(42, record.weight) + + # With extras + records = provider._dynamic_records_for_CNAME(values, { + 'automation': 'manual', + 'eligible': True, + }) + self.assertEquals(2, len(records)) + record = records[0] + self.assertEquals('target-1.unit.tests.', record.cname) + self.assertEquals(1, record.weight) + self.assertEquals('manual', record._automation) + self.assertTrue(record.eligible) + + def test_dynamic_value_sort_key(self): + values = [{ + 'value': '1.2.3.1', + }, { + 'value': '1.2.3.27', + }, { + 'value': '1.2.3.127', + }, { + 'value': '1.2.3.2', + }] + + self.assertEquals([{ + 'value': '1.2.3.1', + }, { + 'value': '1.2.3.127', + }, { + 'value': '1.2.3.2', + }, { + 'value': '1.2.3.27', + }], sorted(values, key=_dynamic_value_sort_key)) + + @patch('dyn.tm.services.DSFResponsePool.create') + def test_find_or_create_dynamic_pools(self, mock): + provider = DynProvider('test', 'cust', 'user', 'pass') + + td = 42 + label = 'foo' + values = [{ + 'value': '1.2.3.1', + }, { + 'value': '1.2.3.127', + }, { + 'value': '1.2.3.2', + }, { + 'value': '1.2.3.27', + }] + + # A Pool with no existing pools, will create + pools = [] + pool = provider._find_or_create_dynamic_pool(td, pools, label, 'A', + values) + self.assertIsInstance(pool, DSFResponsePool) + self.assertEquals(1, len(pool.rs_chains)) + self.assertEquals(1, len(pool.rs_chains[0].record_sets)) + records = pool.rs_chains[0].record_sets[0].records + self.assertEquals(4, len(records)) + self.assertEquals([v['value'] for v in values], + [r.address for r in records]) + self.assertEquals([1 for r in records], [r.weight for r in records]) + mock.assert_called_once_with(td) + + # Ask for the pool we created above and include it in the canidate list + mock.reset_mock() + pools = [pool] + cached = provider._find_or_create_dynamic_pool(td, pools, label, 'A', + values) + self.assertEquals(pool, cached) + mock.assert_not_called() + + # Invalid candidate pool, still finds the valid one that's there too + mock.reset_mock() + invalid = DSFResponsePool(label, 
rs_chains=[]) + pools = [invalid, pool] + cached = provider._find_or_create_dynamic_pool(td, pools, label, 'A', + values) + self.assertEquals(pool, cached) + mock.assert_not_called() + + # Ask for a pool with a different label, should create a new one + mock.reset_mock() + pools = [pool] + other = provider._find_or_create_dynamic_pool(td, pools, 'other', 'A', + values) + self.assertEquals('other', other.label) + mock.assert_called_once_with(td) + + # Ask for a pool that matches label-wise, but has different values + values = [{ + 'value': '1.2.3.44', + }] + mock.reset_mock() + pools = [pool] + new = provider._find_or_create_dynamic_pool(td, pools, label, 'A', + values) + self.assertEquals(label, new.label) + self.assertEquals(1, len(new.rs_chains)) + self.assertEquals(1, len(new.rs_chains[0].record_sets)) + records = new.rs_chains[0].record_sets[0].records + self.assertEquals(1, len(records)) + self.assertEquals([v['value'] for v in values], + [r.address for r in records]) + self.assertEquals([1 for r in records], [r.weight for r in records]) + mock.assert_called_once_with(td) + + zone = Zone('unit.tests.', []) + dynamic_a_record = Record.new(zone, '', { + 'dynamic': { + 'pools': { + 'one': { + 'values': [{ + 'value': '3.3.3.3', + }], + }, + 'two': { + # Testing out of order value sorting here + 'values': [{ + 'value': '5.5.5.5', + }, { + 'value': '4.4.4.4', + }], + }, + 'three': { + 'fallback': 'two', + 'values': [{ + 'weight': 10, + 'value': '4.4.4.4', + }, { + 'weight': 12, + 'value': '5.5.5.5', + }], + }, + }, + 'rules': [{ + 'geos': ['AF', 'EU', 'AS-JP'], + 'pool': 'three', + }, { + 'geos': ['NA-US-CA'], + 'pool': 'two', + }, { + 'pool': 'one', + }], + }, + 'type': 'A', + 'ttl': 60, + 'values': [ + '1.1.1.1', + '2.2.2.2', + ], + }) + geo_a_record = Record.new(zone, '', { + 'geo': { + 'AF': ['2.2.3.4', '2.2.3.5'], + 'AS-JP': ['3.2.3.4', '3.2.3.5'], + 'NA-US': ['4.2.3.4', '4.2.3.5'], + 'NA-US-CA': ['5.2.3.4', '5.2.3.5'] + }, + 'ttl': 300, + 'type': 'A', + 'values': ['1.2.3.4', '1.2.3.5'], + }) + regular_a_record = Record.new(zone, '', { + 'ttl': 301, + 'type': 'A', + 'value': '1.2.3.4', + }) + dynamic_cname_record = Record.new(zone, 'www', { + 'dynamic': { + 'pools': { + 'one': { + 'values': [{ + 'value': 'target-0.unit.tests.', + }], + }, + 'two': { + # Testing out of order value sorting here + 'values': [{ + 'value': 'target-1.unit.tests.', + }, { + 'value': 'target-2.unit.tests.', + }], + }, + 'three': { + 'values': [{ + 'weight': 10, + 'value': 'target-3.unit.tests.', + }, { + 'weight': 12, + 'value': 'target-4.unit.tests.', + }], + }, + }, + 'rules': [{ + 'geos': ['AF', 'EU', 'AS-JP'], + 'pool': 'three', + }, { + 'geos': ['NA-US-CA'], + 'pool': 'two', + }, { + 'pool': 'one', + }], + }, + 'type': 'CNAME', + 'ttl': 60, + 'value': 'target.unit.tests.', + }) + + dynamic_fallback_loop = Record.new(zone, '', { + 'dynamic': { + 'pools': { + 'one': { + 'values': [{ + 'value': '3.3.3.3', + }], + }, + 'two': { + # Testing out of order value sorting here + 'fallback': 'three', + 'values': [{ + 'value': '5.5.5.5', + }, { + 'value': '4.4.4.4', + }], + }, + 'three': { + 'fallback': 'two', + 'values': [{ + 'weight': 10, + 'value': '4.4.4.4', + }, { + 'weight': 12, + 'value': '5.5.5.5', + }], + }, + }, + 'rules': [{ + 'geos': ['AF', 'EU', 'AS-JP'], + 'pool': 'three', + }, { + 'geos': ['NA-US-CA'], + 'pool': 'two', + }, { + 'pool': 'one', + }], + }, + 'type': 'A', + 'ttl': 60, + 'values': [ + '1.1.1.1', + '2.2.2.2', + ], + }, lenient=True) + + @patch('dyn.tm.services.DSFRuleset.add_response_pool') 
+    @patch('dyn.tm.services.DSFRuleset.create')
+    # just lets us ignore the pool.create calls
+    @patch('dyn.tm.services.DSFResponsePool.create')
+    def test_mod_dynamic_rulesets_create_CNAME(self, _, ruleset_create_mock,
+                                               add_response_pool_mock):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        td_mock = MagicMock()
+        td_mock._rulesets = []
+        provider._traffic_director_monitor = MagicMock()
+        provider._find_or_create_dynamic_pool = MagicMock()
+
+        td_mock.all_response_pools = []
+
+        provider._find_or_create_dynamic_pool.side_effect = [
+            _DummyPool('default'),
+            _DummyPool('one'),
+            _DummyPool('two'),
+            _DummyPool('three'),
+        ]
+
+        change = Create(self.dynamic_cname_record)
+        provider._mod_dynamic_rulesets(td_mock, change)
+        add_response_pool_mock.assert_has_calls((
+            # default
+            call('default'),
+            # first dynamic and its fallback
+            call('one'),
+            call('default', index=999),
+            # 2nd dynamic and its fallback
+            call('three'),
+            call('default', index=999),
+            # 3rd dynamic and its fallback
+            call('two'),
+            call('default', index=999),
+        ))
+        ruleset_create_mock.assert_has_calls((
+            call(td_mock, index=0),
+            call(td_mock, index=0),
+            call(td_mock, index=0),
+            call(td_mock, index=0),
+        ))
+
+    # have to patch the place it's imported into, not where it lives
+    @patch('octodns.provider.dyn.get_response_pool')
+    @patch('dyn.tm.services.DSFRuleset.add_response_pool')
+    @patch('dyn.tm.services.DSFRuleset.create')
+    # just lets us ignore the pool.create calls
+    @patch('dyn.tm.services.DSFResponsePool.create')
+    def test_mod_dynamic_rulesets_existing(self, _, ruleset_create_mock,
+                                           add_response_pool_mock,
+                                           get_response_pool_mock):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        ruleset_mock = MagicMock()
+        ruleset_mock.response_pools = [_DummyPool('three')]
+
+        td_mock = MagicMock()
+        td_mock._rulesets = [
+            ruleset_mock,
+        ]
+        provider._traffic_director_monitor = MagicMock()
+        provider._find_or_create_dynamic_pool = MagicMock()
+        # Matching ttl
+        td_mock.ttl = self.dynamic_a_record.ttl
+
+        unused_pool = _DummyPool('unused')
+        td_mock.all_response_pools = \
+            ruleset_mock.response_pools + [unused_pool]
+        get_response_pool_mock.return_value = unused_pool
+
+        provider._find_or_create_dynamic_pool.side_effect = [
+            _DummyPool('default'),
+            _DummyPool('one'),
+            _DummyPool('two'),
+            ruleset_mock.response_pools[0],
+        ]
+
+        change = Create(self.dynamic_a_record)
+        provider._mod_dynamic_rulesets(td_mock, change)
+        add_response_pool_mock.assert_has_calls((
+            # default
+            call('default'),
+            # first dynamic and its fallback
+            call('one'),
+            call('default', index=999),
+            # 2nd dynamic and its fallback
+            call('three'),
+            call('default', index=999),
+            # 3rd dynamic, from existing, and its fallback
+            call('two'),
+            call('three', index=999),
+            call('default', index=999),
+        ))
+        ruleset_create_mock.assert_has_calls((
+            call(td_mock, index=2),
+            call(td_mock, index=2),
+            call(td_mock, index=2),
+            call(td_mock, index=2),
+        ))
+        # unused pool should have been deleted
+        self.assertTrue(unused_pool.deleted)
+        # old ruleset should be deleted, its pool will have been
+        # reused
+        ruleset_mock.delete.assert_called_once()
+
+    # have to patch the place it's imported into, not where it lives
+    @patch('octodns.provider.dyn.get_response_pool')
+    @patch('dyn.tm.services.DSFRuleset.add_response_pool')
+    @patch('dyn.tm.services.DSFRuleset.create')
+    # just lets us ignore the pool.create calls
+
@patch('dyn.tm.services.DSFResponsePool.create') + def test_mod_dynamic_rulesets_fallback_loop(self, _, ruleset_create_mock, + add_response_pool_mock, + get_response_pool_mock): + provider = DynProvider('test', 'cust', 'user', 'pass', + traffic_directors_enabled=True) + + ruleset_mock = MagicMock() + ruleset_mock.response_pools = [_DummyPool('three')] + + td_mock = MagicMock() + td_mock._rulesets = [ + ruleset_mock, + ] + provider._traffic_director_monitor = MagicMock() + provider._find_or_create_dynamic_pool = MagicMock() + # Matching ttl + td_mock.ttl = self.dynamic_fallback_loop.ttl + + unused_pool = _DummyPool('unused') + td_mock.all_response_pools = \ + ruleset_mock.response_pools + [unused_pool] + get_response_pool_mock.return_value = unused_pool + + provider._find_or_create_dynamic_pool.side_effect = [ + _DummyPool('default'), + _DummyPool('one'), + _DummyPool('two'), + ruleset_mock.response_pools[0], + ] + + change = Create(self.dynamic_fallback_loop) + provider._mod_dynamic_rulesets(td_mock, change) + add_response_pool_mock.assert_has_calls(( + # default + call('default'), + # first dynamic and it's fallback + call('one'), + call('default', index=999), + # 2nd dynamic and it's fallback (no loop) + call('three'), + call('two', index=999), + call('default', index=999), + # 3nd dynamic and it's fallback (no loop) + call('two'), + call('three', index=999), + call('default', index=999), + )) + ruleset_create_mock.assert_has_calls(( + call(td_mock, index=2), + call(td_mock, index=2), + call(td_mock, index=2), + call(td_mock, index=2), + )) + # unused poll should have been deleted + self.assertTrue(unused_pool.deleted) + # old ruleset ruleset should be deleted, it's pool will have been + # reused + ruleset_mock.delete.assert_called_once() + + with open('./tests/fixtures/dyn-traffic-director-get.json') as fh: + traffic_director_response = loads(fh.read()) + + @property + def traffic_directors_response(self): + return { + 'data': [{ + 'active': 'Y', + 'label': 'unit.tests.:A', + 'nodes': [], + 'notifiers': [], + 'pending_change': '', + 'rulesets': [], + 'service_id': '2ERWXQNsb_IKG2YZgYqkPvk0PBM', + 'ttl': '300' + }, { + 'active': 'Y', + 'label': 'some.other.:A', + 'nodes': [], + 'notifiers': [], + 'pending_change': '', + 'rulesets': [], + 'service_id': '3ERWXQNsb_IKG2YZgYqkPvk0PBM', + 'ttl': '300' + }, { + 'active': 'Y', + 'label': 'other format', + 'nodes': [], + 'notifiers': [], + 'pending_change': '', + 'rulesets': [], + 'service_id': '4ERWXQNsb_IKG2YZgYqkPvk0PBM', + 'ttl': '300' + }] + } + + @patch('dyn.core.SessionEngine.execute') + def test_mod_dynamic_create(self, mock): + provider = DynProvider('test', 'cust', 'user', 'pass', + traffic_directors_enabled=True) + + # will be tested separately + provider._mod_dynamic_rulesets = MagicMock() + + mock.side_effect = [ + # create traffic director + self.traffic_director_response, + # get traffic directors + self.traffic_directors_response + ] + provider._mod_dynamic_Create(None, Create(self.dynamic_a_record)) + # td now lives in cache + self.assertTrue('A' in provider.traffic_directors['unit.tests.']) + # should have seen 1 gen call + provider._mod_dynamic_rulesets.assert_called_once() + + def test_mod_dynamic_update_dynamic_dynamic(self): + provider = DynProvider('test', 'cust', 'user', 'pass', + traffic_directors_enabled=True) + + # update of an existing dynamic td + + # pre-populate the cache with our mock td + provider._traffic_directors = { + 'unit.tests.': { + 'A': 42, + } + } + # mock _mod_dynamic_rulesets + 
+        provider._mod_dynamic_rulesets = MagicMock()
+
+        dyn = self.dynamic_a_record
+        change = Update(dyn, dyn)
+        provider._mod_dynamic_Update(None, change)
+        # still in cache
+        self.assertTrue('A' in provider.traffic_directors['unit.tests.'])
+        # should have seen 1 gen call
+        provider._mod_dynamic_rulesets.assert_called_once_with(42, change)
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_mod_dynamic_update_dynamic_geo(self, _):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        # convert a dynamic td to a geo record
+
+        provider._mod_geo_Update = MagicMock()
+
+        change = Update(self.dynamic_a_record, self.geo_a_record)
+        provider._mod_dynamic_Update(42, change)
+        # should have seen a call to create the new geo record
+        provider._mod_geo_Update.assert_called_once_with(42, change)
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_mod_dynamic_update_dynamic_regular(self, _):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        # convert a dynamic td to a regular record
+
+        provider._mod_Create = MagicMock()
+        provider._mod_dynamic_Delete = MagicMock()
+
+        change = Update(self.dynamic_a_record, self.regular_a_record)
+        provider._mod_dynamic_Update(42, change)
+        # should have seen a call to create the new regular record
+        provider._mod_Create.assert_called_once_with(42, change)
+        # should have seen a call to delete the old td record
+        provider._mod_dynamic_Delete.assert_called_once_with(42, change)
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_mod_dynamic_update_geo_dynamic(self, _):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        # convert a geo record to a dynamic td
+
+        # pre-populate the cache with our mock td
+        provider._traffic_directors = {
+            'unit.tests.': {
+                'A': 42,
+            }
+        }
+        # mock _mod_dynamic_rulesets
+        provider._mod_dynamic_rulesets = MagicMock()
+
+        change = Update(self.geo_a_record, self.dynamic_a_record)
+        provider._mod_dynamic_Update(None, change)
+        # still in cache
+        self.assertTrue('A' in provider.traffic_directors['unit.tests.'])
+        # should have seen 1 gen call
+        provider._mod_dynamic_rulesets.assert_called_once_with(42, change)
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_mod_dynamic_update_regular_dynamic(self, _):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        # convert a regular record to a dynamic td
+
+        provider._mod_dynamic_Create = MagicMock()
+        provider._mod_Delete = MagicMock()
+
+        change = Update(self.regular_a_record, self.dynamic_a_record)
+        provider._mod_dynamic_Update(42, change)
+        # should have seen a call to create the new dynamic record
+        provider._mod_dynamic_Create.assert_called_once_with(42, change)
+        # should have seen a call to delete the old regular record
+        provider._mod_Delete.assert_called_once_with(42, change)
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_mod_dynamic_delete(self, mock):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        td_mock = MagicMock()
+        provider._traffic_directors = {
+            'unit.tests.': {
+                'A': td_mock,
+            }
+        }
+        provider._mod_dynamic_Delete(None, Delete(self.dynamic_a_record))
+        # delete called
+        td_mock.delete.assert_called_once()
+        # removed from cache
+        self.assertFalse('A' in provider.traffic_directors['unit.tests.'])
+
+    @patch('dyn.core.SessionEngine.execute')
+    def test_apply_traffic_directors_dynamic(self, mock):
+        provider = DynProvider('test', 'cust', 'user', 'pass',
+                               traffic_directors_enabled=True)
+
+        # will be tested separately
+        provider._mod_dynamic_Create = MagicMock()
+
+        changes = [Create(self.dynamic_a_record)]
+        provider._apply_traffic_directors(self.zone, changes, None)
+        provider._mod_dynamic_Create.assert_called_once()
diff --git a/tests/test_octodns_provider_etc_hosts.py b/tests/test_octodns_provider_etc_hosts.py
index 236164d..fc518bd 100644
--- a/tests/test_octodns_provider_etc_hosts.py
+++ b/tests/test_octodns_provider_etc_hosts.py
@@ -170,7 +170,6 @@ class TestEtcHostsProvider(TestCase):
 
         with open(hosts_file) as fh:
             data = fh.read()
-            print(data)
         self.assertTrue('# loop.unit.tests -> start.unit.tests '
                         '**loop**' in data)
         self.assertTrue('# middle.unit.tests -> loop.unit.tests '
diff --git a/tests/test_octodns_record.py b/tests/test_octodns_record.py
index 6be334d..4f05126 100644
--- a/tests/test_octodns_record.py
+++ b/tests/test_octodns_record.py
@@ -10,7 +10,7 @@ from unittest import TestCase
 from octodns.record import ARecord, AaaaRecord, AliasRecord, CaaRecord, \
     CnameRecord, Create, Delete, GeoValue, MxRecord, NaptrRecord, \
     NaptrValue, NsRecord, Record, SshfpRecord, SpfRecord, SrvRecord, \
-    TxtRecord, Update, ValidationError
+    TxtRecord, Update, ValidationError, _Dynamic, _DynamicPool, _DynamicRule
 from octodns.zone import Zone
 
 from helpers import DynamicProvider, GeoProvider, SimpleProvider
@@ -1906,10 +1906,11 @@ class TestDynamicRecords(TestCase):
                 }],
             },
             'two': {
+                # Testing out of order value sorting here
                 'values': [{
-                    'value': '4.4.4.4',
-                }, {
                     'value': '5.5.5.5',
+                }, {
+                    'value': '4.4.4.4',
                 }],
             },
             'three': {
@@ -1948,10 +1949,24 @@ class TestDynamicRecords(TestCase):
 
         pools = dynamic.pools
         self.assertTrue(pools)
-        self.assertEquals(a_data['dynamic']['pools']['one'], pools['one'].data)
-        self.assertEquals(a_data['dynamic']['pools']['two'], pools['two'].data)
-        self.assertEquals(a_data['dynamic']['pools']['three'],
-                          pools['three'].data)
+        self.assertEquals({
+            'value': '3.3.3.3',
+            'weight': 1,
+        }, pools['one'].data['values'][0])
+        self.assertEquals([{
+            'value': '4.4.4.4',
+            'weight': 1,
+        }, {
+            'value': '5.5.5.5',
+            'weight': 1,
+        }], pools['two'].data['values'])
+        self.assertEquals([{
+            'weight': 10,
+            'value': '4.4.4.4',
+        }, {
+            'weight': 12,
+            'value': '5.5.5.5',
+        }], pools['three'].data['values'])
 
         rules = dynamic.rules
         self.assertTrue(rules)
@@ -1994,10 +2009,11 @@ class TestDynamicRecords(TestCase):
                 }],
             },
             'two': {
+                # Testing out of order value sorting here
                 'values': [{
-                    'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
-                }, {
                     'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
+                }, {
+                    'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
                 }],
             },
             'three': {
@@ -2036,12 +2052,24 @@ class TestDynamicRecords(TestCase):
 
         pools = dynamic.pools
        self.assertTrue(pools)
-        self.assertEquals(aaaa_data['dynamic']['pools']['one'],
-                          pools['one'].data)
-        self.assertEquals(aaaa_data['dynamic']['pools']['two'],
-                          pools['two'].data)
-        self.assertEquals(aaaa_data['dynamic']['pools']['three'],
-                          pools['three'].data)
+        self.assertEquals({
+            'value': '2601:642:500:e210:62f8:1dff:feb8:9473',
+            'weight': 1,
+        }, pools['one'].data['values'][0])
+        self.assertEquals([{
+            'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
+            'weight': 1,
+        }, {
+            'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
+            'weight': 1,
+        }], pools['two'].data['values'])
+        self.assertEquals([{
+            'weight': 10,
+            'value': '2601:642:500:e210:62f8:1dff:feb8:9476',
+        }, {
+            'weight': 12,
+            'value': '2601:642:500:e210:62f8:1dff:feb8:9477',
+        }], pools['three'].data['values'])
 
         rules = dynamic.rules
         self.assertTrue(rules)
@@ -2094,12 +2122,21 @@ class TestDynamicRecords(TestCase):
 
         pools = dynamic.pools
         self.assertTrue(pools)
-        self.assertEquals(cname_data['dynamic']['pools']['one'],
-                          pools['one'].data)
-        self.assertEquals(cname_data['dynamic']['pools']['two'],
-                          pools['two'].data)
-        self.assertEquals(cname_data['dynamic']['pools']['three'],
-                          pools['three'].data)
+        self.assertEquals({
+            'value': 'one.cname.target.',
+            'weight': 1,
+        }, pools['one'].data['values'][0])
+        self.assertEquals({
+            'value': 'two.cname.target.',
+            'weight': 1,
+        }, pools['two'].data['values'][0])
+        self.assertEquals([{
+            'value': 'three-1.cname.target.',
+            'weight': 12,
+        }, {
+            'value': 'three-2.cname.target.',
+            'weight': 32,
+        }], pools['three'].data['values'])
 
         rules = dynamic.rules
         self.assertTrue(rules)
@@ -2906,9 +2943,10 @@ class TestDynamicRecords(TestCase):
         a_data = {
             'dynamic': {
                 'rules': [{
-                    'pools': {
-                        1: 'one',
-                    }
+                    'geos': ['EU'],
+                    'pool': 'two',
+                }, {
+                    'pool': 'one',
                 }],
             },
             'ttl': 60,
@@ -2928,7 +2966,19 @@ class TestDynamicRecords(TestCase):
         a_data = {
             'dynamic': {
                 'pools': {
-                    'one': '1.1.1.1',
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }]
+                    },
+                    'two': {
+                        'values': [{
+                            'value': '4.4.4.4',
+                        }, {
+                            'value': '5.5.5.5',
+                            'weight': 2,
+                        }]
+                    },
                 },
             },
             'ttl': 60,
@@ -2941,11 +2991,82 @@ class TestDynamicRecords(TestCase):
         a = Record.new(self.zone, 'bad', a_data, lenient=True)
         self.assertEquals({
             'pools': {
-                'one': '1.1.1.1',
+                'one': {
+                    'fallback': None,
+                    'values': [{
+                        'value': '3.3.3.3',
+                        'weight': 1,
+                    }]
+                },
+                'two': {
+                    'fallback': None,
+                    'values': [{
+                        'value': '4.4.4.4',
+                        'weight': 1,
+                    }, {
+                        'value': '5.5.5.5',
+                        'weight': 2,
+                    }]
+                },
             },
             'rules': [],
         }, a._data()['dynamic'])
+
+        # rule without pool
+        a_data = {
+            'dynamic': {
+                'pools': {
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }]
+                    },
+                    'two': {
+                        'values': [{
+                            'value': '4.4.4.4',
+                        }, {
+                            'value': '5.5.5.5',
+                            'weight': 2,
+                        }]
+                    },
+                },
+                'rules': [{
+                    'geos': ['EU'],
+                    'pool': 'two',
+                }, {
+                }],
+            },
+            'ttl': 60,
+            'type': 'A',
+            'values': [
+                '1.1.1.1',
+                '2.2.2.2',
+            ],
+        }
+        a = Record.new(self.zone, 'bad', a_data, lenient=True)
+        self.assertEquals({
+            'pools': {
+                'one': {
+                    'fallback': None,
+                    'values': [{
+                        'value': '3.3.3.3',
+                        'weight': 1,
+                    }]
+                },
+                'two': {
+                    'fallback': None,
+                    'values': [{
+                        'value': '4.4.4.4',
+                        'weight': 1,
+                    }, {
+                        'value': '5.5.5.5',
+                        'weight': 2,
+                    }]
+                },
+            },
+            'rules': a_data['dynamic']['rules'],
+        }, a._data()['dynamic'])
 
     def test_dynamic_changes(self):
         simple = SimpleProvider()
         dynamic = DynamicProvider()
@@ -2953,17 +3074,24 @@ class TestDynamicRecords(TestCase):
         a_data = {
             'dynamic': {
                 'pools': {
-                    'one': '3.3.3.3',
-                    'two': [
-                        '4.4.4.4',
-                        '5.5.5.5',
-                    ],
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }]
+                    },
+                    'two': {
+                        'values': [{
+                            'value': '4.4.4.4',
+                        }, {
+                            'value': '5.5.5.5',
+                        }]
+                    },
                 },
                 'rules': [{
-                    'pools': {
-                        100: 'one',
-                        200: 'two',
-                    }
+                    'geos': ['EU'],
+                    'pool': 'two',
+                }, {
+                    'pool': 'one',
                 }],
             },
             'ttl': 60,
@@ -2978,17 +3106,25 @@ class TestDynamicRecords(TestCase):
         b_data = {
             'dynamic': {
                 'pools': {
-                    'one': '3.3.3.5',
-                    'two': [
-                        '4.4.4.4',
-                        '5.5.5.5',
-                    ],
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }]
+                    },
+                    'two': {
+                        'values': [{
+                            'value': '4.4.4.4',
+                            'weight': 2,
+                        }, {
+                            'value': '5.5.5.5',
+                        }]
+                    },
                 },
                 'rules': [{
-                    'pools': {
-                        100: 'one',
-                        200: 'two',
-                    }
+                    'geos': ['EU'],
+                    'pool': 'two',
+                }, {
+                    'pool': 'one',
                 }],
             },
             'ttl': 60,
@@ -3002,17 +3138,24 @@ class TestDynamicRecords(TestCase):
         c_data = {
             'dynamic': {
                 'pools': {
-                    'one': '3.3.3.3',
-                    'two': [
-                        '4.4.4.4',
-                        '5.5.5.5',
-                    ],
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }]
+                    },
+                    'two': {
+                        'values': [{
+                            'value': '4.4.4.4',
+                        }, {
+                            'value': '5.5.5.5',
+                        }]
+                    },
                 },
                 'rules': [{
-                    'pools': {
-                        100: 'one',
-                        300: 'two',
-                    }
+                    'geos': ['NA'],
+                    'pool': 'two',
+                }, {
+                    'pool': 'one',
                 }],
             },
             'ttl': 60,
@@ -3052,3 +3195,96 @@ class TestDynamicRecords(TestCase):
         self.assertEquals(a.dynamic.rules, a.dynamic.rules)
         self.assertEquals(a.dynamic.rules[0], a.dynamic.rules[0])
         self.assertNotEquals(a.dynamic.rules[0], c.dynamic.rules[0])
+
+    def test_dynamic_and_geo_validation(self):
+        a_data = {
+            'dynamic': {
+                'pools': {
+                    'one': {
+                        'values': [{
+                            'value': '3.3.3.3',
+                        }],
+                    },
+                    'two': {
+                        # Testing out of order value sorting here
+                        'values': [{
+                            'value': '5.5.5.5',
+                        }, {
+                            'value': '4.4.4.4',
+                        }],
+                    },
+                    'three': {
+                        'values': [{
+                            'weight': 10,
+                            'value': '4.4.4.4',
+                        }, {
+                            'weight': 12,
+                            'value': '5.5.5.5',
+                        }],
+                    },
+                },
+                'rules': [{
+                    'geos': ['AF', 'EU'],
+                    'pool': 'three',
+                }, {
+                    'geos': ['NA-US-CA'],
+                    'pool': 'two',
+                }, {
+                    'pool': 'one',
+                }],
+            },
+            'geo': {
+                'NA': ['1.2.3.5'],
+                'NA-US': ['1.2.3.5', '1.2.3.6']
+            },
+            'type': 'A',
+            'ttl': 60,
+            'values': [
+                '1.1.1.1',
+                '2.2.2.2',
+            ],
+        }
+        with self.assertRaises(ValidationError) as ctx:
+            Record.new(self.zone, 'bad', a_data)
+        self.assertEquals(['"dynamic" record with "geo" content'],
+                          ctx.exception.reasons)
+
+    def test_dynamic_eqs(self):
+
+        pool_one = _DynamicPool('one', {
+            'values': [{
+                'value': '1.2.3.4',
+            }],
+        })
+        pool_two = _DynamicPool('two', {
+            'values': [{
+                'value': '1.2.3.5',
+            }],
+        })
+        self.assertEquals(pool_one, pool_one)
+        self.assertNotEquals(pool_one, pool_two)
+        self.assertNotEquals(pool_one, 42)
+
+        pools = {
+            'one': pool_one,
+            'two': pool_two,
+        }
+        rule_one = _DynamicRule(0, {
+            'pool': 'one',
+        })
+        rule_two = _DynamicRule(1, {
+            'pool': 'two',
+        })
+        self.assertEquals(rule_one, rule_one)
+        self.assertNotEquals(rule_one, rule_two)
+        self.assertNotEquals(rule_one, 42)
+        rules = [
+            rule_one,
+            rule_two,
+        ]
+
+        dynamic = _Dynamic(pools, rules)
+        other = _Dynamic({}, [])
+        self.assertEquals(dynamic, dynamic)
+        self.assertNotEquals(dynamic, other)
+        self.assertNotEquals(dynamic, 42)
diff --git a/tests/test_octodns_record_geo.py b/tests/test_octodns_record_geo.py
index 6b9cd4e..5b7454c 100644
--- a/tests/test_octodns_record_geo.py
+++ b/tests/test_octodns_record_geo.py
@@ -51,3 +51,30 @@ class TestRecordGeoCodes(TestCase):
         # Bad province code, good continent and country
         self.assertEquals(['xyz unknown province code "NA-US-XX"'],
                           GeoCodes.validate('NA-US-XX', prefix))
+
+    def test_parse(self):
+        self.assertEquals({
+            'continent_code': 'NA',
+            'country_code': None,
+            'province_code': None,
+        }, GeoCodes.parse('NA'))
+        self.assertEquals({
+            'continent_code': 'NA',
+            'country_code': 'US',
+            'province_code': None,
+        }, GeoCodes.parse('NA-US'))
+        self.assertEquals({
+            'continent_code': 'NA',
+            'country_code': 'US',
+            'province_code': 'CA',
+        }, GeoCodes.parse('NA-US-CA'))
+
+    def test_country_to_code(self):
+        self.assertEquals('NA-US', GeoCodes.country_to_code('US'))
+        self.assertEquals('EU-GB', GeoCodes.country_to_code('GB'))
+        self.assertFalse(GeoCodes.country_to_code('XX'))
+
+    def test_province_to_code(self):
+        self.assertEquals('NA-US-OR', GeoCodes.province_to_code('OR'))
+        self.assertEquals('NA-US-KY', GeoCodes.province_to_code('KY'))
+        self.assertFalse(GeoCodes.province_to_code('XX'))