diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2922a40..4de4853 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,8 +6,12 @@
 https://github.com/octodns/octodns/issues/622 &
 https://github.com/octodns/octodns/pull/822 for more information. Providers
 that have been extracted in this release include:
- * [PowerDnsProvider](https://github.com/octodns/octodns-powerdns/)
  * [ConstellixProvider](https://github.com/octodns/octodns-constellix/)
+ * [DnsimpleProvider](https://github.com/octodns/octodns-dnsimple/)
+ * [Ns1Provider](https://github.com/octodns/octodns-ns1/)
+ * [PowerDnsProvider](https://github.com/octodns/octodns-powerdns/)
+ * [Route53Provider](https://github.com/octodns/octodns-route53/), along with
+   the AwsAcmMangingProcessor
 * NS1 provider has received improvements to the dynamic record
   implementation. As a result, if octoDNS is downgraded from this version,
   any dynamic records created or updated using this version will show an
   update.
diff --git a/README.md b/README.md
index 4a977cd..db00f04 100644
--- a/README.md
+++ b/README.md
@@ -198,7 +198,7 @@ The table below lists the providers octoDNS supports. We're currently in the pro
 | [ConstellixProvider](https://github.com/octodns/octodns-constellix/) | [octodns_constellix](https://github.com/octodns/octodns-constellix/) | | | | |
 | [DigitalOceanProvider](/octodns/provider/digitalocean.py) | | | A, AAAA, CAA, CNAME, MX, NS, TXT, SRV | No | CAA tags restricted |
 | [DnsMadeEasyProvider](/octodns/provider/dnsmadeeasy.py) | | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
-| [DnsimpleProvider](/octodns/provider/dnsimple.py) | | | All | No | CAA tags restricted |
+| [DnsimpleProvider](https://github.com/octodns/octodns-dnsimple/) | [octodns_dnsimple](https://github.com/octodns/octodns-dnsimple/) | | | | |
 | [DynProvider](/octodns/provider/dyn.py) | | dyn | All | Both | |
 | [EasyDNSProvider](/octodns/provider/easydns.py) | | | A, AAAA, CAA, CNAME, MX, NAPTR, NS, SRV, TXT | No | |
 | [EtcHostsProvider](/octodns/provider/etc_hosts.py) | | | A, AAAA, ALIAS, CNAME | No | |
@@ -208,11 +208,11 @@ The table below lists the providers octoDNS supports. We're currently in the pro
 | [GoogleCloudProvider](/octodns/provider/googlecloud.py) | | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
 | [HetznerProvider](/octodns/provider/hetzner.py) | | | A, AAAA, CAA, CNAME, MX, NS, SRV, TXT | No | |
 | [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
-| [Ns1Provider](/octodns/provider/ns1.py) | | ns1-python | All | Yes | |
+| [Ns1Provider](https://github.com/octodns/octodns-ns1/) | [octodns_ns1](https://github.com/octodns/octodns-ns1/) | | | | |
 | [OVH](/octodns/provider/ovh.py) | | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | |
 | [PowerDnsProvider](https://github.com/octodns/octodns-powerdns/) | [octodns_powerdns](https://github.com/octodns/octodns-powerdns/) | | | | |
 | [Rackspace](/octodns/provider/rackspace.py) | | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | |
-| [Route53](/octodns/provider/route53.py) | | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header |
+| [Route53](https://github.com/octodns/octodns-route53/) | [octodns_route53](https://github.com/octodns/octodns-route53/) | | | | |
 | [Selectel](/octodns/provider/selectel.py) | | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | |
 | [Transip](/octodns/provider/transip.py) | | transip | A, AAAA, CNAME, MX, NS, SRV, SPF, TXT, SSHFP, CAA | No | |
 | [UltraDns](/octodns/provider/ultra.py) | | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | |
diff --git a/octodns/processor/awsacm.py b/octodns/processor/awsacm.py
index d036aba..49c1c06 100644
--- a/octodns/processor/awsacm.py
+++ b/octodns/processor/awsacm.py
@@ -7,38 +7,16 @@ from __future__ import absolute_import, division, print_function, \
 
 from logging import getLogger
 
-from .base import BaseProcessor
-
-
-class AwsAcmMangingProcessor(BaseProcessor):
-    '''
-    processors:
-      awsacm:
-        class: octodns.processor.acme.AwsAcmMangingProcessor
-
-    ...
-
-    zones:
-      something.com.:
-        ...
-        processors:
-        - awsacm
-        ...
-    '''
-
-    log = getLogger('AwsAcmMangingProcessor')
-
-    def _ignore_awsacm_cnames(self, zone):
-        for r in zone.records:
-            if r._type == 'CNAME' and \
-                    r.name.startswith('_') \
-                    and r.value.endswith('.acm-validations.aws.'):
-                self.log.info('_process: ignoring %s', r.fqdn)
-                zone.remove_record(r)
-        return zone
-
-    def process_source_zone(self, desired, *args, **kwargs):
-        return self._ignore_awsacm_cnames(desired)
-
-    def process_target_zone(self, existing, *args, **kwargs):
-        return self._ignore_awsacm_cnames(existing)
+logger = getLogger('Route53')
+try:
+    logger.warn('octodns_route53 shimmed. Update your processor class to '
+                'octodns_route53.processor.AwsAcmMangingProcessor. '
+                'Shim will be removed in 1.0')
+    from octodns_route53.processor import AwsAcmMangingProcessor
+    AwsAcmMangingProcessor  # pragma: no cover
+except ModuleNotFoundError:
+    logger.exception('AwsAcmMangingProcessor has been moved into a separate '
+                     'module, octodns_route53 is now required. 
Processor ' + 'class should be updated to ' + 'octodns_route53.processor.AwsAcmMangingProcessor') + raise diff --git a/octodns/provider/dnsimple.py b/octodns/provider/dnsimple.py index 1ce1673..7be4fcb 100644 --- a/octodns/provider/dnsimple.py +++ b/octodns/provider/dnsimple.py @@ -5,448 +5,17 @@ from __future__ import absolute_import, division, print_function, \ unicode_literals -from collections import defaultdict -from requests import Session -import logging - -from ..record import Record -from . import ProviderException -from .base import BaseProvider - - -class DnsimpleClientException(ProviderException): - pass - - -class DnsimpleClientNotFound(DnsimpleClientException): - - def __init__(self): - super(DnsimpleClientNotFound, self).__init__('Not found') - - -class DnsimpleClientUnauthorized(DnsimpleClientException): - - def __init__(self): - super(DnsimpleClientUnauthorized, self).__init__('Unauthorized') - - -class DnsimpleClient(object): - - def __init__(self, token, account, sandbox): - self.account = account - sess = Session() - sess.headers.update({'Authorization': f'Bearer {token}'}) - self._sess = sess - if sandbox: - self.base = 'https://api.sandbox.dnsimple.com/v2/' - else: - self.base = 'https://api.dnsimple.com/v2/' - - def _request(self, method, path, params=None, data=None): - url = f'{self.base}{self.account}{path}' - resp = self._sess.request(method, url, params=params, json=data) - if resp.status_code == 401: - raise DnsimpleClientUnauthorized() - if resp.status_code == 404: - raise DnsimpleClientNotFound() - resp.raise_for_status() - return resp - - def zone(self, name): - path = f'/zones/{name}' - return self._request('GET', path).json() - - def domain_create(self, name): - return self._request('POST', '/domains', data={'name': name}) - - def records(self, zone_name): - ret = [] - - page = 1 - while True: - data = self._request('GET', f'/zones/{zone_name}/records', - {'page': page}).json() - ret += data['data'] - pagination = data['pagination'] - if page >= pagination['total_pages']: - break - page += 1 - - return ret - - def record_create(self, zone_name, params): - path = f'/zones/{zone_name}/records' - self._request('POST', path, data=params) - - def record_delete(self, zone_name, record_id): - path = f'/zones/{zone_name}/records/{record_id}' - self._request('DELETE', path) - - -class DnsimpleProvider(BaseProvider): - ''' - Dnsimple provider using API v2 - - dnsimple: - class: octodns.provider.dnsimple.DnsimpleProvider - # API v2 account access token (required) - token: letmein - # Your account number (required) - account: 42 - # Use sandbox (optional) - sandbox: true - ''' - SUPPORTS_GEO = False - SUPPORTS_DYNAMIC = False - SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', - 'PTR', 'SPF', 'SRV', 'SSHFP', 'TXT')) - - def __init__(self, id, token, account, sandbox=False, *args, **kwargs): - self.log = logging.getLogger(f'DnsimpleProvider[{id}]') - self.log.debug('__init__: id=%s, token=***, account=%s', id, account) - super(DnsimpleProvider, self).__init__(id, *args, **kwargs) - self._client = DnsimpleClient(token, account, sandbox) - - self._zone_records = {} - - def _data_for_multiple(self, _type, records): - return { - 'ttl': records[0]['ttl'], - 'type': _type, - 'values': [r['content'] for r in records] - } - - _data_for_A = _data_for_multiple - _data_for_AAAA = _data_for_multiple - _data_for_SPF = _data_for_multiple - - def _data_for_TXT(self, _type, records): - return { - 'ttl': records[0]['ttl'], - 'type': _type, - # escape semicolons 
- 'values': [r['content'].replace(';', '\\;') for r in records] - } - - def _data_for_CAA(self, _type, records): - values = [] - for record in records: - flags, tag, value = record['content'].split(' ') - values.append({ - 'flags': flags, - 'tag': tag, - 'value': value[1:-1], - }) - return { - 'ttl': records[0]['ttl'], - 'type': _type, - 'values': values - } - - def _data_for_CNAME(self, _type, records): - record = records[0] - return { - 'ttl': record['ttl'], - 'type': _type, - 'value': f'{record["content"]}.' - } - - _data_for_ALIAS = _data_for_CNAME - - def _data_for_MX(self, _type, records): - values = [] - for record in records: - values.append({ - 'preference': record['priority'], - 'exchange': f'{record["content"]}.' - }) - return { - 'ttl': records[0]['ttl'], - 'type': _type, - 'values': values - } - - def _data_for_NAPTR(self, _type, records): - values = [] - for record in records: - try: - order, preference, flags, service, regexp, replacement = \ - record['content'].split(' ', 5) - except ValueError: - # their api will let you create invalid records, this - # essentially handles that by ignoring them for values - # purposes. That will cause updates to happen to delete them if - # they shouldn't exist or update them if they're wrong - continue - values.append({ - 'flags': flags[1:-1], - 'order': order, - 'preference': preference, - 'regexp': regexp[1:-1], - 'replacement': replacement, - 'service': service[1:-1], - }) - return { - 'type': _type, - 'ttl': records[0]['ttl'], - 'values': values - } - - def _data_for_NS(self, _type, records): - values = [] - for record in records: - content = record['content'] - if content[-1] != '.': - content = f'{content}.' - values.append(content) - return { - 'ttl': records[0]['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_PTR(self, _type, records): - record = records[0] - return { - 'ttl': record['ttl'], - 'type': _type, - 'value': record['content'] - } - - def _data_for_SRV(self, _type, records): - values = [] - for record in records: - try: - weight, port, target = record['content'].split(' ', 2) - except ValueError: - # their api/website will let you create invalid records, this - # essentially handles that by ignoring them for values - # purposes. That will cause updates to happen to delete them if - # they shouldn't exist or update them if they're wrong - self.log.warning( - '_data_for_SRV: unsupported %s record (%s)', - _type, - record['content'] - ) - continue - - target = f'{target}.' if target != "." else "." 
- - values.append({ - 'port': port, - 'priority': record['priority'], - 'target': target, - 'weight': weight - }) - return { - 'type': _type, - 'ttl': records[0]['ttl'], - 'values': values - } - - def _data_for_SSHFP(self, _type, records): - values = [] - for record in records: - try: - algorithm, fingerprint_type, fingerprint = \ - record['content'].split(' ', 2) - except ValueError: - # see _data_for_NAPTR's continue - continue - values.append({ - 'algorithm': algorithm, - 'fingerprint': fingerprint, - 'fingerprint_type': fingerprint_type - }) - return { - 'type': _type, - 'ttl': records[0]['ttl'], - 'values': values - } - - def zone_records(self, zone): - if zone.name not in self._zone_records: - try: - self._zone_records[zone.name] = \ - self._client.records(zone.name[:-1]) - except DnsimpleClientNotFound: - return [] - - return self._zone_records[zone.name] - - def populate(self, zone, target=False, lenient=False): - self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name, - target, lenient) - - values = defaultdict(lambda: defaultdict(list)) - for record in self.zone_records(zone): - _type = record['type'] - if _type not in self.SUPPORTS: - self.log.warning( - 'populate: skipping unsupported %s record', - _type - ) - continue - elif _type == 'TXT' and record['content'].startswith('ALIAS for'): - # ALIAS has a "ride along" TXT record with 'ALIAS for XXXX', - # we're ignoring it - continue - values[record['name']][record['type']].append(record) - - before = len(zone.records) - for name, types in values.items(): - for _type, records in types.items(): - data_for = getattr(self, f'_data_for_{_type}') - record = Record.new(zone, name, data_for(_type, records), - source=self, lenient=lenient) - zone.add_record(record, lenient=lenient) - - exists = zone.name in self._zone_records - self.log.info('populate: found %s records, exists=%s', - len(zone.records) - before, exists) - return exists - - def supports(self, record): - # DNSimple does not support empty/NULL SRV records - # - # Fails silently and leaves a corrupt record - # - # Skip the record and continue - if record._type == "SRV": - if 'value' in record.data: - targets = (record.data['value']['target'],) - else: - targets = [value['target'] for value in record.data['values']] - - if "." 
in targets: - self.log.warning( - 'supports: unsupported %s record with target (%s)', - record._type, targets - ) - return False - - return super(DnsimpleProvider, self).supports(record) - - def _params_for_multiple(self, record): - for value in record.values: - yield { - 'content': value, - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type, - } - - _params_for_A = _params_for_multiple - _params_for_AAAA = _params_for_multiple - _params_for_NS = _params_for_multiple - _params_for_SPF = _params_for_multiple - - def _params_for_TXT(self, record): - for value in record.values: - yield { - # un-escape semicolons - 'content': value.replace('\\', ''), - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type, - } - - def _params_for_CAA(self, record): - for value in record.values: - yield { - 'content': f'{value.flags} {value.tag} "{value.value}"', - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type - } - - def _params_for_single(self, record): - yield { - 'content': record.value, - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type - } - - _params_for_ALIAS = _params_for_single - _params_for_CNAME = _params_for_single - _params_for_PTR = _params_for_single - - def _params_for_MX(self, record): - for value in record.values: - yield { - 'content': value.exchange, - 'name': record.name, - 'priority': value.preference, - 'ttl': record.ttl, - 'type': record._type - } - - def _params_for_NAPTR(self, record): - for value in record.values: - content = f'{value.order} {value.preference} "{value.flags}" ' \ - f'"{value.service}" "{value.preference}" {value.flags}' - yield { - 'content': content, - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type - } - - def _params_for_SRV(self, record): - for value in record.values: - yield { - 'content': f'{value.weight} {value.port} {value.target}', - 'name': record.name, - 'priority': value.priority, - 'ttl': record.ttl, - 'type': record._type - } - - def _params_for_SSHFP(self, record): - for value in record.values: - yield { - 'content': f'{value.algorithm} {value.fingerprint_type} ' - f'{value.fingerprint}', - 'name': record.name, - 'ttl': record.ttl, - 'type': record._type - } - - def _apply_Create(self, change): - new = change.new - params_for = getattr(self, f'_params_for_{new._type}') - for params in params_for(new): - self._client.record_create(new.zone.name[:-1], params) - - def _apply_Update(self, change): - self._apply_Delete(change) - self._apply_Create(change) - - def _apply_Delete(self, change): - existing = change.existing - zone = existing.zone - for record in self.zone_records(zone): - if existing.name == record['name'] and \ - existing._type == record['type']: - self._client.record_delete(zone.name[:-1], record['id']) - - def _apply(self, plan): - desired = plan.desired - changes = plan.changes - self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name, - len(changes)) - - domain_name = desired.name[:-1] - try: - self._client.zone(domain_name) - except DnsimpleClientNotFound: - self.log.debug('_apply: no matching zone, creating domain') - self._client.domain_create(domain_name) - - for change in changes: - class_name = change.__class__.__name__ - getattr(self, f'_apply_{class_name}')(change) - - # Clear out the cache if any - self._zone_records.pop(desired.name, None) +from logging import getLogger + +logger = getLogger('Dnsimple') +try: + logger.warn('octodns_dnsimple shimmed. Update your provider class to ' + 'octodns_dnsimple.DnsimpleProvider. 
' 'Shim will be removed in 1.0')
+    from octodns_dnsimple import DnsimpleProvider
+    DnsimpleProvider  # pragma: no cover
+except ModuleNotFoundError:
+    logger.exception('DnsimpleProvider has been moved into a separate module, '
+                     'octodns_dnsimple is now required. Provider class should '
+                     'be updated to octodns_dnsimple.DnsimpleProvider')
+    raise
diff --git a/octodns/provider/ns1.py b/octodns/provider/ns1.py
index 802758b..afccc7e 100644
--- a/octodns/provider/ns1.py
+++ b/octodns/provider/ns1.py
@@ -6,1584 +6,16 @@ from __future__ import absolute_import, division, print_function, \
     unicode_literals
 
 from logging import getLogger
-from itertools import chain
-from collections import Mapping, OrderedDict, defaultdict
-
-from ns1 import NS1
-from ns1.rest.errors import RateLimitException, ResourceException
-from pycountry_convert import country_alpha2_to_continent_code
-from time import sleep
-from uuid import uuid4
-
-from ..record import Record, Update
-from . import ProviderException
-from .base import BaseProvider
-
-
-def _ensure_endswith_dot(string):
-    return string if string.endswith('.') else f'{string}.'
-
-
-class Ns1Exception(ProviderException):
-    pass
-
-
-class Ns1Client(object):
-    log = getLogger('NS1Client')
-
-    def __init__(self, api_key, parallelism=None, retry_count=4,
-                 client_config=None):
-        self.log.debug('__init__: parallelism=%s, retry_count=%d, '
-                       'client_config=%s', parallelism, retry_count,
-                       client_config)
-        self.retry_count = retry_count
-
-        client = NS1(apiKey=api_key)
-
-        # NS1 rate limits via a "token bucket" scheme, and provides information
-        # about rate limiting in headers on responses. Token bucket can be
-        # thought of as an initially "full" bucket, where, if not full, tokens
-        # are added at some rate. This allows "bursting" requests until the
-        # bucket is empty, after which, you are limited to the rate of token
-        # replenishment.
-        # There are a couple of "strategies" built into the SDK to avoid 429s
-        # from rate limiting. Since octodns operates concurrently via
-        # `max_workers`, a concurrent strategy seems appropriate.
-        # This strategy does nothing until the remaining requests are equal to
-        # or less than our `parallelism`, after which, each process will sleep
-        # for the token replenishment interval times parallelism.
-        # For example, if we can make 10 requests in 60 seconds, a token is
-        # replenished every 6 seconds. If parallelism is 3, we will burst 7
-        # requests, and subsequently each process will sleep for 18 seconds
-        # before making another request.
-        # In general, parallelism should match the number of workers.
-        if parallelism is not None:
-            client.config['rate_limit_strategy'] = 'concurrent'
-            client.config['parallelism'] = parallelism
-
-        # The list of records for a zone is paginated at around ~2.5k records,
-        # this tells the client to handle any of that transparently and ensure
-        # we get the full list of records.
- client.config['follow_pagination'] = True - - # additional options or overrides - if isinstance(client_config, Mapping): - for k, v in client_config.items(): - client.config[k] = v - - self._client = client - - self._records = client.records() - self._zones = client.zones() - self._monitors = client.monitors() - self._notifylists = client.notifylists() - self._datasource = client.datasource() - self._datafeed = client.datafeed() - - self.reset_caches() - - def reset_caches(self): - self._datasource_id = None - self._feeds_for_monitors = None - self._monitors_cache = None - self._notifylists_cache = None - self._zones_cache = {} - self._records_cache = {} - - def update_record_cache(func): - def call(self, zone, domain, _type, **params): - if zone in self._zones_cache: - # remove record's zone from cache - del self._zones_cache[zone] - - cached = self._records_cache.setdefault(zone, {}) \ - .setdefault(domain, {}) - - if _type in cached: - # remove record from cache - del cached[_type] - - # write record to cache if its not a delete - new_record = func(self, zone, domain, _type, **params) - if new_record: - cached[_type] = new_record - - return new_record - - return call - - def read_or_set_record_cache(func): - def call(self, zone, domain, _type): - cached = self._records_cache.setdefault(zone, {}) \ - .setdefault(domain, {}) - if _type not in cached: - cached[_type] = func(self, zone, domain, _type) - - return cached[_type] - - return call - - @property - def datasource_id(self): - if self._datasource_id is None: - name = 'octoDNS NS1 Data Source' - source = None - for candidate in self.datasource_list(): - if candidate['name'] == name: - # Found it - source = candidate - break - - if source is None: - self.log.info('datasource_id: creating datasource %s', name) - # We need to create it - source = self.datasource_create(name=name, - sourcetype='nsone_monitoring') - self.log.info('datasource_id: id=%s', source['id']) - - self._datasource_id = source['id'] - - return self._datasource_id - - @property - def feeds_for_monitors(self): - if self._feeds_for_monitors is None: - self.log.debug('feeds_for_monitors: fetching & building') - self._feeds_for_monitors = { - f['config']['jobid']: f['id'] - for f in self.datafeed_list(self.datasource_id) - } - - return self._feeds_for_monitors - - @property - def monitors(self): - if self._monitors_cache is None: - self.log.debug('monitors: fetching & building') - self._monitors_cache = \ - {m['id']: m for m in self.monitors_list()} - return self._monitors_cache - - @property - def notifylists(self): - if self._notifylists_cache is None: - self.log.debug('notifylists: fetching & building') - self._notifylists_cache = \ - {l['name']: l for l in self.notifylists_list()} - return self._notifylists_cache - - def datafeed_create(self, sourceid, name, config): - ret = self._try(self._datafeed.create, sourceid, name, config) - self.feeds_for_monitors[config['jobid']] = ret['id'] - return ret - - def datafeed_delete(self, sourceid, feedid): - ret = self._try(self._datafeed.delete, sourceid, feedid) - self._feeds_for_monitors = { - k: v for k, v in self._feeds_for_monitors.items() if v != feedid - } - return ret - - def datafeed_list(self, sourceid): - return self._try(self._datafeed.list, sourceid) - - def datasource_create(self, **body): - return self._try(self._datasource.create, **body) - - def datasource_list(self): - return self._try(self._datasource.list) - - def monitors_create(self, **params): - body = {} - ret = self._try(self._monitors.create, body, 
**params) - self.monitors[ret['id']] = ret - return ret - - def monitors_delete(self, jobid): - ret = self._try(self._monitors.delete, jobid) - self.monitors.pop(jobid) - return ret - - def monitors_list(self): - return self._try(self._monitors.list) - - def monitors_update(self, job_id, **params): - body = {} - ret = self._try(self._monitors.update, job_id, body, **params) - self.monitors[ret['id']] = ret - return ret - - def notifylists_delete(self, nlid): - for name, nl in self.notifylists.items(): - if nl['id'] == nlid: - del self._notifylists_cache[name] - break - return self._try(self._notifylists.delete, nlid) - - def notifylists_create(self, **body): - nl = self._try(self._notifylists.create, body) - # cache it - self.notifylists[nl['name']] = nl - return nl - - def notifylists_list(self): - return self._try(self._notifylists.list) - - @update_record_cache - def records_create(self, zone, domain, _type, **params): - return self._try(self._records.create, zone, domain, _type, **params) - - @update_record_cache - def records_delete(self, zone, domain, _type): - return self._try(self._records.delete, zone, domain, _type) - - @read_or_set_record_cache - def records_retrieve(self, zone, domain, _type): - return self._try(self._records.retrieve, zone, domain, _type) - - @update_record_cache - def records_update(self, zone, domain, _type, **params): - return self._try(self._records.update, zone, domain, _type, **params) - - def zones_create(self, name): - self._zones_cache[name] = self._try(self._zones.create, name) - return self._zones_cache[name] - - def zones_retrieve(self, name): - if name not in self._zones_cache: - self._zones_cache[name] = self._try(self._zones.retrieve, name) - return self._zones_cache[name] - - def _try(self, method, *args, **kwargs): - tries = self.retry_count - while True: # We'll raise to break after our tries expire - try: - return method(*args, **kwargs) - except RateLimitException as e: - if tries <= 1: - raise - period = float(e.period) - self.log.warn('rate limit encountered, pausing ' - 'for %ds and trying again, %d remaining', - period, tries) - sleep(period) - tries -= 1 - - -class Ns1Provider(BaseProvider): - ''' - Ns1 provider - - ns1: - # Required - class: octodns.provider.ns1.Ns1Provider - api_key: env/NS1_API_KEY - # Only required if using dynamic records - monitor_regions: - - lga - # Optional. Default: false. true is Recommended, but not the default - # for backwards compatibility reasons. If true, all NS1 monitors will - # use a shared notify list rather than one per record & value - # combination. See CHANGELOG, - # https://github.com/octodns/octodns/blob/master/CHANGELOG.md, for more - # information before enabling this behavior. - shared_notifylist: false - # Optional. Default: None. If set, back off in advance to avoid 429s - # from rate-limiting. Generally this should be set to the number - # of processes or workers hitting the API, e.g. the value of - # `max_workers`. - parallelism: 11 - # Optional. Default: 4. Number of times to retry if a 429 response - # is received. - retry_count: 4 - # Optional. Default: None. Additional options or overrides passed to - # the NS1 SDK config, as key-value pairs. 
- client_config: - endpoint: my.nsone.endpoint # Default: api.nsone.net - ignore-ssl-errors: true # Default: false - follow_pagination: false # Default: true - ''' - SUPPORTS_GEO = True - SUPPORTS_DYNAMIC = True - SUPPORTS_POOL_VALUE_STATUS = True - SUPPORTS_MULTIVALUE_PTR = True - SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', - 'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD')) - - ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found' - SHARED_NOTIFYLIST_NAME = 'octoDNS NS1 Notify List' - - @property - def _UP_FILTER(self): - return { - 'config': {}, - 'filter': 'up' - } - - @property - def _REGION_FILTER(self): - return { - 'config': { - 'remove_no_georegion': True - }, - 'filter': u'geofence_regional' - } - - @property - def _COUNTRY_FILTER(self): - return { - 'config': { - 'remove_no_location': True - }, - 'filter': u'geofence_country' - } - - # In the NS1 UI/portal, this filter is called "SELECT FIRST GROUP" though - # the filter name in the NS1 api is 'select_first_region' - @property - def _SELECT_FIRST_REGION_FILTER(self): - return { - 'config': {}, - 'filter': u'select_first_region' - } - - @property - def _PRIORITY_FILTER(self): - return { - 'config': { - 'eliminate': u'1' - }, - 'filter': 'priority' - } - - @property - def _WEIGHTED_SHUFFLE_FILTER(self): - return { - 'config': {}, - 'filter': u'weighted_shuffle' - } - - @property - def _SELECT_FIRST_N_FILTER(self): - return { - 'config': { - 'N': u'1' - }, - 'filter': u'select_first_n' - } - - @property - def _BASIC_FILTER_CHAIN(self): - return [ - self._UP_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_REGION(self): - return [ - self._UP_FILTER, - self._REGION_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_COUNTRY(self): - return [ - self._UP_FILTER, - self._COUNTRY_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_REGION_AND_COUNTRY(self): - return [ - self._UP_FILTER, - self._REGION_FILTER, - self._COUNTRY_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - _REGION_TO_CONTINENT = { - 'AFRICA': 'AF', - 'ASIAPAC': 'AS', - 'EUROPE': 'EU', - 'SOUTH-AMERICA': 'SA', - # continent NA has been handled as part of Geofence Country filter - # starting from v0.9.13. 
These below US-* just need to continue to - # exist here so it doesn't break the ugrade path - 'US-CENTRAL': 'NA', - 'US-EAST': 'NA', - 'US-WEST': 'NA', - } - _CONTINENT_TO_REGIONS = { - 'AF': ('AFRICA',), - 'AS': ('ASIAPAC',), - 'EU': ('EUROPE',), - 'SA': ('SOUTH-AMERICA',), - } - - # Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS - _CONTINENT_TO_LIST_OF_COUNTRIES = { - 'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU', - 'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN', - 'TK', 'TO', 'TV', 'WF', 'WS'}, - 'NA': {'DO', 'DM', 'BB', 'BL', 'BM', 'HT', 'KN', 'JM', 'VC', 'HN', - 'BS', 'BZ', 'PR', 'NI', 'LC', 'TT', 'VG', 'PA', 'TC', 'PM', - 'GT', 'AG', 'GP', 'AI', 'VI', 'CA', 'GD', 'AW', 'CR', 'GL', - 'CU', 'MF', 'SV', 'US', 'MQ', 'MS', 'KY', 'MX', 'CW', 'BQ', - 'SX', 'UM'} - } - - def __init__(self, id, api_key, retry_count=4, monitor_regions=None, - parallelism=None, client_config=None, shared_notifylist=False, - *args, **kwargs): - self.log = getLogger(f'Ns1Provider[{id}]') - self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, ' - 'monitor_regions=%s, parallelism=%s, client_config=%s', - id, retry_count, monitor_regions, parallelism, - client_config) - super(Ns1Provider, self).__init__(id, *args, **kwargs) - self.monitor_regions = monitor_regions - self.shared_notifylist = shared_notifylist - self.record_filters = dict() - self._client = Ns1Client(api_key, parallelism, retry_count, - client_config) - - def _sanitize_disabled_in_filter_config(self, filter_cfg): - # remove disabled=False from filters - for filter in filter_cfg: - if 'disabled' in filter and filter['disabled'] is False: - del filter['disabled'] - return filter_cfg - - def _valid_filter_config(self, filter_cfg): - self._sanitize_disabled_in_filter_config(filter_cfg) - has_region = self._REGION_FILTER in filter_cfg - has_country = self._COUNTRY_FILTER in filter_cfg - expected_filter_cfg = self._get_updated_filter_chain(has_region, - has_country) - return filter_cfg == expected_filter_cfg - - def _get_updated_filter_chain(self, has_region, has_country): - if has_region and has_country: - filter_chain = self._FILTER_CHAIN_WITH_REGION_AND_COUNTRY - elif has_region: - filter_chain = self._FILTER_CHAIN_WITH_REGION - elif has_country: - filter_chain = self._FILTER_CHAIN_WITH_COUNTRY - else: - filter_chain = self._BASIC_FILTER_CHAIN - - return filter_chain - - def _encode_notes(self, data): - return ' '.join([f'{k}:{v}' for k, v in sorted(data.items())]) - - def _parse_notes(self, note): - data = {} - if note: - for piece in note.split(' '): - try: - k, v = piece.split(':', 1) - data[k] = v if v != '' else None - except ValueError: - pass - return data - - def _data_for_geo_A(self, _type, record): - # record meta (which would include geo information is only - # returned when getting a record's detail, not from zone detail - geo = defaultdict(list) - data = { - 'ttl': record['ttl'], - 'type': _type, - } - values, codes = [], [] - for answer in record.get('answers', []): - meta = answer.get('meta', {}) - if meta: - # country + state and country + province are allowed - # in that case though, supplying a state/province would - # be redundant since the country would supercede in when - # resolving the record. it is syntactically valid, however. 
- country = meta.get('country', []) - us_state = meta.get('us_state', []) - ca_province = meta.get('ca_province', []) - for cntry in country: - con = country_alpha2_to_continent_code(cntry) - key = f'{con}-{cntry}' - geo[key].extend(answer['answer']) - for state in us_state: - key = f'NA-US-{state}' - geo[key].extend(answer['answer']) - for province in ca_province: - key = f'NA-CA-{province}' - geo[key].extend(answer['answer']) - for code in meta.get('iso_region_code', []): - key = code - geo[key].extend(answer['answer']) - else: - values.extend(answer['answer']) - codes.append([]) - values = [str(x) for x in values] - geo = OrderedDict( - {str(k): [str(x) for x in v] for k, v in geo.items()} - ) - data['values'] = values - data['geo'] = geo - return data - - def _parse_dynamic_pool_name(self, pool_name): - if pool_name.startswith('catchall__'): - # Special case for the old-style catchall prefix - return pool_name[10:] - try: - pool_name, _ = pool_name.rsplit('__', 1) - except ValueError: - pass - return pool_name - - def _parse_pools(self, answers): - # All regions (pools) will include the list of default values - # (eventually) at higher priorities, we'll just add them to this set to - # we'll have the complete collection. - default = set() - - # Fill out the pools by walking the answers and looking at their - # region (< v0.9.11) or notes (> v0.9.11). - pools = defaultdict(lambda: {'fallback': None, 'values': []}) - for answer in answers: - meta = answer['meta'] - notes = self._parse_notes(meta.get('note', '')) - - value = str(answer['answer'][0]) - if notes.get('from', False) == '--default--': - # It's a final/default value, record it and move on - default.add(value) - continue - - # NS1 pool names can be found in notes > v0.9.11, in order to allow - # us to find fallback-only pools/values. Before that we used - # `region` (group name in the UI) and only paid attention to - # priority=1 (first level) - notes_pool_name = notes.get('pool', None) - if notes_pool_name is None: - # < v0.9.11 - if meta['priority'] != 1: - # Ignore all but priority 1 - continue - # And use region's name as the pool name - pool_name = self._parse_dynamic_pool_name(answer['region']) - else: - # > v0.9.11, use the notes-based name and consider all values - pool_name = notes_pool_name - - pool = pools[pool_name] - value_dict = { - 'value': value, - 'weight': int(meta.get('weight', 1)), - } - if isinstance(meta['up'], bool): - value_dict['status'] = 'up' if meta['up'] else 'down' - - if value_dict not in pool['values']: - # If we haven't seen this value before add it to the pool - pool['values'].append(value_dict) - - # If there's a fallback recorded in the value for its pool go ahead - # and use it, another v0.9.11 thing - fallback = notes.get('fallback', None) - if fallback is not None: - pool['fallback'] = fallback - - # Order and convert to a list - default = sorted(default) - - return default, pools - - def _parse_rule_geos(self, meta, notes): - geos = set() - - for georegion in meta.get('georegion', []): - geos.add(self._REGION_TO_CONTINENT[georegion]) - - # Countries are easy enough to map, we just have to find their - # continent - # - # NOTE: Some continents need special handling since NS1 - # does not supprt them as regions. These are defined under - # _CONTINENT_TO_LIST_OF_COUNTRIES. So the countries for these - # regions will be present in meta['country']. 
If all the countries - # in _CONTINENT_TO_LIST_OF_COUNTRIES[] list are found, - # set the continent as the region and remove individual countries - - # continents that don't have all countries here because a subset of - # them were used in another rule, but we still need this rule to use - # continent instead of the remaining subset of its countries - continents_from_notes = set(notes.get('continents', '').split(',')) - - special_continents = dict() - for country in meta.get('country', []): - # country_alpha2_to_continent_code fails for Pitcairn ('PN'), - # United States Minor Outlying Islands ('UM') and - # Sint Maarten ('SX') - if country == 'PN': - con = 'OC' - elif country in ['SX', 'UM']: - con = 'NA' - else: - con = country_alpha2_to_continent_code(country) - - if con in self._CONTINENT_TO_LIST_OF_COUNTRIES: - special_continents.setdefault(con, set()).add(country) - else: - geos.add(f'{con}-{country}') - - for continent, countries in special_continents.items(): - if countries == self._CONTINENT_TO_LIST_OF_COUNTRIES[ - continent] or continent in continents_from_notes: - # All countries found or continent in notes, so add it to geos - geos.add(continent) - else: - # Partial countries found, so just add them as-is to geos - for c in countries: - geos.add(f'{continent}-{c}') - - # States and provinces are easy too, - # just assume NA-US or NA-CA - for state in meta.get('us_state', []): - geos.add(f'NA-US-{state}') - - for province in meta.get('ca_province', []): - geos.add(f'NA-CA-{province}') - - return geos - - def _parse_rules(self, pools, regions): - # The regions objects map to rules, but it's a bit fuzzy since they're - # tied to pools on the NS1 side, e.g. we can only have 1 rule per pool, - # that may eventually run into problems, but I don't have any use-cases - # examples currently where it would - rules = {} - for pool_name, region in sorted(regions.items()): - # Get the actual pool name by removing the type - pool_name = self._parse_dynamic_pool_name(pool_name) - - meta = region['meta'] - notes = self._parse_notes(meta.get('note', '')) - - # The group notes field in the UI is a `note` on the region here, - # that's where we can find our pool's fallback in < v0.9.11 anyway - if 'fallback' in notes: - # set the fallback pool name - pools[pool_name]['fallback'] = notes['fallback'] - - rule_order = notes['rule-order'] - try: - rule = rules[rule_order] - except KeyError: - rule = { - 'pool': pool_name, - '_order': rule_order, - } - rules[rule_order] = rule - - geos = self._parse_rule_geos(meta, notes) - if geos: - # There are geos, combine them with any existing geos for this - # pool and recorded the sorted unique set of them - rule['geos'] = sorted(set(rule.get('geos', [])) | geos) - - # Convert to list and order - rules = sorted(rules.values(), key=lambda r: (r['_order'], r['pool'])) - - return rules - - def _data_for_dynamic(self, _type, record): - # Cache record filters for later use - record_filters = self.record_filters.setdefault(record['domain'], {}) - record_filters[_type] = record['filters'] - - default, pools = self._parse_pools(record['answers']) - rules = self._parse_rules(pools, record['regions']) - - data = { - 'dynamic': { - 'pools': pools, - 'rules': rules, - }, - 'ttl': record['ttl'], - 'type': _type, - } - - if _type == 'CNAME': - data['value'] = default[0] - else: - data['values'] = default - - return data - - def _data_for_A(self, _type, record): - if record.get('tier', 1) > 1: - # Advanced record, see if it's first answer has a note - try: - 
first_answer_note = record['answers'][0]['meta']['note'] - except (IndexError, KeyError): - first_answer_note = '' - # If that note includes a `from` (pool name) it's a dynamic record - if 'from:' in first_answer_note: - return self._data_for_dynamic(_type, record) - # If not it's an old geo record - return self._data_for_geo_A(_type, record) - - # This is a basic record, just convert it - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': [str(x) for x in record['short_answers']] - } - - _data_for_AAAA = _data_for_A - - def _data_for_SPF(self, _type, record): - values = [v.replace(';', '\\;') for v in record['short_answers']] - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values - } - - _data_for_TXT = _data_for_SPF - - def _data_for_CAA(self, _type, record): - values = [] - for answer in record['short_answers']: - flags, tag, value = answer.split(' ', 2) - values.append({ - 'flags': flags, - 'tag': tag, - 'value': value, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_CNAME(self, _type, record): - if record.get('tier', 1) > 1: - # Advanced record, see if it's first answer has a note - try: - first_answer_note = record['answers'][0]['meta']['note'] - except (IndexError, KeyError): - first_answer_note = '' - # If that note includes a `pool` it's a valid dynamic record - if 'pool:' in first_answer_note: - return self._data_for_dynamic(_type, record) - # If not, it can't be parsed. Let it be an empty record - self.log.warn('Cannot parse %s dynamic record due to missing ' - 'pool name in first answer note, treating it as ' - 'an empty record', record['domain']) - value = None - else: - try: - value = record['short_answers'][0] - except IndexError: - value = None - - return { - 'ttl': record['ttl'], - 'type': _type, - 'value': value, - } - - _data_for_ALIAS = _data_for_CNAME - - def _data_for_MX(self, _type, record): - values = [] - for answer in record['short_answers']: - preference, exchange = answer.split(' ', 1) - values.append({ - 'preference': preference, - 'exchange': exchange, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_NAPTR(self, _type, record): - values = [] - for answer in record['short_answers']: - order, preference, flags, service, regexp, replacement = \ - answer.split(' ', 5) - values.append({ - 'flags': flags, - 'order': order, - 'preference': preference, - 'regexp': regexp, - 'replacement': replacement, - 'service': service, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_NS(self, _type, record): - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': record['short_answers'], - } - - _data_for_PTR = _data_for_NS - - def _data_for_SRV(self, _type, record): - values = [] - for answer in record['short_answers']: - priority, weight, port, target = answer.split(' ', 3) - values.append({ - 'priority': priority, - 'weight': weight, - 'port': port, - 'target': target, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_URLFWD(self, _type, record): - values = [] - for answer in record['short_answers']: - path, target, code, masking, query = answer.split(' ', 4) - values.append({ - 'path': path, - 'target': target, - 'code': code, - 'masking': masking, - 'query': query, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def populate(self, zone, target=False, lenient=False): - self.log.debug('populate: name=%s, 
target=%s, lenient=%s', - zone.name, - target, lenient) - - try: - ns1_zone_name = zone.name[:-1] - ns1_zone = self._client.zones_retrieve(ns1_zone_name) - - records = [] - geo_records = [] - - # change answers for certain types to always be absolute - for record in ns1_zone['records']: - if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR', - 'SRV']: - record['short_answers'] = [ - _ensure_endswith_dot(a) - for a in record['short_answers'] - ] - - if record.get('tier', 1) > 1: - # Need to get the full record data for geo records - record = self._client.records_retrieve(ns1_zone_name, - record['domain'], - record['type']) - geo_records.append(record) - else: - records.append(record) - - exists = True - except ResourceException as e: - if e.message != self.ZONE_NOT_FOUND_MESSAGE: - raise - records = [] - geo_records = [] - exists = False - - before = len(zone.records) - # geo information isn't returned from the main endpoint, so we need - # to query for all records with geo information - zone_hash = {} - for record in chain(records, geo_records): - _type = record['type'] - if _type not in self.SUPPORTS: - continue - data_for = getattr(self, f'_data_for_{_type}') - name = zone.hostname_from_fqdn(record['domain']) - data = data_for(_type, record) - record = Record.new(zone, name, data, source=self, lenient=lenient) - zone_hash[(_type, name)] = record - [zone.add_record(r, lenient=lenient) for r in zone_hash.values()] - self.log.info('populate: found %s records, exists=%s', - len(zone.records) - before, exists) - return exists - - def _params_for_geo_A(self, record): - # purposefully set non-geo answers to have an empty meta, - # so that we know we did this on purpose if/when troubleshooting - params = { - 'answers': [{"answer": [x], "meta": {}} for x in record.values], - 'ttl': record.ttl, - } - - has_country = False - for iso_region, target in record.geo.items(): - key = 'iso_region_code' - value = iso_region - if not has_country and len(value.split('-')) > 1: - has_country = True - for answer in target.values: - params['answers'].append( - { - 'answer': [answer], - 'meta': {key: [value]}, - }, - ) - - params['filters'] = [] - if has_country: - params['filters'].append( - {"filter": "shuffle", "config": {}} - ) - params['filters'].append( - {"filter": "geotarget_country", "config": {}} - ) - params['filters'].append( - {"filter": "select_first_n", - "config": {"N": 1}} - ) - - return params, None - - def _monitors_for(self, record): - monitors = {} - - if getattr(record, 'dynamic', False): - expected_host = record.fqdn[:-1] - expected_type = record._type - - for monitor in self._client.monitors.values(): - data = self._parse_notes(monitor['notes']) - if not data: - continue - if expected_host == data['host'] and \ - expected_type == data['type']: - # This monitor does not belong to this record - config = monitor['config'] - value = config['host'] - if record._type == 'CNAME': - # Append a trailing dot for CNAME records so that - # lookup by a CNAME answer works - value = value + '.' 
- monitors[value] = monitor - - return monitors - - def _uuid(self): - return uuid4().hex - - def _feed_create(self, monitor): - monitor_id = monitor['id'] - self.log.debug('_feed_create: monitor=%s', monitor_id) - name = f'{monitor["name"]} - {self._uuid()[:6]}' - - # Create the data feed - config = { - 'jobid': monitor_id, - } - feed = self._client.datafeed_create(self._client.datasource_id, name, - config) - feed_id = feed['id'] - self.log.debug('_feed_create: feed=%s', feed_id) - - return feed_id - - def _notifylists_find_or_create(self, name): - self.log.debug('_notifylists_find_or_create: name="%s"', name) - try: - nl = self._client.notifylists[name] - self.log.debug('_notifylists_find_or_create: existing=%s', - nl['id']) - except KeyError: - notify_list = [{ - 'config': { - 'sourceid': self._client.datasource_id, - }, - 'type': 'datafeed', - }] - nl = self._client.notifylists_create(name=name, - notify_list=notify_list) - self.log.debug('_notifylists_find_or_create: created=%s', - nl['id']) - - return nl - - def _monitor_create(self, monitor): - self.log.debug('_monitor_create: monitor="%s"', monitor['name']) - - # Find the right notifylist - nl_name = self.SHARED_NOTIFYLIST_NAME \ - if self.shared_notifylist else monitor['name'] - nl = self._notifylists_find_or_create(nl_name) - - # Create the monitor - monitor['notify_list'] = nl['id'] - monitor = self._client.monitors_create(**monitor) - monitor_id = monitor['id'] - self.log.debug('_monitor_create: monitor=%s', monitor_id) - - return monitor_id, self._feed_create(monitor) - - def _healthcheck_policy(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('policy', 'quorum') - - def _healthcheck_frequency(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('frequency', 60) - - def _healthcheck_rapid_recheck(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('rapid_recheck', False) - - def _healthcheck_connect_timeout(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('connect_timeout', 2) - - def _healthcheck_response_timeout(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('response_timeout', 10) - - def _monitor_gen(self, record, value): - host = record.fqdn[:-1] - _type = record._type - - if _type == 'CNAME': - # NS1 does not accept a host value with a trailing dot - value = value[:-1] - - ret = { - 'active': True, - 'config': { - 'connect_timeout': - # TCP monitors use milliseconds, so convert from - # seconds to milliseconds - self._healthcheck_connect_timeout(record) * 1000, - 'host': value, - 'port': record.healthcheck_port, - 'response_timeout': - # TCP monitors use milliseconds, so convert from - # seconds to milliseconds - self._healthcheck_response_timeout(record) * 1000, - 'ssl': record.healthcheck_protocol == 'HTTPS', - }, - 'job_type': 'tcp', - 'name': f'{host} - {_type} - {value}', - 'notes': self._encode_notes({ - 'host': host, - 'type': _type, - }), - 'policy': self._healthcheck_policy(record), - 'frequency': self._healthcheck_frequency(record), - 'rapid_recheck': self._healthcheck_rapid_recheck(record), - 'region_scope': 'fixed', - 'regions': self.monitor_regions, - } - - if _type == 'AAAA': - ret['config']['ipv6'] = True - - if record.healthcheck_protocol != 'TCP': - # IF it's HTTP we need to send the request string - path = record.healthcheck_path - host = record.healthcheck_host(value=value) - request = fr'GET 
{path} HTTP/1.0\r\nHost: {host}\r\n' \ - r'User-agent: NS1\r\n\r\n' - ret['config']['send'] = request - # We'll also expect a HTTP response - ret['rules'] = [{ - 'comparison': 'contains', - 'key': 'output', - 'value': '200 OK', - }] - - return ret - - def _monitor_is_match(self, expected, have): - # Make sure what we have matches what's in expected exactly. Anything - # else in have will be ignored. - for k, v in expected.items(): - if have.get(k, '--missing--') != v: - return False - - return True - - def _monitor_sync(self, record, value, existing): - self.log.debug('_monitor_sync: record=%s, value=%s', record.fqdn, - value) - expected = self._monitor_gen(record, value) - - if existing: - self.log.debug('_monitor_sync: existing=%s', existing['id']) - monitor_id = existing['id'] - - if not self._monitor_is_match(expected, existing): - self.log.debug('_monitor_sync: existing needs update') - # Update the monitor to match expected, everything else will be - # left alone and assumed correct - self._client.monitors_update(monitor_id, **expected) - - feed_id = self._client.feeds_for_monitors.get(monitor_id) - if feed_id is None: - self.log.warn('_monitor_sync: %s (%s) missing feed, creating', - existing['name'], monitor_id) - feed_id = self._feed_create(existing) - else: - self.log.debug('_monitor_sync: needs create') - # We don't have an existing monitor create it (and related bits) - monitor_id, feed_id = self._monitor_create(expected) - - return monitor_id, feed_id - - def _monitors_gc(self, record, active_monitor_ids=None): - self.log.debug('_monitors_gc: record=%s, active_monitor_ids=%s', - record.fqdn, active_monitor_ids) - - if active_monitor_ids is None: - active_monitor_ids = set() - - for monitor in self._monitors_for(record).values(): - monitor_id = monitor['id'] - if monitor_id in active_monitor_ids: - continue - - self.log.debug('_monitors_gc: deleting %s', monitor_id) - - feed_id = self._client.feeds_for_monitors.get(monitor_id) - if feed_id: - self._client.datafeed_delete(self._client.datasource_id, - feed_id) - - self._client.monitors_delete(monitor_id) - - notify_list_id = monitor['notify_list'] - for nl_name, nl in self._client.notifylists.items(): - if nl['id'] == notify_list_id: - # We've found the that might need deleting - if nl['name'] != self.SHARED_NOTIFYLIST_NAME: - # It's not shared so is safe to delete - self._client.notifylists_delete(notify_list_id) - break - - def _add_answers_for_pool(self, answers, default_answers, pool_name, - pool_label, pool_answers, pools, priority): - current_pool_name = pool_name - seen = set() - while current_pool_name and current_pool_name not in seen: - seen.add(current_pool_name) - pool = pools[current_pool_name] - for answer in pool_answers[current_pool_name]: - fallback = pool.data['fallback'] - if answer['feed_id']: - up = {'feed': answer['feed_id']} - else: - up = answer['status'] == 'up' - answer = { - 'answer': answer['answer'], - 'meta': { - 'priority': priority, - 'note': self._encode_notes({ - 'from': pool_label, - 'pool': current_pool_name, - 'fallback': fallback or '', - }), - 'up': up, - 'weight': answer['weight'], - }, - 'region': pool_label, # the one we're answering - } - answers.append(answer) - - current_pool_name = pool.data.get('fallback', None) - priority += 1 - - # Static/default - for answer in default_answers: - answer = { - 'answer': answer['answer'], - 'meta': { - 'priority': priority, - 'note': self._encode_notes({ - 'from': '--default--', - }), - 'up': True, - 'weight': 1, - }, - 'region': pool_label, # the 
one we're answering - } - answers.append(answer) - - def _generate_regions(self, record): - pools = record.dynamic.pools - has_country = False - has_region = False - regions = {} - - explicit_countries = dict() - for rule in record.dynamic.rules: - for geo in rule.data.get('geos', []): - if len(geo) == 5: - con, country = geo.split('-', 1) - explicit_countries.setdefault(con, set()).add(country) - - for i, rule in enumerate(record.dynamic.rules): - pool_name = rule.data['pool'] - - notes = { - 'rule-order': i, - } - - fallback = pools[pool_name].data.get('fallback', None) - if fallback: - notes['fallback'] = fallback - - country = set() - georegion = set() - us_state = set() - ca_province = set() - - for geo in rule.data.get('geos', []): - n = len(geo) - if n == 8: - # US state, e.g. NA-US-KY - # CA province, e.g. NA-CA-NL - us_state.add(geo[-2:]) if "NA-US" in geo \ - else ca_province.add(geo[-2:]) - # For filtering. State filtering is done by the country - # filter - has_country = True - elif n == 5: - # Country, e.g. EU-FR - country.add(geo[-2:]) - has_country = True - else: - # Continent, e.g. AS - if geo in self._CONTINENT_TO_REGIONS: - georegion.update(self._CONTINENT_TO_REGIONS[geo]) - has_region = True - else: - # No maps for geo in _CONTINENT_TO_REGIONS. - # Use the country list - self.log.debug('Converting geo {} to country list'. - format(geo)) - continent_countries = \ - self._CONTINENT_TO_LIST_OF_COUNTRIES[geo] - exclude = explicit_countries.get(geo, set()) - country.update(continent_countries - exclude) - notes.setdefault('continents', set()).add(geo) - has_country = True - - if 'continents' in notes: - notes['continents'] = ','.join(sorted(notes['continents'])) - - meta = { - 'note': self._encode_notes(notes), - } - - if georegion: - georegion_meta = dict(meta) - georegion_meta['georegion'] = sorted(georegion) - regions[f'{pool_name}__georegion'] = { - 'meta': georegion_meta, - } - - if country or us_state or ca_province: - # If there's country and/or states its a country pool, - # countries and states can coexist as they're handled by the - # same step in the filterchain (countries and georegions - # cannot as they're seperate stages and run the risk of - # eliminating all options) - country_state_meta = dict(meta) - if country: - country_state_meta['country'] = sorted(country) - if us_state: - country_state_meta['us_state'] = sorted(us_state) - if ca_province: - country_state_meta['ca_province'] = sorted(ca_province) - regions[f'{pool_name}__country'] = { - 'meta': country_state_meta, - } - elif not georegion: - # If there's no targeting it's a catchall - regions[f'{pool_name}__catchall'] = { - 'meta': meta, - } - - return has_country, has_region, regions - - def _generate_answers(self, record, regions): - pools = record.dynamic.pools - existing_monitors = self._monitors_for(record) - active_monitors = set() - - # Build a list of primary values for each pool, including their - # feed_id (monitor) - value_feed = dict() - pool_answers = defaultdict(list) - for pool_name, pool in sorted(pools.items()): - for value in pool.data['values']: - weight = value['weight'] - status = value['status'] - value = value['value'] - - feed_id = None - if status == 'obey': - # state is not forced, let's find a monitor - feed_id = value_feed.get(value) - # check for identical monitor and skip creating one if - # found - if not feed_id: - existing = existing_monitors.get(value) - monitor_id, feed_id = self._monitor_sync(record, value, - existing) - value_feed[value] = feed_id - 
active_monitors.add(monitor_id) - - pool_answers[pool_name].append({ - 'answer': [value], - 'weight': weight, - 'feed_id': feed_id, - 'status': status, - }) - - if record._type == 'CNAME': - default_values = [record.value] - else: - default_values = record.values - default_answers = [{ - 'answer': [v], - 'weight': 1, - } for v in default_values] - - # Build our list of answers - # The regions dictionary built above already has the required pool - # names. Iterate over them and add answers. - answers = [] - for pool_name in sorted(regions.keys()): - priority = 1 - - # Dynamic/health checked - pool_label = pool_name - # Remove the pool type from the end of the name - pool_name = self._parse_dynamic_pool_name(pool_name) - self._add_answers_for_pool(answers, default_answers, pool_name, - pool_label, pool_answers, pools, - priority) - - return active_monitors, answers - - def _params_for_dynamic(self, record): - # Convert rules to regions - has_country, has_region, regions = self._generate_regions(record) - - # Convert pools to answers - active_monitors, answers = self._generate_answers(record, regions) - - # Update filters as necessary - filters = self._get_updated_filter_chain(has_region, has_country) - - return { - 'answers': answers, - 'filters': filters, - 'regions': regions, - 'ttl': record.ttl, - }, active_monitors - - def _params_for_A(self, record): - if getattr(record, 'dynamic', False): - return self._params_for_dynamic(record) - elif hasattr(record, 'geo'): - return self._params_for_geo_A(record) - - return { - 'answers': record.values, - 'ttl': record.ttl, - }, None - - _params_for_AAAA = _params_for_A - _params_for_NS = _params_for_A - - def _params_for_SPF(self, record): - # NS1 seems to be the only provider that doesn't want things - # escaped in values so we have to strip them here and add - # them when going the other way - values = [v.replace('\\;', ';') for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - _params_for_TXT = _params_for_SPF - - def _params_for_CAA(self, record): - values = [(v.flags, v.tag, v.value) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_CNAME(self, record): - if getattr(record, 'dynamic', False): - return self._params_for_dynamic(record) - - return {'answers': [record.value], 'ttl': record.ttl}, None - - _params_for_ALIAS = _params_for_CNAME - - def _params_for_MX(self, record): - values = [(v.preference, v.exchange) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_NAPTR(self, record): - values = [(v.order, v.preference, v.flags, v.service, v.regexp, - v.replacement) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_PTR(self, record): - return { - 'answers': record.values, - 'ttl': record.ttl, - }, None - - def _params_for_SRV(self, record): - values = [(v.priority, v.weight, v.port, v.target) - for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_URLFWD(self, record): - values = [(v.path, v.target, v.code, v.masking, v.query) - for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _extra_changes(self, desired, changes, **kwargs): - self.log.debug('_extra_changes: desired=%s', desired.name) - changed = set([c.record for c in changes]) - extra = [] - for record in desired.records: - if record in changed or not getattr(record, 'dynamic', False): - # Already changed, or no dynamic , no need to check it - 
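On `_params_for_SPF`/`_params_for_TXT` above: NS1 is the odd provider out in wanting semicolons unescaped, so values are unescaped on the way out and re-escaped when records are read back. A toy round-trip of just that convention (it assumes values never contain a bare semicolon of their own):

```python
# Drop octoDNS's escaping before sending values to NS1 and restore it
# when reading them back; only the convention is sketched here.
def to_ns1(values):
    return [v.replace('\\;', ';') for v in values]

def from_ns1(values):
    return [v.replace(';', '\\;') for v in values]

values = ['v=DKIM1\\; k=rsa']
assert to_ns1(values) == ['v=DKIM1; k=rsa']
assert from_ns1(to_ns1(values)) == values
```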
continue - - # Filter normalization - # Check if filters for existing domains need an update - # Needs an explicit check since there might be no change in the - # config at all. Filters however might still need an update - domain = record.fqdn[:-1] - _type = record._type - record_filters = self.record_filters.get(domain, {}).get(_type, []) - if not self._valid_filter_config(record_filters): - # unrecognized set of filters, overwrite them by updating the - # record - self.log.info('_extra_changes: unrecognized filters in %s, ' - 'will update record', domain) - extra.append(Update(record, record)) - continue - - for value, have in self._monitors_for(record).items(): - expected = self._monitor_gen(record, value) - # TODO: find values which have missing monitors - if not self._monitor_is_match(expected, have): - self.log.info('_extra_changes: monitor mis-match for %s', - expected['name']) - extra.append(Update(record, record)) - break - if not have.get('notify_list'): - self.log.info('_extra_changes: broken monitor no notify ' - 'list %s (%s)', have['name'], have['id']) - extra.append(Update(record, record)) - break - - return extra - - def _apply_Create(self, ns1_zone, change): - new = change.new - zone = new.zone.name[:-1] - domain = new.fqdn[:-1] - _type = new._type - params, active_monitor_ids = getattr(self, f'_params_for_{_type}')(new) - self._client.records_create(zone, domain, _type, **params) - self._monitors_gc(new, active_monitor_ids) - - def _apply_Update(self, ns1_zone, change): - new = change.new - zone = new.zone.name[:-1] - domain = new.fqdn[:-1] - _type = new._type - params, active_monitor_ids = getattr(self, f'_params_for_{_type}')(new) - self._client.records_update(zone, domain, _type, **params) - # If we're cleaning up we need to send in the old record since it'd - # have anything that needs cleaning up - self._monitors_gc(change.existing, active_monitor_ids) - - def _apply_Delete(self, ns1_zone, change): - existing = change.existing - zone = existing.zone.name[:-1] - domain = existing.fqdn[:-1] - _type = existing._type - self._client.records_delete(zone, domain, _type) - self._monitors_gc(existing) - - def _has_dynamic(self, changes): - for change in changes: - if getattr(change.record, 'dynamic', False): - return True - - return False - - def _apply(self, plan): - desired = plan.desired - changes = plan.changes - self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name, - len(changes)) - - # Make sure that if we're going to make any dynamic changes that we - # have monitor_regions configured before touching anything so we can - # abort early and not half-apply - if self._has_dynamic(changes) and self.monitor_regions is None: - raise Ns1Exception('Monitored record, but monitor_regions not set') - - domain_name = desired.name[:-1] - try: - ns1_zone = self._client.zones_retrieve(domain_name) - except ResourceException as e: - if e.message != self.ZONE_NOT_FOUND_MESSAGE: - raise - self.log.debug('_apply: no matching zone, creating') - ns1_zone = self._client.zones_create(domain_name) - - for change in changes: - class_name = change.__class__.__name__ - getattr(self, f'_apply_{class_name}')(ns1_zone, change) +logger = getLogger('Ns1') +try: + logger.warn('octodns_ns1 shimmed. Update your provider class to ' + 'octodns_ns1.Ns1Provider. ' + 'Shim will be removed in 1.0') + from octodns_ns1 import Ns1Provider + Ns1Provider # pragma: no cover +except ModuleNotFoundError: + logger.exception('Ns1Provider has been moved into a seperate module, ' + 'octodns_ns1 is now required. 
Provider class should ' + 'be updated to octodns_ns1.Ns1Provider') + raise diff --git a/octodns/provider/powerdns.py b/octodns/provider/powerdns.py index ebcd3b6..097088a 100644 --- a/octodns/provider/powerdns.py +++ b/octodns/provider/powerdns.py @@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function, \ from logging import getLogger -logger = getLogger('PowerDNS') +logger = getLogger('PowerDns') try: logger.warn('octodns_powerdns shimmed. Update your provider class to ' 'octodns_powerdns.PowerDnsProvider. ' diff --git a/octodns/provider/route53.py b/octodns/provider/route53.py index 3359f3e..daef8ea 100644 --- a/octodns/provider/route53.py +++ b/octodns/provider/route53.py @@ -5,1540 +5,17 @@ from __future__ import absolute_import, division, print_function, \ unicode_literals -from boto3 import client -from botocore.config import Config -from collections import defaultdict -from ipaddress import AddressValueError, ip_address -from pycountry_convert import country_alpha2_to_continent_code -from uuid import uuid4 -import logging -import re - -from ..equality import EqualityTupleMixin -from ..record import Record, Update -from ..record.geo import GeoCodes -from . import ProviderException -from .base import BaseProvider - -octal_re = re.compile(r'\\(\d\d\d)') - - -def _octal_replace(s): - # See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ - # DomainNameFormat.html - return octal_re.sub(lambda m: chr(int(m.group(1), 8)), s) - - -class _Route53Record(EqualityTupleMixin): - - @classmethod - def _new_dynamic(cls, provider, record, hosted_zone_id, creating): - # Creates the RRSets that correspond to the given dynamic record - ret = set() - - # HostedZoneId wants just the last bit, but the place we're getting - # this from looks like /hostedzone/Z424CArX3BB224 - hosted_zone_id = hosted_zone_id.split('/', 2)[-1] - - # Create the default pool which comes from the base `values` of the - # record object. Its only used if all other values fail their - # healthchecks, which hopefully never happens. - fqdn = record.fqdn - ret.add(_Route53Record(provider, record, creating, - f'_octodns-default-pool.{fqdn}')) - - # Pools - for pool_name, pool in record.dynamic.pools.items(): - - # Create the primary, this will be the rrset that geo targeted - # rrsets will point to when they want to use a pool of values. It's - # a primary and observes target health so if all the values for - # this pool go red, we'll use the fallback/SECONDARY just below - ret.add(_Route53DynamicPool(provider, hosted_zone_id, record, - pool_name, creating)) - - # Create the fallback for this pool - fallback = pool.data.get('fallback', False) - if fallback: - # We have an explicitly configured fallback, another pool to - # use if all our values go red. This RRSet configures that pool - # as the next best option - ret.add(_Route53DynamicPool(provider, hosted_zone_id, record, - pool_name, creating, - target_name=fallback)) - else: - # We fallback on the default, no explicit fallback so if all of - # this pool's values go red we'll fallback to the base - # (non-health-checked) default pool of values - ret.add(_Route53DynamicPool(provider, hosted_zone_id, record, - pool_name, creating, - target_name='default')) - - # Create the values for this pool. These are health checked and in - # general each unique value will have an associated healthcheck. 
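One detail worth pulling out of the removed route53.py above: Route53 hands back owner names with special bytes encoded as `\NNN` octal escapes (per the AWS DomainNameFormat docs referenced in the source), and `_octal_replace` decodes them before names are compared. The helper is small enough to reproduce nearly verbatim as a standalone sketch:

```python
import re

# Decode Route53's \NNN octal escapes in owner names, e.g.
# 'foo\052.unit.tests.' -> 'foo*.unit.tests.' (octal 052 is '*').
octal_re = re.compile(r'\\(\d\d\d)')

def octal_replace(s):
    return octal_re.sub(lambda m: chr(int(m.group(1), 8)), s)

assert octal_replace('foo\\052.unit.tests.') == 'foo*.unit.tests.'
```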
- # The PRIMARY pool up above will point to these RRSets which will - # be served out according to their weights - for i, value in enumerate(pool.data['values']): - weight = value['weight'] - status = value['status'] - value = value['value'] - ret.add(_Route53DynamicValue(provider, record, pool_name, - value, weight, status, i, - creating)) - - # Rules - for i, rule in enumerate(record.dynamic.rules): - pool_name = rule.data['pool'] - geos = rule.data.get('geos', []) - if geos: - for geo in geos: - # Create a RRSet for each geo in each rule that uses the - # desired target pool - ret.add(_Route53DynamicRule(provider, hosted_zone_id, - record, pool_name, i, - creating, geo=geo)) - else: - # There's no geo's for this rule so it's the catchall that will - # just point things that don't match any geo rules to the - # specified pool - ret.add(_Route53DynamicRule(provider, hosted_zone_id, record, - pool_name, i, creating)) - - return ret - - @classmethod - def _new_geo(cls, provider, record, creating): - # Creates the RRSets that correspond to the given geo record - ret = set() - - ret.add(_Route53GeoDefault(provider, record, creating)) - for ident, geo in record.geo.items(): - ret.add(_Route53GeoRecord(provider, record, ident, geo, - creating)) - - return ret - - @classmethod - def new(cls, provider, record, hosted_zone_id, creating): - # Creates the RRSets that correspond to the given record - - if getattr(record, 'dynamic', False): - ret = cls._new_dynamic(provider, record, hosted_zone_id, creating) - return ret - elif getattr(record, 'geo', False): - return cls._new_geo(provider, record, creating) - - # Its a simple record that translates into a single RRSet - return set((_Route53Record(provider, record, creating),)) - - def __init__(self, provider, record, creating, fqdn_override=None): - self.fqdn = fqdn_override or record.fqdn - self._type = record._type - self.ttl = record.ttl - - values_for = getattr(self, f'_values_for_{self._type}') - self.values = values_for(record) - - def mod(self, action, existing_rrsets): - return { - 'Action': action, - 'ResourceRecordSet': { - 'Name': self.fqdn, - 'ResourceRecords': [{'Value': v} for v in self.values], - 'TTL': self.ttl, - 'Type': self._type, - } - } - - # NOTE: we're using __hash__ and ordering methods that consider - # _Route53Records equivalent if they have the same class, fqdn, and _type. - # Values are ignored. This is useful when computing diffs/changes. 
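The hash/equality note above is the load-bearing trick for `_mod_Update` further down: because `_Route53Record`s hash and compare on (class, fqdn, type) while ignoring values, plain set arithmetic between the existing and new record sets yields deletes, creates, and upserts directly. A minimal sketch of the idea with a stand-in class:

```python
# Stand-in for _Route53Record's identity semantics: equality and
# hashing look only at (fqdn, type), never at values.
class Key:
    def __init__(self, fqdn, _type, values):
        self.fqdn, self._type, self.values = fqdn, _type, values

    def __hash__(self):
        return hash((self.fqdn, self._type))

    def __eq__(self, other):
        return (self.fqdn, self._type) == (other.fqdn, other._type)

existing = {Key('a.unit.tests.', 'A', ['1.1.1.1'])}
new = {Key('a.unit.tests.', 'A', ['2.2.2.2']),
       Key('b.unit.tests.', 'A', ['3.3.3.3'])}

assert existing - new == set()   # nothing to delete
assert len(new - existing) == 1  # b.unit.tests. is a create
# the overlap is an upsert, taken from `new` to keep the new values
assert [r for r in new if r in existing][0].values == ['2.2.2.2']
```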
- - def __hash__(self): - 'sub-classes should never use this method' - return f'{self.fqdn}:{self._type}'.__hash__() - - def _equality_tuple(self): - '''Sub-classes should call up to this and return its value and add - any additional fields they need to hav considered.''' - return (self.__class__.__name__, self.fqdn, self._type) - - def __repr__(self): - return '_Route53Record<{self.fqdn} {self._type} {self.ttl} ' \ - f'{self.values}>' - - def _value_convert_value(self, value, record): - return value - - _value_convert_A = _value_convert_value - _value_convert_AAAA = _value_convert_value - _value_convert_NS = _value_convert_value - _value_convert_CNAME = _value_convert_value - _value_convert_PTR = _value_convert_value - - def _values_for_values(self, record): - return record.values - - _values_for_A = _values_for_values - _values_for_AAAA = _values_for_values - _values_for_NS = _values_for_values - - def _value_convert_CAA(self, value, record): - return f'{value.flags} {value.tag} "{value.value}"' - - def _values_for_CAA(self, record): - return [self._value_convert_CAA(v, record) for v in record.values] - - def _values_for_value(self, record): - return [record.value] - - _values_for_CNAME = _values_for_value - _values_for_PTR = _values_for_value - - def _value_convert_MX(self, value, record): - return f'{value.preference} {value.exchange}' - - def _values_for_MX(self, record): - return [self._value_convert_MX(v, record) for v in record.values] - - def _value_convert_NAPTR(self, value, record): - flags = value.flags if value.flags else '' - service = value.service if value.service else '' - regexp = value.regexp if value.regexp else '' - return f'{value.order} {value.preference} "{flags}" "{service}" ' \ - f'"{regexp}" {value.replacement}' - - def _values_for_NAPTR(self, record): - return [self._value_convert_NAPTR(v, record) for v in record.values] - - def _value_convert_quoted(self, value, record): - return record.chunked_value(value) - - _value_convert_SPF = _value_convert_quoted - _value_convert_TXT = _value_convert_quoted - - def _values_for_quoted(self, record): - return record.chunked_values - - _values_for_SPF = _values_for_quoted - _values_for_TXT = _values_for_quoted - - def _value_for_SRV(self, value, record): - return f'{value.priority} {value.weight} {value.port} {value.target}' - - def _values_for_SRV(self, record): - return [self._value_for_SRV(v, record) for v in record.values] - - -class _Route53DynamicPool(_Route53Record): - - def __init__(self, provider, hosted_zone_id, record, pool_name, creating, - target_name=None): - fqdn_override = f'_octodns-{pool_name}-pool.{record.fqdn}' - super(_Route53DynamicPool, self) \ - .__init__(provider, record, creating, fqdn_override=fqdn_override) - - self.hosted_zone_id = hosted_zone_id - self.pool_name = pool_name - - self.target_name = target_name - if target_name: - # We're pointing down the chain - self.target_dns_name = f'_octodns-{target_name}-pool.{record.fqdn}' - else: - # We're a paimary, point at our values - self.target_dns_name = f'_octodns-{pool_name}-value.{record.fqdn}' - - @property - def mode(self): - return 'Secondary' if self.target_name else 'Primary' - - @property - def identifer(self): - if self.target_name: - return f'{self.pool_name}-{self.mode}-{self.target_name}' - return f'{self.pool_name}-{self.mode}' - - def mod(self, action, existing_rrsets): - return { - 'Action': action, - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': self.target_dns_name, - 'EvaluateTargetHealth': True, - 'HostedZoneId': 
self.hosted_zone_id, - }, - 'Failover': 'SECONDARY' if self.target_name else 'PRIMARY', - 'Name': self.fqdn, - 'SetIdentifier': self.identifer, - 'Type': self._type, - } - } - - def __hash__(self): - return f'{self.fqdn}:{self._type}:{self.identifer}'.__hash__() - - def __repr__(self): - return f'_Route53DynamicPool<{self.fqdn} {self._type} {self.mode} ' \ - f'{self.target_dns_name}>' - - -class _Route53DynamicRule(_Route53Record): - - def __init__(self, provider, hosted_zone_id, record, pool_name, index, - creating, geo=None): - super(_Route53DynamicRule, self).__init__(provider, record, creating) - - self.hosted_zone_id = hosted_zone_id - self.geo = geo - self.pool_name = pool_name - self.index = index - - self.target_dns_name = f'_octodns-{pool_name}-pool.{record.fqdn}' - - @property - def identifer(self): - return f'{self.index}-{self.pool_name}-{self.geo}' - - def mod(self, action, existing_rrsets): - rrset = { - 'AliasTarget': { - 'DNSName': self.target_dns_name, - 'EvaluateTargetHealth': True, - 'HostedZoneId': self.hosted_zone_id, - }, - 'GeoLocation': { - 'CountryCode': '*' - }, - 'Name': self.fqdn, - 'SetIdentifier': self.identifer, - 'Type': self._type, - } - - if self.geo: - geo = GeoCodes.parse(self.geo) - - if geo['province_code']: - rrset['GeoLocation'] = { - 'CountryCode': geo['country_code'], - 'SubdivisionCode': geo['province_code'], - } - elif geo['country_code']: - rrset['GeoLocation'] = { - 'CountryCode': geo['country_code'] - } - else: - rrset['GeoLocation'] = { - 'ContinentCode': geo['continent_code'], - } - - return { - 'Action': action, - 'ResourceRecordSet': rrset, - } - - def __hash__(self): - return f'{self.fqdn}:{self._type}:{self.identifer}'.__hash__() - - def __repr__(self): - return f'_Route53DynamicRule<{self.fqdn} {self._type} {self.index} ' \ - f'{self.geo} {self.target_dns_name}>' - - -class _Route53DynamicValue(_Route53Record): - - def __init__(self, provider, record, pool_name, value, weight, status, - index, creating): - fqdn_override = f'_octodns-{pool_name}-value.{record.fqdn}' - super(_Route53DynamicValue, self).__init__(provider, record, creating, - fqdn_override=fqdn_override) - - self.pool_name = pool_name - self.status = status - self.index = index - value_convert = getattr(self, f'_value_convert_{record._type}') - self.value = value_convert(value, record) - self.weight = weight - - self.health_check_id = provider.get_health_check_id(record, self.value, - self.status, - creating) - - @property - def identifer(self): - return f'{self.pool_name}-{self.index:03d}' - - def mod(self, action, existing_rrsets): - - if action == 'DELETE': - # When deleting records try and find the original rrset so that - # we're 100% sure to have the complete & accurate data (this mostly - # ensures we have the right health check id when there's multiple - # potential matches) - for existing in existing_rrsets: - if self.fqdn == existing.get('Name') and \ - self.identifer == existing.get('SetIdentifier', None): - return { - 'Action': action, - 'ResourceRecordSet': existing, - } - - ret = { - 'Action': action, - 'ResourceRecordSet': { - 'Name': self.fqdn, - 'ResourceRecords': [{'Value': self.value}], - 'SetIdentifier': self.identifer, - 'TTL': self.ttl, - 'Type': self._type, - 'Weight': self.weight, - } - } - - if self.health_check_id: - ret['ResourceRecordSet']['HealthCheckId'] = self.health_check_id - - return ret - - def __hash__(self): - return f'{self.fqdn}:{self._type}:{self.identifer}'.__hash__() - - def __repr__(self): - return 
f'_Route53DynamicValue<{self.fqdn} {self._type} ' \ - f'{self.identifer} {self.value}>' - - -class _Route53GeoDefault(_Route53Record): - - def mod(self, action, existing_rrsets): - return { - 'Action': action, - 'ResourceRecordSet': { - 'Name': self.fqdn, - 'GeoLocation': { - 'CountryCode': '*' - }, - 'ResourceRecords': [{'Value': v} for v in self.values], - 'SetIdentifier': 'default', - 'TTL': self.ttl, - 'Type': self._type, - } - } - - def __hash__(self): - return f'{self.fqdn}:{self._type}:default'.__hash__() - - def __repr__(self): - return f'_Route53GeoDefault<{self.fqdn} {self._type} {self.ttl} ' \ - f'{self.values}>' - - -class _Route53GeoRecord(_Route53Record): - - def __init__(self, provider, record, ident, geo, creating): - super(_Route53GeoRecord, self).__init__(provider, record, creating) - self.geo = geo - - value = geo.values[0] - self.health_check_id = provider.get_health_check_id(record, value, - 'obey', creating) - - def mod(self, action, existing_rrsets): - geo = self.geo - set_identifier = geo.code - fqdn = self.fqdn - - if action == 'DELETE': - # When deleting records try and find the original rrset so that - # we're 100% sure to have the complete & accurate data (this mostly - # ensures we have the right health check id when there's multiple - # potential matches) - for existing in existing_rrsets: - if fqdn == existing.get('Name') and \ - set_identifier == existing.get('SetIdentifier', None): - return { - 'Action': action, - 'ResourceRecordSet': existing, - } - - rrset = { - 'Name': self.fqdn, - 'GeoLocation': { - 'CountryCode': '*' - }, - 'ResourceRecords': [{'Value': v} for v in geo.values], - 'SetIdentifier': set_identifier, - 'TTL': self.ttl, - 'Type': self._type, - } - - if self.health_check_id: - rrset['HealthCheckId'] = self.health_check_id - - if geo.subdivision_code: - rrset['GeoLocation'] = { - 'CountryCode': geo.country_code, - 'SubdivisionCode': geo.subdivision_code - } - elif geo.country_code: - rrset['GeoLocation'] = { - 'CountryCode': geo.country_code - } - else: - rrset['GeoLocation'] = { - 'ContinentCode': geo.continent_code - } - - return { - 'Action': action, - 'ResourceRecordSet': rrset, - } - - def __hash__(self): - return f'{self.fqdn}:{self._type}:{self.geo.code}'.__hash__() - - def _equality_tuple(self): - return super(_Route53GeoRecord, self)._equality_tuple() + \ - (self.geo.code,) - - def __repr__(self): - return f'_Route53GeoRecord<{self.fqdn} {self._type} {self.ttl} ' \ - f'{self.geo.code} {self.values}>' - - -class Route53ProviderException(ProviderException): - pass - - -def _mod_keyer(mod): - rrset = mod['ResourceRecordSet'] - - # Route53 requires that changes are ordered such that a target of an - # AliasTarget is created or upserted prior to the record that targets it. - # This is complicated by "UPSERT" appearing to be implemented as "DELETE" - # before all changes, followed by a "CREATE", internally in the AWS API. 
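Before the ordering discussion continues below, one mapping worth making concrete: both `_Route53DynamicRule.mod` and `_Route53GeoRecord.mod` above build Route53's `GeoLocation` from a parsed geo with most-specific-wins precedence, where `'*'` marks the catch-all. Condensed into a standalone sketch (the keyword signature is illustrative):

```python
# Most-specific geo field wins; no geo at all becomes the '*' country
# code Route53 uses for default answers.
def geo_location(continent=None, country=None, subdivision=None):
    if subdivision:
        return {'CountryCode': country, 'SubdivisionCode': subdivision}
    if country:
        return {'CountryCode': country}
    if continent:
        return {'ContinentCode': continent}
    return {'CountryCode': '*'}

assert geo_location('NA', 'US', 'KY') == \
    {'CountryCode': 'US', 'SubdivisionCode': 'KY'}
assert geo_location('EU', 'FR') == {'CountryCode': 'FR'}
assert geo_location('AS') == {'ContinentCode': 'AS'}
assert geo_location() == {'CountryCode': '*'}
```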
- # Because of this, we order changes as follows: - # - Delete any records that we wish to delete that are GEOS - # (because they are never targeted by anything) - # - Delete any records that we wish to delete that are SECONDARY - # (because they are no longer targeted by GEOS) - # - Delete any records that we wish to delete that are PRIMARY - # (because they are no longer targeted by SECONDARY) - # - Delete any records that we wish to delete that are VALUES - # (because they are no longer targeted by PRIMARY) - # - CREATE/UPSERT any records that are VALUES - # (because they don't depend on other records) - # - CREATE/UPSERT any records that are PRIMARY - # (because they always point to VALUES which now exist) - # - CREATE/UPSERT any records that are SECONDARY - # (because they now have PRIMARY records to target) - # - CREATE/UPSERT any records that are GEOS - # (because they now have all their PRIMARY pools to target) - # - :tada: - # - # In theory we could also do this based on actual target reference - # checking, but that's more complex. Since our rules have a known - # dependency order, we just rely on that. - - # Get the unique ID from the name/id to get a consistent ordering. - if rrset.get('GeoLocation', False): - unique_id = rrset['SetIdentifier'] - else: - if 'SetIdentifier' in rrset: - unique_id = f'{rrset["Name"]}-{rrset["SetIdentifier"]}' - else: - unique_id = rrset['Name'] - - # Prioritise within the action_priority, ensuring targets come first. - if rrset.get('GeoLocation', False): - # Geos reference pools, so they come last. - record_priority = 3 - elif rrset.get('AliasTarget', False): - # We use an alias - if rrset.get('Failover', False) == 'SECONDARY': - # We're a secondary, which reference the primary (failover, P1). - record_priority = 2 - else: - # We're a primary, we reference values (P0). - record_priority = 1 - else: - # We're just a plain value, has no dependencies so first. - record_priority = 0 - - if mod['Action'] == 'DELETE': - # Delete things first, so we can never trounce our own additions - action_priority = 0 - # Delete in the reverse order of priority, e.g. start with the deepest - # reference and work back to the values, rather than starting at the - # values (still ref'd). - record_priority = -record_priority - else: - # For CREATE and UPSERT, Route53 seems to treat them the same, so - # interleave these, keeping the reference order described above. - action_priority = 1 - - return (action_priority, record_priority, unique_id) - - -def _parse_pool_name(n): - # Parse the pool name out of _octodns--pool... - return n.split('.', 1)[0][9:-5] - - -class Route53Provider(BaseProvider): - ''' - AWS Route53 Provider - - route53: - class: octodns.provider.route53.Route53Provider - # The AWS access key id - access_key_id: - # The AWS secret access key - secret_access_key: - # The AWS session token (optional) - # Only needed if using temporary security credentials - session_token: - - Alternatively, you may leave out access_key_id, secret_access_key - and session_token. - This will result in boto3 deciding authentication dynamically. - - In general the account used will need full permissions on Route53. - ''' - SUPPORTS_GEO = True - SUPPORTS_DYNAMIC = True - SUPPORTS_POOL_VALUE_STATUS = True - SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR', - 'SPF', 'SRV', 'TXT')) - - # This should be bumped when there are underlying changes made to the - # health check config. 
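The ordering comment above compresses to a three-part sort key. A condensed sketch, with the four record kinds reduced to labels (the real `_mod_keyer` derives them from the rrset's `GeoLocation`/`AliasTarget`/`Failover` fields):

```python
# Deletes first, deepest references first; then creates/upserts from
# values up through geos; unique_id keeps the ordering stable.
def mod_key(action, kind, unique_id):
    record_priority = {'value': 0, 'primary': 1,
                       'secondary': 2, 'geo': 3}[kind]
    if action == 'DELETE':
        return (0, -record_priority, unique_id)
    return (1, record_priority, unique_id)

mods = [('CREATE', 'geo', 'g'), ('DELETE', 'value', 'v'),
        ('CREATE', 'value', 'v'), ('DELETE', 'geo', 'g')]
mods.sort(key=lambda m: mod_key(*m))
assert mods == [('DELETE', 'geo', 'g'), ('DELETE', 'value', 'v'),
                ('CREATE', 'value', 'v'), ('CREATE', 'geo', 'g')]
```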
- HEALTH_CHECK_VERSION = '0001' - - def __init__(self, id, access_key_id=None, secret_access_key=None, - max_changes=1000, client_max_attempts=None, - session_token=None, delegation_set_id=None, - get_zones_by_name=False, *args, **kwargs): - self.max_changes = max_changes - self.delegation_set_id = delegation_set_id - self.get_zones_by_name = get_zones_by_name - _msg = f'access_key_id={access_key_id}, secret_access_key=***, ' \ - 'session_token=***' - use_fallback_auth = access_key_id is None and \ - secret_access_key is None and session_token is None - if use_fallback_auth: - _msg = 'auth=fallback' - self.log = logging.getLogger(f'Route53Provider[{id}]') - self.log.debug('__init__: id=%s, %s', id, _msg) - super(Route53Provider, self).__init__(id, *args, **kwargs) - - config = None - if client_max_attempts is not None: - self.log.info('__init__: setting max_attempts to %d', - client_max_attempts) - config = Config(retries={'max_attempts': client_max_attempts}) - - if use_fallback_auth: - self._conn = client('route53', config=config) - else: - self._conn = client('route53', aws_access_key_id=access_key_id, - aws_secret_access_key=secret_access_key, - aws_session_token=session_token, - config=config) - - self._r53_zones = None - self._r53_rrsets = {} - self._health_checks = None - - def _get_zone_id_by_name(self, name): - # attempt to get zone by name - # limited to one as this should be unique - id = None - resp = self._conn.list_hosted_zones_by_name( - DNSName=name, MaxItems="1" - ) - if len(resp['HostedZones']) != 0: - # if there is a response that starts with the name - if resp['HostedZones'][0]['Name'].startswith(name): - id = resp['HostedZones'][0]['Id'] - self.log.debug('get_zones_by_name: id=%s', id) - return id - - def update_r53_zones(self, name): - if self._r53_zones is None: - if self.get_zones_by_name: - id = self._get_zone_id_by_name(name) - zones = {} - zones[name] = id - self._r53_zones = zones - else: - self.log.debug('r53_zones: loading') - zones = {} - more = True - start = {} - while more: - resp = self._conn.list_hosted_zones(**start) - for z in resp['HostedZones']: - zones[z['Name']] = z['Id'] - more = resp['IsTruncated'] - start['Marker'] = resp.get('NextMarker', None) - self._r53_zones = zones - else: - if name not in self._r53_zones and self.get_zones_by_name: - id = self._get_zone_id_by_name(name) - self._r53_zones[name] = id - - def _get_zone_id(self, name, create=False): - self.log.debug('_get_zone_id: name=%s', name) - self.update_r53_zones(name) - id = None - if name in self._r53_zones: - id = self._r53_zones[name] - self.log.debug('_get_zone_id: id=%s', id) - if create and not id: - ref = uuid4().hex - del_set = self.delegation_set_id - self.log.debug('_get_zone_id: no matching zone, creating, ' - 'ref=%s', ref) - if del_set: - resp = self._conn.create_hosted_zone(Name=name, - CallerReference=ref, - DelegationSetId=del_set) - else: - resp = self._conn.create_hosted_zone(Name=name, - CallerReference=ref) - self._r53_zones[name] = id = resp['HostedZone']['Id'] - return id - - def _parse_geo(self, rrset): - try: - loc = rrset['GeoLocation'] - except KeyError: - # No geo loc - return - try: - return loc['ContinentCode'] - except KeyError: - # Must be country - cc = loc['CountryCode'] - if cc == '*': - # This is the default - return - cn = country_alpha2_to_continent_code(cc) - try: - return f'{cn}-{cc}-{loc["SubdivisionCode"]}' - except KeyError: - return f'{cn}-{cc}' - - def _data_for_geo(self, rrset): - ret = { - 'type': rrset['Type'], - 'values': [v['Value'] for 
v in rrset['ResourceRecords']], - 'ttl': int(rrset['TTL']) - } - geo = self._parse_geo(rrset) - if geo: - ret['geo'] = geo - return ret - - _data_for_A = _data_for_geo - _data_for_AAAA = _data_for_geo - - def _data_for_CAA(self, rrset): - values = [] - for rr in rrset['ResourceRecords']: - flags, tag, value = rr['Value'].split() - values.append({ - 'flags': flags, - 'tag': tag, - 'value': value[1:-1], - }) - return { - 'type': rrset['Type'], - 'values': values, - 'ttl': int(rrset['TTL']) - } - - def _data_for_single(self, rrset): - return { - 'type': rrset['Type'], - 'value': rrset['ResourceRecords'][0]['Value'], - 'ttl': int(rrset['TTL']) - } - - _data_for_PTR = _data_for_single - _data_for_CNAME = _data_for_single - - _fix_semicolons = re.compile(r'(? 1: - # Multiple data indicates a record with GeoDNS, convert - # them data into the format we need - geo = {} - for d in data: - try: - geo[d['geo']] = d['values'] - except KeyError: - primary = d - data = primary - data['geo'] = geo - else: - data = data[0] - record = Record.new(zone, name, data, source=self, - lenient=lenient) - zone.add_record(record, lenient=lenient) - - self.log.info('populate: found %s records, exists=%s', - len(zone.records) - before, exists) - return exists - - def _gen_mods(self, action, records, existing_rrsets): - ''' - Turns `_Route53*`s in to `change_resource_record_sets` `Changes` - ''' - return [r.mod(action, existing_rrsets) for r in records] - - @property - def health_checks(self): - if self._health_checks is None: - # need to do the first load - self.log.debug('health_checks: loading') - checks = {} - more = True - start = {} - while more: - resp = self._conn.list_health_checks(**start) - for health_check in resp['HealthChecks']: - # our format for CallerReference is dddd:hex-uuid - ref = health_check.get('CallerReference', 'xxxxx') - if len(ref) > 4 and ref[4] != ':': - # ignore anything else - continue - checks[health_check['Id']] = health_check - - more = resp['IsTruncated'] - start['Marker'] = resp.get('NextMarker', None) - - self._health_checks = checks - - # We've got a cached version use it - return self._health_checks - - def _healthcheck_measure_latency(self, record): - return record._octodns.get('route53', {}) \ - .get('healthcheck', {}) \ - .get('measure_latency', True) - - def _healthcheck_request_interval(self, record): - interval = record._octodns.get('route53', {}) \ - .get('healthcheck', {}) \ - .get('request_interval', 10) - if (interval in [10, 30]): - return interval - else: - raise Route53ProviderException( - 'route53.healthcheck.request_interval ' - 'parameter must be either 10 or 30.') - - def _health_check_equivalent(self, host, path, protocol, port, - measure_latency, request_interval, - health_check, value=None, disabled=None, - inverted=None): - config = health_check['HealthCheckConfig'] - - # So interestingly Route53 normalizes IPv6 addresses to a funky, but - # valid, form which will cause us to fail to find see things as - # equivalent. To work around this we'll ip_address's returned objects - # for equivalence. 
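The IPv6 wrinkle mentioned above is easy to reproduce on its own: Route53 returns addresses in a normalized (still valid) form, so string comparison fails where `ipaddress` comparison succeeds.

```python
from ipaddress import ip_address

# The exact case from the comment above: zero runs get collapsed by
# Route53, so compare parsed addresses rather than the raw strings.
ours = '2001:4860:4860:0:0:0:0:8842'
theirs = '2001:4860:4860::8842'

assert ours != theirs
assert ip_address(ours) == ip_address(theirs)
```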
- # E.g 2001:4860:4860:0:0:0:0:8842 -> 2001:4860:4860::8842 - if value: - value = ip_address(str(value)) - config_ip_address = ip_address(str(config['IPAddress'])) - else: - # No value so give this a None to match value's - config_ip_address = None - - fully_qualified_domain_name = config.get('FullyQualifiedDomainName', - None) - resource_path = config.get('ResourcePath', None) - return host == fully_qualified_domain_name and \ - path == resource_path and protocol == config['Type'] and \ - port == config['Port'] and \ - measure_latency == config['MeasureLatency'] and \ - request_interval == config['RequestInterval'] and \ - (disabled is None or disabled == config['Disabled']) and \ - (inverted is None or inverted == config['Inverted']) and \ - value == config_ip_address - - def get_health_check_id(self, record, value, status, create): - # fqdn & the first value are special, we use them to match up health - # checks to their records. Route53 health checks check a single ip and - # we're going to assume that ips are interchangeable to avoid - # health-checking each one independently - fqdn = record.fqdn - self.log.debug('get_health_check_id: fqdn=%s, type=%s, value=%s, ' - 'status=%s', fqdn, record._type, value, status) - - if status == 'up': - # status up means no health check - self.log.debug('get_health_check_id: status up, no health check') - return None - - try: - ip_address(str(value)) - # We're working with an IP, host is the Host header - healthcheck_host = record.healthcheck_host(value=value) - except (AddressValueError, ValueError): - # This isn't an IP, host is the value, value should be None - healthcheck_host = value - value = None - - healthcheck_path = record.healthcheck_path - healthcheck_protocol = record.healthcheck_protocol - healthcheck_port = record.healthcheck_port - healthcheck_latency = self._healthcheck_measure_latency(record) - healthcheck_interval = self._healthcheck_request_interval(record) - if status == 'down': - healthcheck_disabled = True - healthcheck_inverted = True - else: # obey - healthcheck_disabled = False - healthcheck_inverted = False - - # we're looking for a healthcheck with the current version & our record - # type, we'll ignore anything else - expected_ref = \ - f'{self.HEALTH_CHECK_VERSION}:{record._type}:{record.fqdn}:' - for id, health_check in self.health_checks.items(): - if not health_check['CallerReference'].startswith(expected_ref): - # not match, ignore - continue - if self._health_check_equivalent(healthcheck_host, - healthcheck_path, - healthcheck_protocol, - healthcheck_port, - healthcheck_latency, - healthcheck_interval, - health_check, - value=value, - disabled=healthcheck_disabled, - inverted=healthcheck_inverted): - # this is the health check we're looking for - self.log.debug('get_health_check_id: found match id=%s', id) - return id - - if not create: - # no existing matches and not allowed to create, return none - self.log.debug('get_health_check_id: no matches, no create') - return - - # no existing matches, we need to create a new health check - config = { - 'Disabled': healthcheck_disabled, - 'Inverted': healthcheck_inverted, - 'EnableSNI': healthcheck_protocol == 'HTTPS', - 'FailureThreshold': 6, - 'MeasureLatency': healthcheck_latency, - 'Port': healthcheck_port, - 'RequestInterval': healthcheck_interval, - 'Type': healthcheck_protocol, - } - if healthcheck_protocol != 'TCP': - config['FullyQualifiedDomainName'] = healthcheck_host - config['ResourcePath'] = healthcheck_path - if value: - config['IPAddress'] = value - - ref = 
f'{self.HEALTH_CHECK_VERSION}:{record._type}:{record.fqdn}:' + \ - uuid4().hex[:12] - resp = self._conn.create_health_check(CallerReference=ref, - HealthCheckConfig=config) - health_check = resp['HealthCheck'] - id = health_check['Id'] - - # Set a Name for the benefit of the UI - value_or_host = value or healthcheck_host - name = f'{record.fqdn}:{record._type} - {value_or_host}' - self._conn.change_tags_for_resource(ResourceType='healthcheck', - ResourceId=id, - AddTags=[{ - 'Key': 'Name', - 'Value': name, - }]) - # Manually add it to our cache - health_check['Tags'] = { - 'Name': name - } - - # store the new health check so that we'll be able to find it in the - # future - self._health_checks[id] = health_check - self.log.info('get_health_check_id: created id=%s, host=%s, ' - 'path=%s, protocol=%s, port=%d, measure_latency=%r, ' - 'request_interval=%d, value=%s', - id, healthcheck_host, healthcheck_path, - healthcheck_protocol, healthcheck_port, - healthcheck_latency, healthcheck_interval, value) - return id - - def _gc_health_checks(self, record, new): - if record._type not in ('A', 'AAAA'): - return - self.log.debug('_gc_health_checks: record=%s', record) - # Find the health checks we're using for the new route53 records - in_use = set() - for r in new: - hc_id = getattr(r, 'health_check_id', False) - if hc_id: - in_use.add(hc_id) - self.log.debug('_gc_health_checks: in_use=%s', in_use) - # Now we need to run through ALL the health checks looking for those - # that apply to this record, deleting any that do and are no longer in - # use - expected_re = re.compile(fr'^\d\d\d\d:{record._type}:{record.fqdn}:') - # UNITL 1.0: we'll clean out the previous version of Route53 health - # checks as best as we can. - expected_legacy_host = record.fqdn[:-1] - expected_legacy = f'0000:{record._type}:' - for id, health_check in self.health_checks.items(): - ref = health_check['CallerReference'] - if expected_re.match(ref) and id not in in_use: - # this is a health check for this record, but not one we're - # planning to use going forward - self.log.info('_gc_health_checks: deleting id=%s', id) - self._conn.delete_health_check(HealthCheckId=id) - elif ref.startswith(expected_legacy): - config = health_check['HealthCheckConfig'] - if expected_legacy_host == config['FullyQualifiedDomainName']: - self.log.info('_gc_health_checks: deleting legacy id=%s', - id) - self._conn.delete_health_check(HealthCheckId=id) - - def _gen_records(self, record, zone_id, creating=False): - ''' - Turns an octodns.Record into one or more `_Route53*`s - ''' - return _Route53Record.new(self, record, zone_id, creating) - - def _mod_Create(self, change, zone_id, existing_rrsets): - # New is the stuff that needs to be created - new_records = self._gen_records(change.new, zone_id, creating=True) - # Now is a good time to clear out any unused health checks since we - # know what we'll be using going forward - self._gc_health_checks(change.new, new_records) - return self._gen_mods('CREATE', new_records, existing_rrsets) - - def _mod_Update(self, change, zone_id, existing_rrsets): - # See comments in _Route53Record for how the set math is made to do our - # bidding here. 
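Stepping back to the health-check bookkeeping above: `CallerReference` doubles as the index that ties a check to its record. It is built as a version prefix plus type and fqdn with a short random suffix, so matching is a `startswith()` and garbage collection is a regex over the same shape. A sketch of the scheme:

```python
import re
from uuid import uuid4

# Version-prefixed reference, as in the removed provider; bumping the
# version invalidates (and eventually GCs) older checks.
HEALTH_CHECK_VERSION = '0001'

def caller_ref(_type, fqdn):
    return f'{HEALTH_CHECK_VERSION}:{_type}:{fqdn}:{uuid4().hex[:12]}'

ref = caller_ref('A', 'www.unit.tests.')
assert ref.startswith('0001:A:www.unit.tests.:')
# the GC-side match is version-agnostic: any dddd: prefix for the record
assert re.match(r'^\d\d\d\d:A:www\.unit\.tests\.:', ref)
```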
- existing_records = self._gen_records(change.existing, zone_id, - creating=False) - new_records = self._gen_records(change.new, zone_id, creating=True) - # Now is a good time to clear out any unused health checks since we - # know what we'll be using going forward - self._gc_health_checks(change.new, new_records) - # Things in existing, but not new are deletes - deletes = existing_records - new_records - # Things in new, but not existing are the creates - creates = new_records - existing_records - # Things in both need updating, we could optimize this and filter out - # things that haven't actually changed, but that's for another day. - # We can't use set math here b/c we won't be able to control which of - # the two objects will be in the result and we need to ensure it's the - # new one. - upserts = set() - for new_record in new_records: - if new_record in existing_records: - upserts.add(new_record) - - return self._gen_mods('DELETE', deletes, existing_rrsets) + \ - self._gen_mods('CREATE', creates, existing_rrsets) + \ - self._gen_mods('UPSERT', upserts, existing_rrsets) - - def _mod_Delete(self, change, zone_id, existing_rrsets): - # Existing is the thing that needs to be deleted - existing_records = self._gen_records(change.existing, zone_id, - creating=False) - # Now is a good time to clear out all the health checks since we know - # we're done with them - self._gc_health_checks(change.existing, []) - return self._gen_mods('DELETE', existing_records, existing_rrsets) - - def _extra_changes_update_needed(self, record, rrset, statuses={}): - value = rrset['ResourceRecords'][0]['Value'] - if record._type == 'CNAME': - # For CNAME, healthcheck host by default points to the CNAME value - healthcheck_host = value - else: - healthcheck_host = record.healthcheck_host() - - healthcheck_path = record.healthcheck_path - healthcheck_protocol = record.healthcheck_protocol - healthcheck_port = record.healthcheck_port - healthcheck_latency = self._healthcheck_measure_latency(record) - healthcheck_interval = self._healthcheck_request_interval(record) - - status = statuses.get(value, 'obey') - if status == 'up': - if 'HealthCheckId' in rrset: - self.log.info('_extra_changes_update_needed: health-check ' - 'found for status="up", causing update of %s:%s', - record.fqdn, record._type) - return True - else: - # No health check needed - return False - - try: - health_check_id = rrset['HealthCheckId'] - health_check = self.health_checks[health_check_id] - caller_ref = health_check['CallerReference'] - if caller_ref.startswith(self.HEALTH_CHECK_VERSION): - if self._health_check_equivalent(healthcheck_host, - healthcheck_path, - healthcheck_protocol, - healthcheck_port, - healthcheck_latency, - healthcheck_interval, - health_check): - # it has the right health check - return False - except (IndexError, KeyError): - # no health check id or one that isn't the right version - pass - - # no good, doesn't have the right health check, needs an update - self.log.info('_extra_changes_update_needed: health-check caused ' - 'update of %s:%s', record.fqdn, record._type) - return True - - def _extra_changes_geo_needs_update(self, zone_id, record): - # OK this is a record we don't have change for that does have geo - # information. 
We need to look and see if it needs to be updated b/c of - # a health check version bump or other mismatch - self.log.debug('_extra_changes_geo_needs_update: inspecting=%s, %s', - record.fqdn, record._type) - - fqdn = record.fqdn - - # loop through all the r53 rrsets - for rrset in self._load_records(zone_id): - if fqdn == rrset['Name'] and record._type == rrset['Type'] and \ - rrset.get('GeoLocation', {}).get('CountryCode', False) != '*' \ - and self._extra_changes_update_needed(record, rrset): - # no good, doesn't have the right health check, needs an update - self.log.info('_extra_changes_geo_needs_update: health-check ' - 'caused update of %s:%s', record.fqdn, - record._type) - return True - - return False - - def _extra_changes_dynamic_needs_update(self, zone_id, record): - # OK this is a record we don't have change for that does have dynamic - # information. We need to look and see if it needs to be updated b/c of - # a health check version bump or other mismatch - self.log.debug('_extra_changes_dynamic_needs_update: inspecting=%s, ' - '%s', record.fqdn, record._type) - - fqdn = record.fqdn - _type = record._type - - # map values to statuses - statuses = {} - for pool in record.dynamic.pools.values(): - for value in pool.data['values']: - statuses[value['value']] = value.get('status', 'obey') - - # loop through all the r53 rrsets - for rrset in self._load_records(zone_id): - name = rrset['Name'] - # Break off the first piece of the name, it'll let us figure out if - # this is an rrset we're interested in. - maybe_meta, rest = name.split('.', 1) - - if not maybe_meta.startswith('_octodns-') or \ - not maybe_meta.endswith('-value') or \ - '-default-' in name: - # We're only interested in non-default dynamic value records, - # as that's where healthchecks live - continue - - if rest != fqdn or _type != rrset['Type']: - # rrset isn't for the current record - continue - - if self._extra_changes_update_needed(record, rrset, statuses): - # no good, doesn't have the right health check, needs an update - self.log.info('_extra_changes_dynamic_needs_update: ' - 'health-check caused update of %s:%s', - record.fqdn, record._type) - return True - - return False - - def _extra_changes(self, desired, changes, **kwargs): - self.log.debug('_extra_changes: desired=%s', desired.name) - zone_id = self._get_zone_id(desired.name) - if not zone_id: - # zone doesn't exist so no extras to worry about - return [] - # we'll skip extra checking for anything we're already going to change - changed = set([c.record for c in changes]) - # ok, now it's time for the reason we're here, we need to go over all - # the desired records - extras = [] - for record in desired.records: - if record in changed: - # already have a change for it, skipping - continue - - if getattr(record, 'geo', False): - if self._extra_changes_geo_needs_update(zone_id, record): - extras.append(Update(record, record)) - elif getattr(record, 'dynamic', False): - if self._extra_changes_dynamic_needs_update(zone_id, record): - extras.append(Update(record, record)) - - return extras - - def _apply(self, plan): - desired = plan.desired - changes = plan.changes - self.log.info('_apply: zone=%s, len(changes)=%d', desired.name, - len(changes)) - - batch = [] - batch_rs_count = 0 - zone_id = self._get_zone_id(desired.name, True) - existing_rrsets = self._load_records(zone_id) - for c in changes: - # Generate the mods for this change - klass = c.__class__.__name__ - mod_type = getattr(self, f'_mod_{klass}') - mods = mod_type(c, zone_id, existing_rrsets) - - 
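One more pattern worth annotating before the batching code that follows: Route53 caps a changeset at 1000 entries, so `_apply` accumulates each change's mods until the next group would cross `max_changes`, flushes, and always sends the final partial batch. A standalone sketch of just that accumulation rule (function and argument names are illustrative):

```python
# Group (mods, resource_record_count) pairs into changesets that stay
# under the limit; a single oversized group is an error, mirroring the
# "Too many modifications" check in _apply.
def batches(groups, max_changes=1000):
    out, batch, count = [], [], 0
    for mods, size in groups:
        if size > max_changes:
            raise Exception(f'Too many modifications: {size}')
        if count + size < max_changes:
            batch += mods
            count += size
        else:
            out.append(batch)
            batch, count = list(mods), size
    out.append(batch)  # there's always a leftover batch to send
    return out

assert batches([(['a'], 600), (['b'], 600), (['c'], 100)],
               max_changes=1000) == [['a'], ['b', 'c']]
```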
# Order our mods to make sure targets exist before alises point to - # them and we CRUD in the desired order - mods.sort(key=_mod_keyer) - - mods_rs_count = sum( - [len(m['ResourceRecordSet'].get('ResourceRecords', '')) - for m in mods] - ) - - if mods_rs_count > self.max_changes: - # a single mod resulted in too many ResourceRecords changes - raise Exception(f'Too many modifications: {mods_rs_count}') - - # r53 limits changesets to 1000 entries - if (batch_rs_count + mods_rs_count) < self.max_changes: - # append to the batch - batch += mods - batch_rs_count += mods_rs_count - else: - self.log.info('_apply: sending change request for batch of ' - '%d mods, %d ResourceRecords', len(batch), - batch_rs_count) - # send the batch - self._really_apply(batch, zone_id) - # start a new batch with the leftovers - batch = mods - batch_rs_count = mods_rs_count - - # the way the above process works there will always be something left - # over in batch to process. In the case that we submit a batch up there - # it was always the case that there was something pushing us over - # max_changes and thus left over to submit. - self.log.info('_apply: sending change request for batch of %d mods,' - ' %d ResourceRecords', len(batch), - batch_rs_count) - self._really_apply(batch, zone_id) - - def _really_apply(self, batch, zone_id): - # Ensure this batch is ordered (deletes before creates etc.) - batch.sort(key=_mod_keyer) - uuid = uuid4().hex - batch = { - 'Comment': f'Change: {uuid}', - 'Changes': batch, - } - self.log.debug('_really_apply: sending change request, comment=%s', - batch['Comment']) - resp = self._conn.change_resource_record_sets( - HostedZoneId=zone_id, ChangeBatch=batch) - self.log.debug('_really_apply: change info=%s', resp['ChangeInfo']) +from logging import getLogger + +logger = getLogger('Route53') +try: + logger.warn('octodns_route53 shimmed. Update your provider class to ' + 'octodns_route53.Route53Provider. ' + 'Shim will be removed in 1.0') + from octodns_route53 import Route53Provider + Route53Provider # pragma: no cover +except ModuleNotFoundError: + logger.exception('Route53Provider has been moved into a seperate module, ' + 'octodns_route53 is now required. 
Provider class should ' + 'be updated to octodns_route53.Route53Provider') + raise diff --git a/requirements.txt b/requirements.txt index c9c9f00..23bf713 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,8 +3,6 @@ azure-common==1.1.27 azure-identity==1.5.0 azure-mgmt-dns==8.0.0 azure-mgmt-trafficmanager==0.51.0 -boto3==1.15.9 -botocore==1.18.9 dnspython==1.16.0 docutils==0.16 dyn==1.8.1 @@ -15,12 +13,10 @@ google-cloud-dns==0.32.0 jmespath==0.10.0 msrestazure==0.6.4 natsort==6.2.1 -ns1-python==0.16.1 ovh==0.5.0 pycountry-convert==0.7.2 pycountry==20.7.3 python-dateutil==2.8.1 requests==2.25.1 -s3transfer==0.3.3 setuptools==44.1.1 -python-transip==0.5.0 \ No newline at end of file +python-transip==0.5.0 diff --git a/tests/fixtures/dnsimple-invalid-content.json b/tests/fixtures/dnsimple-invalid-content.json deleted file mode 100644 index 4e6e10b..0000000 --- a/tests/fixtures/dnsimple-invalid-content.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "data": [ - { - "id": 11189898, - "zone_id": "unit.tests", - "parent_id": null, - "name": "naptr", - "content": "", - "ttl": 600, - "priority": null, - "type": "NAPTR", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:11Z", - "updated_at": "2017-03-09T15:55:11Z" - }, - { - "id": 11189899, - "zone_id": "unit.tests", - "parent_id": null, - "name": "naptr", - "content": "100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .", - "ttl": 600, - "priority": null, - "type": "NAPTR", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:11Z", - "updated_at": "2017-03-09T15:55:11Z" - }, - { - "id": 11189878, - "zone_id": "unit.tests", - "parent_id": null, - "name": "_srv._tcp", - "content": "", - "ttl": 600, - "priority": 10, - "type": "SRV", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189879, - "zone_id": "unit.tests", - "parent_id": null, - "name": "_srv._tcp", - "content": "20 foo-2.unit.tests", - "ttl": 600, - "priority": 12, - "type": "SRV", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189882, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "", - "ttl": 3600, - "priority": null, - "type": "SSHFP", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189883, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "1 1", - "ttl": 3600, - "priority": null, - "type": "SSHFP", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - } - ], - "pagination": { - "current_page": 1, - "per_page": 20, - "total_entries": 6, - "total_pages": 1 - } -} diff --git a/tests/fixtures/dnsimple-page-1.json b/tests/fixtures/dnsimple-page-1.json deleted file mode 100644 index fca2111..0000000 --- a/tests/fixtures/dnsimple-page-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "data": [ - { - "id": 11189873, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "ns1.dnsimple.com admin.dnsimple.com 1489074932 86400 7200 604800 300", - "ttl": 3600, - "priority": null, - "type": "SOA", - "regions": [ - "global" - ], - "system_record": true, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:56:21Z" - }, - { - "id": 
11189874, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "ns1.dnsimple.com", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": true, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189875, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "ns2.dnsimple.com", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": true, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189876, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "ns3.dnsimple.com", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": true, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189877, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "ns4.dnsimple.com", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": true, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189878, - "zone_id": "unit.tests", - "parent_id": null, - "name": "_srv._tcp", - "content": "20 30 foo-1.unit.tests", - "ttl": 600, - "priority": 10, - "type": "SRV", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189879, - "zone_id": "unit.tests", - "parent_id": null, - "name": "_srv._tcp", - "content": "20 30 foo-2.unit.tests", - "ttl": 600, - "priority": 12, - "type": "SRV", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189880, - "zone_id": "unit.tests", - "parent_id": null, - "name": "under", - "content": "ns1.unit.tests.", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189881, - "zone_id": "unit.tests", - "parent_id": null, - "name": "under", - "content": "ns2.unit.tests.", - "ttl": 3600, - "priority": null, - "type": "NS", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189882, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49", - "ttl": 3600, - "priority": null, - "type": "SSHFP", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:08Z", - "updated_at": "2017-03-09T15:55:08Z" - }, - { - "id": 11189883, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73", - "ttl": 3600, - "priority": null, - "type": "SSHFP", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189884, - "zone_id": "unit.tests", - "parent_id": null, - "name": "txt", - "content": "Bah bah black sheep", - "ttl": 600, - "priority": null, - "type": "TXT", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189885, - "zone_id": "unit.tests", - "parent_id": null, 
- "name": "txt", - "content": "have you any wool.", - "ttl": 600, - "priority": null, - "type": "TXT", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189886, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "1.2.3.4", - "ttl": 300, - "priority": null, - "type": "A", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189887, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "1.2.3.5", - "ttl": 300, - "priority": null, - "type": "A", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189889, - "zone_id": "unit.tests", - "parent_id": null, - "name": "www", - "content": "2.2.3.6", - "ttl": 300, - "priority": null, - "type": "A", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11189890, - "zone_id": "unit.tests", - "parent_id": null, - "name": "mx", - "content": "smtp-4.unit.tests", - "ttl": 300, - "priority": 10, - "type": "MX", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189891, - "zone_id": "unit.tests", - "parent_id": null, - "name": "mx", - "content": "smtp-2.unit.tests", - "ttl": 300, - "priority": 20, - "type": "MX", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189892, - "zone_id": "unit.tests", - "parent_id": null, - "name": "mx", - "content": "smtp-3.unit.tests", - "ttl": 300, - "priority": 30, - "type": "MX", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - } - ], - "pagination": { - "current_page": 1, - "per_page": 20, - "total_entries": 29, - "total_pages": 2 - } -} diff --git a/tests/fixtures/dnsimple-page-2.json b/tests/fixtures/dnsimple-page-2.json deleted file mode 100644 index c12c4f4..0000000 --- a/tests/fixtures/dnsimple-page-2.json +++ /dev/null @@ -1,202 +0,0 @@ -{ - "data": [ - { - "id": 11189893, - "zone_id": "unit.tests", - "parent_id": null, - "name": "mx", - "content": "smtp-1.unit.tests", - "ttl": 300, - "priority": 40, - "type": "MX", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189894, - "zone_id": "unit.tests", - "parent_id": null, - "name": "aaaa", - "content": "2601:644:500:e210:62f8:1dff:feb8:947a", - "ttl": 600, - "priority": null, - "type": "AAAA", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189895, - "zone_id": "unit.tests", - "parent_id": null, - "name": "cname", - "content": "unit.tests", - "ttl": 300, - "priority": null, - "type": "CNAME", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189896, - "zone_id": "unit.tests", - "parent_id": null, - "name": "ptr", - "content": "foo.bar.com.", - "ttl": 300, - "priority": null, - "type": "PTR", - "regions": [ - "global" - ], 
- "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189897, - "zone_id": "unit.tests", - "parent_id": null, - "name": "www.sub", - "content": "2.2.3.6", - "ttl": 300, - "priority": null, - "type": "A", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:10Z", - "updated_at": "2017-03-09T15:55:10Z" - }, - { - "id": 11189898, - "zone_id": "unit.tests", - "parent_id": null, - "name": "naptr", - "content": "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .", - "ttl": 600, - "priority": null, - "type": "NAPTR", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:11Z", - "updated_at": "2017-03-09T15:55:11Z" - }, - { - "id": 11189899, - "zone_id": "unit.tests", - "parent_id": null, - "name": "naptr", - "content": "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .", - "ttl": 600, - "priority": null, - "type": "NAPTR", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:11Z", - "updated_at": "2017-03-09T15:55:11Z" - }, - { - "id": 11189900, - "zone_id": "unit.tests", - "parent_id": null, - "name": "spf", - "content": "v=spf1 ip4:192.168.0.1/16-all", - "ttl": 600, - "priority": null, - "type": "SPF", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:11Z", - "updated_at": "2017-03-09T15:55:11Z" - }, - { - "id": 11189901, - "zone_id": "unit.tests", - "parent_id": null, - "name": "txt", - "content": "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs", - "ttl": 600, - "priority": null, - "type": "TXT", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 11188802, - "zone_id": "unit.tests", - "parent_id": null, - "name": "txt", - "content": "ALIAS for www.unit.tests.", - "ttl": 600, - "priority": null, - "type": "TXT", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 12188803, - "zone_id": "unit.tests", - "parent_id": null, - "name": "", - "content": "0 issue \"ca.unit.tests\"", - "ttl": 3600, - "priority": null, - "type": "CAA", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - }, - { - "id": 12188805, - "zone_id": "unit.tests", - "parent_id": null, - "name": "included", - "content": "unit.tests", - "ttl": 3600, - "priority": null, - "type": "CNAME", - "regions": [ - "global" - ], - "system_record": false, - "created_at": "2017-03-09T15:55:09Z", - "updated_at": "2017-03-09T15:55:09Z" - } - ], - "pagination": { - "current_page": 2, - "per_page": 20, - "total_entries": 32, - "total_pages": 2 - } -} diff --git a/tests/test_octodns_processor_awsacm.py b/tests/test_octodns_processor_awsacm.py index e184755..3a99447 100644 --- a/tests/test_octodns_processor_awsacm.py +++ b/tests/test_octodns_processor_awsacm.py @@ -7,64 +7,10 @@ from __future__ import absolute_import, division, print_function, \ from unittest import TestCase -from octodns.processor.awsacm import AwsAcmMangingProcessor -from octodns.record import Record -from octodns.zone import Zone - -zone = Zone('unit.tests.', []) -records = { - 'root': Record.new(zone, '_deadbeef', { - 'ttl': 30, - 'type': 'CNAME', - 'value': '_0123456789abcdef.acm-validations.aws.', - }), - 'sub': Record.new(zone, 
'_deadbeef.sub', { - 'ttl': 30, - 'type': 'CNAME', - 'value': '_0123456789abcdef.acm-validations.aws.', - }), - 'not-cname': Record.new(zone, '_deadbeef.not-cname', { - 'ttl': 30, - 'type': 'AAAA', - 'value': '::1', - }), - 'not-acm': Record.new(zone, '_not-acm', { - 'ttl': 30, - 'type': 'CNAME', - 'value': 'localhost.unit.tests.', - }), -} - class TestAwsAcmMangingProcessor(TestCase): - def test_process_zones(self): - acm = AwsAcmMangingProcessor('acm') - - source = Zone(zone.name, []) - # Unrelated stuff that should be untouched - source.add_record(records['not-cname']) - source.add_record(records['not-acm']) - # ACM records that should be ignored - source.add_record(records['root']) - source.add_record(records['sub']) - - got = acm.process_source_zone(source) - self.assertEqual([ - '_deadbeef.not-cname', - '_not-acm', - ], sorted([r.name for r in got.records])) - - existing = Zone(zone.name, []) - # Unrelated stuff that should be untouched - existing.add_record(records['not-cname']) - existing.add_record(records['not-acm']) - # Stuff that will be ignored - existing.add_record(records['root']) - existing.add_record(records['sub']) - - got = acm.process_target_zone(existing) - self.assertEqual([ - '_deadbeef.not-cname', - '_not-acm' - ], sorted([r.name for r in got.records])) + def test_missing(self): + with self.assertRaises(ModuleNotFoundError): + from octodns.processor.awsacm import AwsAcmMangingProcessor + AwsAcmMangingProcessor diff --git a/tests/test_octodns_provider_dnsimple.py b/tests/test_octodns_provider_dnsimple.py index e496d41..611caea 100644 --- a/tests/test_octodns_provider_dnsimple.py +++ b/tests/test_octodns_provider_dnsimple.py @@ -5,231 +5,12 @@ from __future__ import absolute_import, division, print_function, \ unicode_literals -from mock import Mock, call -from os.path import dirname, join -from requests import HTTPError -from requests_mock import ANY, mock as requests_mock from unittest import TestCase -from octodns.record import Record -from octodns.provider.dnsimple import DnsimpleClientNotFound, DnsimpleProvider -from octodns.provider.yaml import YamlProvider -from octodns.zone import Zone +class TestDnsimpleShim(TestCase): -class TestDnsimpleProvider(TestCase): - expected = Zone('unit.tests.', []) - source = YamlProvider('test', join(dirname(__file__), 'config')) - source.populate(expected) - - # Our test suite differs a bit, add our NS and remove the simple one - expected.add_record(Record.new(expected, 'under', { - 'ttl': 3600, - 'type': 'NS', - 'values': [ - 'ns1.unit.tests.', - 'ns2.unit.tests.', - ] - })) - for record in list(expected.records): - if record.name == 'sub' and record._type == 'NS': - expected._remove_record(record) - break - - def test_populate(self): - - # Sandbox - provider = DnsimpleProvider('test', 'token', 42, 'true') - self.assertTrue('sandbox' in provider._client.base) - - provider = DnsimpleProvider('test', 'token', 42) - self.assertFalse('sandbox' in provider._client.base) - - # Bad auth - with requests_mock() as mock: - mock.get(ANY, status_code=401, - text='{"message": "Authentication failed"}') - - with self.assertRaises(Exception) as ctx: - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals('Unauthorized', str(ctx.exception)) - - # General error - with requests_mock() as mock: - mock.get(ANY, status_code=502, text='Things caught fire') - - with self.assertRaises(HTTPError) as ctx: - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(502, ctx.exception.response.status_code) - - # 
Non-existent zone doesn't populate anything - with requests_mock() as mock: - mock.get(ANY, status_code=404, - text='{"message": "Domain `foo.bar` not found"}') - - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(set(), zone.records) - - # No diffs == no changes - with requests_mock() as mock: - base = 'https://api.dnsimple.com/v2/42/zones/unit.tests/' \ - 'records?page=' - with open('tests/fixtures/dnsimple-page-1.json') as fh: - mock.get(f'{base}1', text=fh.read()) - with open('tests/fixtures/dnsimple-page-2.json') as fh: - mock.get(f'{base}2', text=fh.read()) - - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(16, len(zone.records)) - changes = self.expected.changes(zone, provider) - self.assertEquals(0, len(changes)) - - # 2nd populate makes no network calls/all from cache - again = Zone('unit.tests.', []) - provider.populate(again) - self.assertEquals(16, len(again.records)) - - # bust the cache - del provider._zone_records[zone.name] - - # test handling of invalid content - with requests_mock() as mock: - with open('tests/fixtures/dnsimple-invalid-content.json') as fh: - mock.get(ANY, text=fh.read()) - - zone = Zone('unit.tests.', []) - provider.populate(zone, lenient=True) - self.assertEquals(set([ - Record.new(zone, '', { - 'ttl': 3600, - 'type': 'SSHFP', - 'values': [] - }, lenient=True), - Record.new(zone, '_srv._tcp', { - 'ttl': 600, - 'type': 'SRV', - 'values': [] - }, lenient=True), - Record.new(zone, 'naptr', { - 'ttl': 600, - 'type': 'NAPTR', - 'values': [] - }, lenient=True), - ]), zone.records) - - def test_apply(self): - provider = DnsimpleProvider('test', 'token', 42) - - resp = Mock() - resp.json = Mock() - provider._client._request = Mock(return_value=resp) - - # non-existent domain, create everything - resp.json.side_effect = [ - DnsimpleClientNotFound, # no zone in populate - DnsimpleClientNotFound, # no domain during apply - ] - plan = provider.plan(self.expected) - - # No root NS, no ignored, no excluded - n = len(self.expected.records) - 8 - self.assertEquals(n, len(plan.changes)) - self.assertEquals(n, provider.apply(plan)) - self.assertFalse(plan.exists) - - provider._client._request.assert_has_calls([ - # created the domain - call('POST', '/domains', data={'name': 'unit.tests'}), - # created at least some of the record with expected data - call('POST', '/zones/unit.tests/records', data={ - 'content': '1.2.3.4', - 'type': 'A', - 'name': '', - 'ttl': 300}), - call('POST', '/zones/unit.tests/records', data={ - 'content': '1.2.3.5', - 'type': 'A', - 'name': '', - 'ttl': 300}), - call('POST', '/zones/unit.tests/records', data={ - 'content': '0 issue "ca.unit.tests"', - 'type': 'CAA', - 'name': '', - 'ttl': 3600}), - call('POST', '/zones/unit.tests/records', data={ - 'content': '1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49', - 'type': 'SSHFP', - 'name': '', - 'ttl': 3600}), - call('POST', '/zones/unit.tests/records', data={ - 'content': '1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73', - 'type': 'SSHFP', - 'name': '', - 'ttl': 3600}), - call('POST', '/zones/unit.tests/records', data={ - 'content': '20 30 foo-1.unit.tests.', - 'priority': 10, - 'type': 'SRV', - 'name': '_srv._tcp', - 'ttl': 600 - }), - ]) - # expected number of total calls - self.assertEquals(28, provider._client._request.call_count) - - provider._client._request.reset_mock() - - # delete 1 and update 1 - provider._client.records = Mock(return_value=[ - { - 'id': 11189897, - 'name': 'www', - 'content': '1.2.3.4', - 'ttl': 300, - 'type': 'A', - }, 
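- # second value of the same www A record; together with the row above it forms the single logical record the plan deletes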
- { - 'id': 11189898, - 'name': 'www', - 'content': '2.2.3.4', - 'ttl': 300, - 'type': 'A', - }, - { - 'id': 11189899, - 'name': 'ttl', - 'content': '3.2.3.4', - 'ttl': 600, - 'type': 'A', - } - ]) - # Domain exists, we don't care about return - resp.json.side_effect = ['{}'] - - wanted = Zone('unit.tests.', []) - wanted.add_record(Record.new(wanted, 'ttl', { - 'ttl': 300, - 'type': 'A', - 'value': '3.2.3.4' - })) - - plan = provider.plan(wanted) - self.assertTrue(plan.exists) - self.assertEquals(2, len(plan.changes)) - self.assertEquals(2, provider.apply(plan)) - # recreate for update, and deletes for the 2 parts of the other - provider._client._request.assert_has_calls([ - call('POST', '/zones/unit.tests/records', data={ - 'content': '3.2.3.4', - 'type': 'A', - 'name': 'ttl', - 'ttl': 300 - }), - call('DELETE', '/zones/unit.tests/records/11189899'), - call('DELETE', '/zones/unit.tests/records/11189897'), - call('DELETE', '/zones/unit.tests/records/11189898') - ], any_order=True) + def test_missing(self): + with self.assertRaises(ModuleNotFoundError): + from octodns.provider.dnsimple import DnsimpleProvider + DnsimpleProvider diff --git a/tests/test_octodns_provider_ns1.py b/tests/test_octodns_provider_ns1.py index 01a2ab1..3d85dd0 100644 --- a/tests/test_octodns_provider_ns1.py +++ b/tests/test_octodns_provider_ns1.py @@ -5,2859 +5,12 @@ from __future__ import absolute_import, division, print_function, \ unicode_literals -from collections import defaultdict -from mock import call, patch -from ns1.rest.errors import AuthException, RateLimitException, \ - ResourceException from unittest import TestCase -from octodns.record import Delete, Record, Update -from octodns.provider.ns1 import Ns1Client, Ns1Exception, Ns1Provider -from octodns.provider.plan import Plan -from octodns.zone import Zone - class TestNs1Provider(TestCase): - zone = Zone('unit.tests.', []) - expected = set() - expected.add(Record.new(zone, '', { - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - })) - expected.add(Record.new(zone, 'foo', { - 'ttl': 33, - 'type': 'A', - 'values': ['1.2.3.4', '1.2.3.5'], - 'meta': {}, - })) - expected.add(Record.new(zone, 'geo', { - 'ttl': 34, - 'type': 'A', - 'values': ['101.102.103.104', '101.102.103.105'], - 'geo': {'NA-US-NY': ['201.202.203.204']}, - 'meta': {}, - })) - expected.add(Record.new(zone, 'cname', { - 'ttl': 34, - 'type': 'CNAME', - 'value': 'foo.unit.tests.', - })) - expected.add(Record.new(zone, '', { - 'ttl': 35, - 'type': 'MX', - 'values': [{ - 'preference': 10, - 'exchange': 'mx1.unit.tests.', - }, { - 'preference': 20, - 'exchange': 'mx2.unit.tests.', - }] - })) - expected.add(Record.new(zone, 'naptr', { - 'ttl': 36, - 'type': 'NAPTR', - 'values': [{ - 'flags': 'U', - 'order': 100, - 'preference': 100, - 'regexp': '!^.*$!sip:info@bar.example.com!', - 'replacement': '.', - 'service': 'SIP+D2U', - }, { - 'flags': 'S', - 'order': 10, - 'preference': 100, - 'regexp': '!^.*$!sip:info@bar.example.com!', - 'replacement': '.', - 'service': 'SIP+D2U', - }] - })) - expected.add(Record.new(zone, '', { - 'ttl': 37, - 'type': 'NS', - 'values': ['ns1.unit.tests.', 'ns2.unit.tests.'], - })) - expected.add(Record.new(zone, '_srv._tcp', { - 'ttl': 38, - 'type': 'SRV', - 'values': [{ - 'priority': 10, - 'weight': 20, - 'port': 30, - 'target': 'foo-1.unit.tests.', - }, { - 'priority': 12, - 'weight': 30, - 'port': 30, - 'target': 'foo-2.unit.tests.', - }] - })) - expected.add(Record.new(zone, 'sub', { - 'ttl': 39, - 'type': 'NS', - 'values': ['ns3.unit.tests.', 
'ns4.unit.tests.'], - })) - expected.add(Record.new(zone, '', { - 'ttl': 40, - 'type': 'CAA', - 'value': { - 'flags': 0, - 'tag': 'issue', - 'value': 'ca.unit.tests', - }, - })) - expected.add(Record.new(zone, 'urlfwd', { - 'ttl': 41, - 'type': 'URLFWD', - 'value': { - 'path': '/', - 'target': 'http://foo.unit.tests', - 'code': 301, - 'masking': 2, - 'query': 0, - }, - })) - expected.add(Record.new(zone, '1.2.3.4', { - 'ttl': 42, - 'type': 'PTR', - 'values': ['one.one.one.one.', 'two.two.two.two.'], - })) - ns1_records = [{ - 'type': 'A', - 'ttl': 32, - 'short_answers': ['1.2.3.4'], - 'domain': 'unit.tests.', - }, { - 'type': 'A', - 'ttl': 33, - 'short_answers': ['1.2.3.4', '1.2.3.5'], - 'domain': 'foo.unit.tests.', - }, { - 'type': 'A', - 'ttl': 34, - 'short_answers': ['101.102.103.104', '101.102.103.105'], - 'domain': 'geo.unit.tests', - }, { - 'type': 'CNAME', - 'ttl': 34, - 'short_answers': ['foo.unit.tests'], - 'domain': 'cname.unit.tests.', - }, { - 'type': 'MX', - 'ttl': 35, - 'short_answers': ['10 mx1.unit.tests.', '20 mx2.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'NAPTR', - 'ttl': 36, - 'short_answers': [ - '10 100 S SIP+D2U !^.*$!sip:info@bar.example.com! .', - '100 100 U SIP+D2U !^.*$!sip:info@bar.example.com! .' - ], - 'domain': 'naptr.unit.tests.', - }, { - 'type': 'NS', - 'ttl': 37, - 'short_answers': ['ns1.unit.tests.', 'ns2.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'SRV', - 'ttl': 38, - 'short_answers': ['12 30 30 foo-2.unit.tests.', - '10 20 30 foo-1.unit.tests'], - 'domain': '_srv._tcp.unit.tests.', - }, { - 'type': 'NS', - 'ttl': 39, - 'short_answers': ['ns3.unit.tests.', 'ns4.unit.tests'], - 'domain': 'sub.unit.tests.', - }, { - 'type': 'CAA', - 'ttl': 40, - 'short_answers': ['0 issue ca.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'URLFWD', - 'ttl': 41, - 'short_answers': ['/ http://foo.unit.tests 301 2 0'], - 'domain': 'urlfwd.unit.tests.', - }, { - 'type': 'PTR', - 'ttl': 42, - 'short_answers': ['one.one.one.one.', 'two.two.two.two.'], - 'domain': '1.2.3.4.unit.tests.', - }] - - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.retrieve') - def test_populate(self, zone_retrieve_mock, record_retrieve_mock): - provider = Ns1Provider('test', 'api-key') - - def reset(): - provider._client.reset_caches() - zone_retrieve_mock.reset_mock() - record_retrieve_mock.reset_mock() - - # Bad auth - reset() - zone_retrieve_mock.side_effect = AuthException('unauthorized') - zone = Zone('unit.tests.', []) - with self.assertRaises(AuthException) as ctx: - provider.populate(zone) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - - # General error - reset() - zone_retrieve_mock.side_effect = ResourceException('boom') - zone = Zone('unit.tests.', []) - with self.assertRaises(ResourceException) as ctx: - provider.populate(zone) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - - # Non-existent zone doesn't populate anything - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - zone = Zone('unit.tests.', []) - exists = provider.populate(zone) - self.assertEquals(set(), zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - self.assertFalse(exists) - - # Existing zone w/o records - reset() - ns1_zone = { - 'records': [{ - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': 
{}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - # It's tier 3 so we'll do a full lookup - record_retrieve_mock.side_effect = ns1_zone['records'] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(1, len(zone.records)) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - # Existing zone w/records - reset() - ns1_zone = { - 'records': self.ns1_records + [{ - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - # It's tier 3 so we'll do a full lookup - record_retrieve_mock.side_effect = ns1_zone['records'] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(self.expected, zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - # Test skipping unsupported record type - reset() - ns1_zone = { - 'records': self.ns1_records + [{ - 'type': 'UNSUPPORTED', - 'ttl': 42, - 'short_answers': ['unsupported'], - 'domain': 'unsupported.unit.tests.', - }, { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(self.expected, zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - @patch('ns1.rest.records.Records.delete') - @patch('ns1.rest.records.Records.update') - @patch('ns1.rest.records.Records.create') - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.create') - @patch('ns1.rest.zones.Zones.retrieve') - def test_sync(self, zone_retrieve_mock, zone_create_mock, - record_retrieve_mock, record_create_mock, - record_update_mock, record_delete_mock): - provider = Ns1Provider('test', 'api-key') - - desired = Zone('unit.tests.', []) - for r in self.expected: - desired.add_record(r) - - plan = provider.plan(desired) - # everything except the root NS - expected_n = len(self.expected) - 1 - self.assertEquals(expected_n, len(plan.changes)) - self.assertTrue(plan.exists) - - def reset(): - provider._client.reset_caches() - record_retrieve_mock.reset_mock() - zone_create_mock.reset_mock() - zone_retrieve_mock.reset_mock() - - # Fails, general error - reset() - zone_retrieve_mock.side_effect = ResourceException('boom') - with 
self.assertRaises(ResourceException) as ctx: - provider.apply(plan) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - - # Fails, bad auth - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - zone_create_mock.side_effect = AuthException('unauthorized') - with self.assertRaises(AuthException) as ctx: - provider.apply(plan) - self.assertEquals(zone_create_mock.side_effect, ctx.exception) - - # non-existent zone, create - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - - zone_create_mock.side_effect = ['foo'] - # Test out the create rate-limit handling, then successes for the rest - record_create_mock.side_effect = [ - RateLimitException('boo', period=0), - ] + ([None] * len(self.expected)) - - got_n = provider.apply(plan) - self.assertEquals(expected_n, got_n) - - # Zone was created - zone_create_mock.assert_has_calls([call('unit.tests')]) - # Checking that we got some of the expected records too - record_create_mock.assert_has_calls([ - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}} - ], filters=[], ttl=32), - call('unit.tests', 'unit.tests', 'CAA', answers=[ - (0, 'issue', 'ca.unit.tests') - ], ttl=40), - call('unit.tests', 'unit.tests', 'MX', answers=[ - (10, 'mx1.unit.tests.'), (20, 'mx2.unit.tests.') - ], ttl=35), - call('unit.tests', '1.2.3.4.unit.tests', 'PTR', answers=[ - 'one.one.one.one.', 'two.two.two.two.', - ], ttl=42), - ]) - - # Update & delete - reset() - - ns1_zone = { - 'records': self.ns1_records + [{ - 'type': 'A', - 'ttl': 42, - 'short_answers': ['9.9.9.9'], - 'domain': 'delete-me.unit.tests.', - }, { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "short_answers": [ - '1.1.1.1', - '1.2.3.4', - '2.3.4.5', - '3.4.5.6', - '4.5.6.7', - ], - 'tier': 3, # This flags it as advanced, full load required - 'ttl': 34, - }], - } - ns1_zone['records'][0]['short_answers'][0] = '2.2.2.2' - - ns1_record = { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - } - - record_retrieve_mock.side_effect = [ns1_record, ns1_record] - zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone] - plan = provider.plan(desired) - self.assertEquals(3, len(plan.changes)) - # Shouldn't rely on order so just count classes - classes = defaultdict(lambda: 0) - for change in plan.changes: - classes[change.__class__] += 1 - self.assertEquals(1, classes[Delete]) - self.assertEquals(2, classes[Update]) - - record_update_mock.side_effect = [ - RateLimitException('one', period=0), - None, - None, - ] - record_delete_mock.side_effect = [ - RateLimitException('two', period=0), - None, - None, - ] - - record_retrieve_mock.side_effect = [ns1_record, ns1_record] - zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone] - got_n = provider.apply(plan) - self.assertEquals(3, got_n) - - record_update_mock.assert_has_calls([ - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}}], - filters=[], - ttl=32), - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}}], - filters=[], - ttl=32), - call('unit.tests', 'geo.unit.tests', 'A', answers=[ - 
{'answer': ['101.102.103.104'], 'meta': {}}, - {'answer': ['101.102.103.105'], 'meta': {}}, - { - 'answer': ['201.202.203.204'], - 'meta': {'iso_region_code': ['NA-US-NY']} - }], - filters=[ - {'filter': 'shuffle', 'config': {}}, - {'filter': 'geotarget_country', 'config': {}}, - {'filter': 'select_first_n', 'config': {'N': 1}}], - ttl=34) - ]) - - def test_escaping(self): - provider = Ns1Provider('test', 'api-key') - record = { - 'ttl': 31, - 'short_answers': ['foo; bar baz; blip'] - } - self.assertEquals(['foo\\; bar baz\\; blip'], - provider._data_for_SPF('SPF', record)['values']) - - record = { - 'ttl': 31, - 'short_answers': ['no', 'foo; bar baz; blip', 'yes'] - } - self.assertEquals(['no', 'foo\\; bar baz\\; blip', 'yes'], - provider._data_for_TXT('TXT', record)['values']) - - zone = Zone('unit.tests.', []) - record = Record.new(zone, 'spf', { - 'ttl': 34, - 'type': 'SPF', - 'value': 'foo\\; bar baz\\; blip' - }) - params, _ = provider._params_for_SPF(record) - self.assertEquals(['foo; bar baz; blip'], params['answers']) - - record = Record.new(zone, 'txt', { - 'ttl': 35, - 'type': 'TXT', - 'value': 'foo\\; bar baz\\; blip' - }) - params, _ = provider._params_for_TXT(record) - self.assertEquals(['foo; bar baz; blip'], params['answers']) - - def test_data_for_CNAME(self): - provider = Ns1Provider('test', 'api-key') - - # answers from ns1 - a_record = { - 'ttl': 31, - 'type': 'CNAME', - 'short_answers': ['foo.unit.tests.'] - } - a_expected = { - 'ttl': 31, - 'type': 'CNAME', - 'value': 'foo.unit.tests.' - } - self.assertEqual(a_expected, - provider._data_for_CNAME(a_record['type'], a_record)) - - # no answers from ns1 - b_record = { - 'ttl': 32, - 'type': 'CNAME', - 'short_answers': [] - } - b_expected = { - 'ttl': 32, - 'type': 'CNAME', - 'value': None - } - self.assertEqual(b_expected, - provider._data_for_CNAME(b_record['type'], b_record)) - - -class TestNs1ProviderDynamic(TestCase): - zone = Zone('unit.tests.', []) - - def record(self): - # return a new object each time so we can mess with it without causing - # problems from test to test - return Record.new(self.zone, '', { - 'dynamic': { - 'pools': { - 'lhr': { - 'fallback': 'iad', - 'values': [{ - 'value': '3.4.5.6', - }], - }, - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - }, { - 'value': '2.3.4.5', - }], - }, - }, - 'rules': [{ - 'geos': [ - 'AF', - 'EU-GB', - 'NA-US-FL' - ], - 'pool': 'lhr', - }, { - 'geos': [ - 'AF-ZW', - ], - 'pool': 'iad', - }, { - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - }, - 'ns1': { - 'healthcheck': { - 'connect_timeout': 5, - 'response_timeout': 6, - }, - }, - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - - def aaaa_record(self): - return Record.new(self.zone, '', { - 'dynamic': { - 'pools': { - 'lhr': { - 'fallback': 'iad', - 'values': [{ - 'value': '::ffff:3.4.5.6', - }], - }, - 'iad': { - 'values': [{ - 'value': '::ffff:1.2.3.4', - }, { - 'value': '::ffff:2.3.4.5', - }], - }, - }, - 'rules': [{ - 'geos': [ - 'AF', - 'EU-GB', - 'NA-US-FL' - ], - 'pool': 'lhr', - }, { - 'geos': [ - 'AF-ZW', - ], - 'pool': 'iad', - }, { - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - } - }, - 'ttl': 32, - 'type': 'AAAA', - 'value': '::ffff:1.2.3.4', - 'meta': {}, - }) - - def cname_record(self): - return Record.new(self.zone, 'foo', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': 
'iad.unit.tests.', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - } - }, - 'ttl': 33, - 'type': 'CNAME', - 'value': 'value.unit.tests.', - 'meta': {}, - }) - - def test_notes(self): - provider = Ns1Provider('test', 'api-key') - - self.assertEquals({}, provider._parse_notes(None)) - self.assertEquals({}, provider._parse_notes('')) - self.assertEquals({}, provider._parse_notes('blah-blah-blah')) - - # Round tripping - data = { - 'key': 'value', - 'priority': '1', - } - notes = provider._encode_notes(data) - self.assertEquals(data, provider._parse_notes(notes)) - - def test_monitors_for(self): - provider = Ns1Provider('test', 'api-key') - - # pre-populate the client's monitors cache - monitor_one = { - 'config': { - 'host': '1.2.3.4', - }, - 'notes': 'host:unit.tests type:A', - } - monitor_four = { - 'config': { - 'host': '2.3.4.5', - }, - 'notes': 'host:unit.tests type:A', - } - monitor_five = { - 'config': { - 'host': 'iad.unit.tests', - }, - 'notes': 'host:foo.unit.tests type:CNAME', - } - provider._client._monitors_cache = { - 'one': monitor_one, - 'two': { - 'config': { - 'host': '8.8.8.8', - }, - 'notes': 'host:unit.tests type:AAAA', - }, - 'three': { - 'config': { - 'host': '9.9.9.9', - }, - 'notes': 'host:other.unit.tests type:A', - }, - 'four': monitor_four, - 'five': monitor_five, - 'six': { - 'config': { - 'host': '10.10.10.10', - }, - 'notes': 'non-conforming notes', - }, - 'seven': { - 'config': { - 'host': '11.11.11.11', - }, - 'notes': None, - }, - } - - # Would match, but won't get there b/c it's not dynamic - record = Record.new(self.zone, '', { - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - self.assertEquals({}, provider._monitors_for(record)) - - # Will match some records - self.assertEquals({ - '1.2.3.4': monitor_one, - '2.3.4.5': monitor_four, - }, provider._monitors_for(self.record())) - - # Check match for CNAME values - self.assertEquals({ - 'iad.unit.tests.': monitor_five, - }, provider._monitors_for(self.cname_record())) - - def test_uuid(self): - # Just a smoke test/for coverage - provider = Ns1Provider('test', 'api-key') - self.assertTrue(provider._uuid()) - - @patch('octodns.provider.ns1.Ns1Provider._uuid') - @patch('ns1.rest.data.Feed.create') - def test_feed_create(self, datafeed_create_mock, uuid_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = {} - - uuid_mock.reset_mock() - datafeed_create_mock.reset_mock() - uuid_mock.side_effect = ['xxxxxxxxxxxxxx'] - feed = { - 'id': 'feed', - } - datafeed_create_mock.side_effect = [feed] - monitor = { - 'id': 'one', - 'name': 'one name', - 'config': { - 'host': '1.2.3.4', - }, - 'notes': 'host:unit.tests type:A', - } - self.assertEquals('feed', provider._feed_create(monitor)) - datafeed_create_mock.assert_has_calls([call('foo', 'one name - xxxxxx', - {'jobid': 'one'})]) - - @patch('octodns.provider.ns1.Ns1Provider._feed_create') - @patch('octodns.provider.ns1.Ns1Client.monitors_create') - @patch('octodns.provider.ns1.Ns1Client.notifylists_create') - def test_monitor_create(self, notifylists_create_mock, - monitors_create_mock, feed_create_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 
'foo' - provider._client._feeds_for_monitors = {} - - notifylists_create_mock.reset_mock() - monitors_create_mock.reset_mock() - feed_create_mock.reset_mock() - notifylists_create_mock.side_effect = [{ - 'id': 'nl-id', - }] - monitors_create_mock.side_effect = [{ - 'id': 'mon-id', - }] - feed_create_mock.side_effect = ['feed-id'] - monitor = { - 'name': 'test monitor', - } - provider._client._notifylists_cache = {} - monitor_id, feed_id = provider._monitor_create(monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitors_create_mock.assert_has_calls([call(name='test monitor', - notify_list='nl-id')]) - - @patch('octodns.provider.ns1.Ns1Provider._feed_create') - @patch('octodns.provider.ns1.Ns1Client.monitors_create') - @patch('octodns.provider.ns1.Ns1Client._try') - def test_monitor_create_shared_notifylist(self, try_mock, - monitors_create_mock, - feed_create_mock): - provider = Ns1Provider('test', 'api-key', shared_notifylist=True) - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = {} - - # First time we'll need to create the shared list - provider._client._notifylists_cache = {} - try_mock.reset_mock() - monitors_create_mock.reset_mock() - feed_create_mock.reset_mock() - try_mock.side_effect = [{ - 'id': 'nl-id', - 'name': provider.SHARED_NOTIFYLIST_NAME, - }] - monitors_create_mock.side_effect = [{ - 'id': 'mon-id', - }] - feed_create_mock.side_effect = ['feed-id'] - monitor = { - 'name': 'test monitor', - } - monitor_id, feed_id = provider._monitor_create(monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitors_create_mock.assert_has_calls([call(name='test monitor', - notify_list='nl-id')]) - try_mock.assert_called_once() - # The shared notifylist should be cached now - self.assertEquals([provider.SHARED_NOTIFYLIST_NAME], - list(provider._client._notifylists_cache.keys())) - - # Second time we'll use the cached version - try_mock.reset_mock() - monitors_create_mock.reset_mock() - feed_create_mock.reset_mock() - monitors_create_mock.side_effect = [{ - 'id': 'mon-id', - }] - feed_create_mock.side_effect = ['feed-id'] - monitor = { - 'name': 'test monitor', - } - monitor_id, feed_id = provider._monitor_create(monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitors_create_mock.assert_has_calls([call(name='test monitor', - notify_list='nl-id')]) - try_mock.assert_not_called() - - def test_monitor_gen(self): - provider = Ns1Provider('test', 'api-key') - - value = '3.4.5.6' - record = self.record() - monitor = provider._monitor_gen(record, value) - self.assertEquals(value, monitor['config']['host']) - self.assertTrue('\\nHost: send.me\\r' in monitor['config']['send']) - self.assertFalse(monitor['config']['ssl']) - self.assertEquals('host:unit.tests type:A', monitor['notes']) - - record._octodns['healthcheck']['host'] = None - monitor = provider._monitor_gen(record, value) - self.assertTrue(r'\nHost: 3.4.5.6\r' in monitor['config']['send']) - - record._octodns['healthcheck']['protocol'] = 'HTTPS' - monitor = provider._monitor_gen(record, value) - self.assertTrue(monitor['config']['ssl']) - - record._octodns['healthcheck']['protocol'] = 'TCP' - monitor = provider._monitor_gen(record, value) - # No http send done - self.assertFalse('send' in monitor['config']) - # No http response expected - self.assertFalse('rules' in monitor) - - 
record._octodns['ns1']['healthcheck']['policy'] = 'all' - monitor = provider._monitor_gen(record, value) - self.assertEquals('all', monitor['policy']) - - record._octodns['ns1']['healthcheck']['frequency'] = 300 - monitor = provider._monitor_gen(record, value) - self.assertEquals(300, monitor['frequency']) - - record._octodns['ns1']['healthcheck']['rapid_recheck'] = True - monitor = provider._monitor_gen(record, value) - self.assertTrue(monitor['rapid_recheck']) - - record._octodns['ns1']['healthcheck']['connect_timeout'] = 1 - monitor = provider._monitor_gen(record, value) - self.assertEquals(1000, monitor['config']['connect_timeout']) - - record._octodns['ns1']['healthcheck']['response_timeout'] = 2 - monitor = provider._monitor_gen(record, value) - self.assertEquals(2000, monitor['config']['response_timeout']) - - def test_monitor_gen_AAAA(self): - provider = Ns1Provider('test', 'api-key') - - value = '::ffff:3.4.5.6' - record = self.aaaa_record() - monitor = provider._monitor_gen(record, value) - self.assertTrue(monitor['config']['ipv6']) - - def test_monitor_gen_CNAME(self): - provider = Ns1Provider('test', 'api-key') - - value = 'iad.unit.tests.' - record = self.cname_record() - monitor = provider._monitor_gen(record, value) - self.assertEquals(value[:-1], monitor['config']['host']) - - def test_monitor_is_match(self): - provider = Ns1Provider('test', 'api-key') - - # Empty matches empty - self.assertTrue(provider._monitor_is_match({}, {})) - - # Anything matches empty - self.assertTrue(provider._monitor_is_match({}, { - 'anything': 'goes' - })) - - # Missing doesn't match - self.assertFalse(provider._monitor_is_match({ - 'expect': 'this', - }, { - 'anything': 'goes' - })) - - # Identical matches - self.assertTrue(provider._monitor_is_match({ - 'expect': 'this', - }, { - 'expect': 'this', - })) - - # Different values don't match - self.assertFalse(provider._monitor_is_match({ - 'expect': 'this', - }, { - 'expect': 'that', - })) - - # Different sub-values don't match - self.assertFalse(provider._monitor_is_match({ - 'expect': { - 'this': 'to-be', - }, - }, { - 'expect': { - 'this': 'something-else', - }, - })) - - @patch('octodns.provider.ns1.Ns1Provider._feed_create') - @patch('octodns.provider.ns1.Ns1Client.monitors_update') - @patch('octodns.provider.ns1.Ns1Provider._monitor_create') - @patch('octodns.provider.ns1.Ns1Provider._monitor_gen') - def test_monitor_sync(self, monitor_gen_mock, monitor_create_mock, - monitors_update_mock, feed_create_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - def reset(): - feed_create_mock.reset_mock() - monitor_create_mock.reset_mock() - monitor_gen_mock.reset_mock() - monitors_update_mock.reset_mock() - - # No existing monitor - reset() - monitor_gen_mock.side_effect = [{'key': 'value'}] - monitor_create_mock.side_effect = [('mon-id', 'feed-id')] - value = '1.2.3.4' - record = self.record() - monitor_id, feed_id = provider._monitor_sync(record, value, None) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitor_gen_mock.assert_has_calls([call(record, value)]) - monitor_create_mock.assert_has_calls([call({'key': 'value'})]) - monitors_update_mock.assert_not_called() - feed_create_mock.assert_not_called() - - # Existing monitor that doesn't need updates - reset() - monitor = { - 'id': 'mon-id', - 'key': 'value', 
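- # _monitor_gen is mocked below to return this exact config, so the existing monitor matches and no create/update calls are expected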
- 'name': 'monitor name', - } - monitor_gen_mock.side_effect = [monitor] - monitor_id, feed_id = provider._monitor_sync(record, value, - monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitor_gen_mock.assert_called_once() - monitor_create_mock.assert_not_called() - monitors_update_mock.assert_not_called() - feed_create_mock.assert_not_called() - - # Existing monitor that doesn't need updates, but is missing its feed - reset() - monitor = { - 'id': 'mon-id2', - 'key': 'value', - 'name': 'monitor name', - } - monitor_gen_mock.side_effect = [monitor] - feed_create_mock.side_effect = ['feed-id2'] - monitor_id, feed_id = provider._monitor_sync(record, value, - monitor) - self.assertEquals('mon-id2', monitor_id) - self.assertEquals('feed-id2', feed_id) - monitor_gen_mock.assert_called_once() - monitor_create_mock.assert_not_called() - monitors_update_mock.assert_not_called() - feed_create_mock.assert_has_calls([call(monitor)]) - - # Existing monitor that needs updates - reset() - monitor = { - 'id': 'mon-id', - 'key': 'value', - 'name': 'monitor name', - } - gened = { - 'other': 'thing', - } - monitor_gen_mock.side_effect = [gened] - monitor_id, feed_id = provider._monitor_sync(record, value, - monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitor_gen_mock.assert_called_once() - monitor_create_mock.assert_not_called() - monitors_update_mock.assert_has_calls([call('mon-id', other='thing')]) - feed_create_mock.assert_not_called() - - @patch('octodns.provider.ns1.Ns1Client.notifylists_delete') - @patch('octodns.provider.ns1.Ns1Client.monitors_delete') - @patch('octodns.provider.ns1.Ns1Client.datafeed_delete') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_monitors_gc(self, monitors_for_mock, datafeed_delete_mock, - monitors_delete_mock, notifylists_delete_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - def reset(): - datafeed_delete_mock.reset_mock() - monitors_delete_mock.reset_mock() - monitors_for_mock.reset_mock() - notifylists_delete_mock.reset_mock() - - # No active monitors and no existing, nothing will happen - reset() - monitors_for_mock.side_effect = [{}] - record = self.record() - provider._monitors_gc(record) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - # No active monitors and one existing, delete all the things - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - } - }] - provider._client._notifylists_cache = { - 'not shared': { - 'id': 'nl-id', - 'name': 'not shared', - } - } - provider._monitors_gc(record) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')]) - monitors_delete_mock.assert_has_calls([call('mon-id')]) - notifylists_delete_mock.assert_has_calls([call('nl-id')]) - - # Same existing, this time in active list, should be noop - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - } - }] - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - 
monitors_delete_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - # Non-active monitor w/o a feed, and another monitor that's left alone - # b/c it's active - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - }, - 'y': { - 'id': 'mon-id2', - 'notify_list': 'nl-id2', - }, - }] - provider._client._notifylists_cache = { - 'not shared': { - 'id': 'nl-id', - 'name': 'not shared', - }, - 'not shared 2': { - 'id': 'nl-id2', - 'name': 'not shared 2', - } - } - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_has_calls([call('nl-id2')]) - - # Non-active monitor w/o a notifylist, generally shouldn't happen, but - # code should handle it just in case someone gets clicky in the UI - reset() - monitors_for_mock.side_effect = [{ - 'y': { - 'id': 'mon-id2', - 'notify_list': 'nl-id2', - }, - }] - provider._client._notifylists_cache = { - 'not shared a': { - 'id': 'nl-ida', - 'name': 'not shared a', - }, - 'not shared b': { - 'id': 'nl-idb', - 'name': 'not shared b', - } - } - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_not_called() - - # Non-active monitor with a shared notifylist, monitor deleted, but - # notifylist is left alone - reset() - provider.shared_notifylist = True - monitors_for_mock.side_effect = [{ - 'y': { - 'id': 'mon-id2', - 'notify_list': 'shared', - }, - }] - provider._client._notifylists_cache = { - 'shared': { - 'id': 'shared', - 'name': provider.SHARED_NOTIFYLIST_NAME, - }, - } - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_not_called() - - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_with_pool_status(self, monitors_for_mock): - provider = Ns1Provider('test', 'api-key') - monitors_for_mock.reset_mock() - monitors_for_mock.return_value = {} - record = Record.new(self.zone, '', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - 'status': 'up', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - params, active_monitors = provider._params_for_dynamic(record) - self.assertEqual(params['answers'][0]['meta']['up'], True) - self.assertEqual(len(active_monitors), 0) - - # check for down also - record.dynamic.pools['iad'].data['values'][0]['status'] = 'down' - params, active_monitors = provider._params_for_dynamic(record) - self.assertEqual(params['answers'][0]['meta']['up'], False) - self.assertEqual(len(active_monitors), 0) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_region_only(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - 
# provider._monitor_sync(). Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule1 = record.data['dynamic']['rules'][1] - rule0['geos'] = ['AF', 'EU'] - rule1['geos'] = ['AS'] - ret, monitor_ids = provider._params_for_A(record) - self.assertEquals(10, len(ret['answers'])) - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_REGION) - self.assertEquals({ - 'iad__catchall': { - 'meta': { - 'note': 'rule-order:2' - } - }, - 'iad__georegion': { - 'meta': { - 'georegion': ['ASIAPAC'], - 'note': 'rule-order:1' - } - }, - 'lhr__georegion': { - 'meta': { - 'georegion': ['AFRICA', 'EUROPE'], - 'note': 'fallback:iad rule-order:0' - } - } - }, ret['regions']) - self.assertEquals({'mid-1', 'mid-2', 'mid-3'}, monitor_ids) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_state_only(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule1 = record.data['dynamic']['rules'][1] - rule0['geos'] = ['AF', 'EU'] - rule1['geos'] = ['NA-US-CA', 'NA-CA-NL'] - ret, _ = provider._params_for_A(record) - self.assertEquals(10, len(ret['answers'])) - exp = provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY - self.assertEquals(ret['filters'], exp) - self.assertEquals({ - 'iad__catchall': { - 'meta': { - 'note': 'rule-order:2' - } - }, - 'iad__country': { - 'meta': { - 'note': 'rule-order:1', - 'us_state': ['CA'], - 'ca_province': ['NL'] - } - }, - 'lhr__georegion': { - 'meta': { - 'georegion': ['AFRICA', 'EUROPE'], - 'note': 'fallback:iad rule-order:0' - } - } - }, ret['regions']) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_continent_and_countries(self, - monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
Mock their return values so that we don't - # make NS1 API calls during tests - provider._client.reset_caches() - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule1 = record.data['dynamic']['rules'][1] - rule0['geos'] = ['AF', 'EU', 'NA-US-CA'] - rule1['geos'] = ['AS', 'AS-IN'] - ret, _ = provider._params_for_A(record) - - self.assertEquals(17, len(ret['answers'])) - # Deeply check the answers we have here - # group the answers based on where they came from - notes = defaultdict(list) - for answer in ret['answers']: - notes[answer['meta']['note']].append(answer) - # Remove the meta and region part since it'll vary based on the - # exact pool, that'll let us == them down below - del answer['meta'] - del answer['region'] - - # Expected groups. iad has occurrences in here: a country and region - # that were split out based on targeting a continent and a state. It - # finally has a catchall. Those are examples of the two ways pools get - # expanded. - # - # lhr splits in two, with a region and country, and includes a fallback - # - # All values now include their own `pool:` name, as - # well as both lhr georegion (for continents) and country. The first is - # an example of a repeated target pool in a rule (only allowed when the - # 2nd is a catchall.) - self.assertEquals(['fallback: from:iad__catchall pool:iad', - 'fallback: from:iad__country pool:iad', - 'fallback: from:iad__georegion pool:iad', - 'fallback: from:lhr__country pool:iad', - 'fallback: from:lhr__georegion pool:iad', - 'fallback:iad from:lhr__country pool:lhr', - 'fallback:iad from:lhr__georegion pool:lhr', - 'from:--default--'], - sorted(notes.keys())) - - # All the iad's should match (after meta and region were removed) - self.assertEquals(notes['from:iad__catchall'], - notes['from:iad__country']) - self.assertEquals(notes['from:iad__catchall'], - notes['from:iad__georegion']) - - # The lhrs should match each other too - self.assertEquals(notes['from:lhr__georegion'], - notes['from:lhr__country']) - - # We have both country and region filter chain entries - exp = provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY - self.assertEquals(ret['filters'], exp) - - # and our region details match the expected behaviors/targeting - self.assertEquals({ - 'iad__catchall': { - 'meta': { - 'note': 'rule-order:2' - } - }, - 'iad__country': { - 'meta': { - 'country': ['IN'], - 'note': 'rule-order:1' - } - }, - 'iad__georegion': { - 'meta': { - 'georegion': ['ASIAPAC'], - 'note': 'rule-order:1' - } - }, - 'lhr__country': { - 'meta': { - 'note': 'fallback:iad rule-order:0', - 'us_state': ['CA'] - } - }, - 'lhr__georegion': { - 'meta': { - 'georegion': ['AFRICA', 'EUROPE'], - 'note': 'fallback:iad rule-order:0' - } - } - }, ret['regions']) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_oceania(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - # Set geos to 'OC' in rules[0] (pool - 'lhr') - # Check returned dict has list of countries under 'OC' - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule0['geos'] = ['OC'] - ret, _ = provider._params_for_A(record) - - # Make sure the country list expanded into all the OC countries - got = set(ret['regions']['lhr__country']['meta']['country']) - self.assertEquals(got, - Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']) - - # When rules has 'OC', it is converted to list of countries in the - # params. Check that the returned filters is the filter chain with country - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_COUNTRY) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic(self, monitors_for_mock, monitors_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - monitors_for_mock.reset_mock() - monitors_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitors_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - # This indirectly calls into _params_for_dynamic and tests the - # handling to get there - record = self.record() - # copy an existing answer from a different pool to 'lhr' - # in order to test answer repetition across pools (monitor reuse) - record.dynamic._data()['pools']['lhr']['values'].append( - record.dynamic._data()['pools']['iad']['values'][0]) - ret, _ = provider._params_for_A(record) - - # Given that the record has both country and region in the rules, - # the returned filter chain should be one with region and country - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY) - - monitors_for_mock.assert_has_calls([call(record)]) - monitors_sync_mock.assert_has_calls([ - call(record, '1.2.3.4', None), - call(record, '2.3.4.5', None), - call(record, '3.4.5.6', 'mid-3'), - ]) - - record = Record.new(self.zone, 'geo', { - 'ttl': 34, - 'type': 'A', - 'values': ['101.102.103.104', '101.102.103.105'], - 'geo': {'EU': ['201.202.203.204']}, - 'meta': {}, - }) - params, _ = provider._params_for_geo_A(record) - self.assertEquals([], params['filters']) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_CNAME(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extraneous calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - 'iad.unit.tests.': 'mid-1', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ] - - record = self.cname_record() - ret, _ = provider._params_for_CNAME(record) - - # Check if the default value was correctly read and populated - # All other dynamic record test cases are covered by dynamic_A tests - self.assertEquals(ret['answers'][-1]['answer'][0], 'value.unit.tests.') - - def test_data_for_dynamic(self): - provider = Ns1Provider('test', 'api-key') - - # empty record turns into empty data - ns1_record = { - 'answers': [], - 'domain': 'unit.tests', - 'filters': provider._BASIC_FILTER_CHAIN, - 'regions': {}, - 'ttl': 42, - } - data = provider._data_for_dynamic('A', ns1_record) - self.assertEquals({ - 'dynamic': { - 'pools': {}, - 'rules': [], - }, - 'ttl': 42, - 'type': 'A', - 'values': [], - }, data) - - # Test out a small, but realistic setup that covers all the options - # We have country and region in the test config - filters = provider._get_updated_filter_chain(True, True) - catchall_pool_name = 'iad__catchall' - ns1_record = { - 'answers': [{ - 'answer': ['3.4.5.6'], - 'meta': { - 'priority': 1, - 'note': 'from:lhr__country', - 'up': {}, - }, - 'region': 'lhr', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 2, - 'weight': 12, - 'note': 'from:iad', - 'up': {}, - }, - 'region': 'lhr', - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 3, - 'note': 'from:--default--', - }, - 'region': 'lhr', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'note': 'from:iad', - 'up': {}, - }, - 'region': 'iad', - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - }, - 'region': 'iad', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'note': f'from:{catchall_pool_name}', - 'up': {}, - }, - 'region': catchall_pool_name, - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - }, - 'region': catchall_pool_name, - }], - 'domain': 'unit.tests', - 'filters': filters, - 'regions': { - # lhr will use the new-split style names (and that will require - # combining in the code to produce the expected answer) - 'lhr__georegion': { - 'meta': { - 'note': 'rule-order:1 fallback:iad', - 'georegion': ['AFRICA'], - }, - }, - 'lhr__country': { - 'meta': { - 'note': 'rule-order:1 fallback:iad', - 'country': ['MX'], - 'us_state': ['OR'], - 'ca_province': ['NL'] - }, - }, - # iad will use the old style "plain" region naming. 
-                # We won't see mixed names like this in practice, but this
-                # should exercise both paths
-                'iad': {
-                    'meta': {
-                        'note': 'rule-order:2',
-                        'country': ['ZW'],
-                    },
-                },
-                catchall_pool_name: {
-                    'meta': {
-                        'note': 'rule-order:3',
-                    },
-                }
-            },
-            'tier': 3,
-            'ttl': 42,
-        }
-        data = provider._data_for_dynamic('A', ns1_record)
-        self.assertEquals({
-            'dynamic': {
-                'pools': {
-                    'iad': {
-                        'fallback': None,
-                        'values': [{
-                            'value': '2.3.4.5',
-                            'weight': 12,
-                        }],
-                    },
-                    'lhr': {
-                        'fallback': 'iad',
-                        'values': [{
-                            'weight': 1,
-                            'value': '3.4.5.6',
-                        }],
-                    },
-                },
-                'rules': [{
-                    '_order': '1',
-                    'geos': [
-                        'AF',
-                        'NA-CA-NL',
-                        'NA-MX',
-                        'NA-US-OR'
-                    ],
-                    'pool': 'lhr',
-                }, {
-                    '_order': '2',
-                    'geos': [
-                        'AF-ZW',
-                    ],
-                    'pool': 'iad',
-                }, {
-                    '_order': '3',
-                    'pool': 'iad',
-                }],
-            },
-            'ttl': 42,
-            'type': 'A',
-            'values': ['1.2.3.4'],
-        }, data)
-
-        # Same answer if we go through _data_for_A, which outsources the job
-        # to _data_for_dynamic
-        data2 = provider._data_for_A('A', ns1_record)
-        self.assertEquals(data, data2)
-
-        # Same answer if we have an old-style catchall name
-        old_style_catchall_pool_name = 'catchall__iad'
-        ns1_record['answers'][-2]['region'] = old_style_catchall_pool_name
-        ns1_record['answers'][-1]['region'] = old_style_catchall_pool_name
-        ns1_record['regions'][old_style_catchall_pool_name] = \
-            ns1_record['regions'][catchall_pool_name]
-        del ns1_record['regions'][catchall_pool_name]
-        data3 = provider._data_for_dynamic('A', ns1_record)
-        self.assertEquals(data, data3)
-
-        # Oceania test cases
-        # 1. Full list of countries should return 'OC' in geos
-        oc_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']
-        ns1_record['regions']['lhr__country']['meta']['country'] = \
-            list(oc_countries)
-        data3 = provider._data_for_A('A', ns1_record)
-        self.assertTrue('OC' in data3['dynamic']['rules'][0]['geos'])
-
-        # 2. Partial list of countries should return just those
-        partial_oc_cntry_list = list(oc_countries)[:5]
-        ns1_record['regions']['lhr__country']['meta']['country'] = \
-            partial_oc_cntry_list
-        data4 = provider._data_for_A('A', ns1_record)
-        for c in partial_oc_cntry_list:
-            self.assertTrue(f'OC-{c}' in data4['dynamic']['rules'][0]['geos'])
-
-        # NA test cases
-        # 1. Full list of countries should return 'NA' in geos
-        na_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['NA']
-        del ns1_record['regions']['lhr__country']['meta']['us_state']
-        ns1_record['regions']['lhr__country']['meta']['country'] = \
-            list(na_countries)
-        data5 = provider._data_for_A('A', ns1_record)
-        self.assertTrue('NA' in data5['dynamic']['rules'][0]['geos'])
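
The OC/NA assertions here rely on the provider collapsing a full continent's worth of countries back into a single continent code, while a partial list stays as per-country geos. A standalone sketch of that collapse rule, with a hypothetical CONTINENT_TO_COUNTRIES table standing in for Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES:

    CONTINENT_TO_COUNTRIES = {
        # Hypothetical stand-in; the real table lives on Ns1Provider.
        'OC': {'AU', 'FJ', 'NZ'},
    }

    def countries_to_geos(continent, countries):
        # A complete country list collapses to the bare continent code;
        # anything less stays as individual CONTINENT-COUNTRY geos.
        countries = set(countries)
        if countries == CONTINENT_TO_COUNTRIES[continent]:
            return [continent]
        return sorted(f'{continent}-{c}' for c in countries)

    assert countries_to_geos('OC', ['AU', 'FJ', 'NZ']) == ['OC']
    assert countries_to_geos('OC', ['AU', 'NZ']) == ['OC-AU', 'OC-NZ']
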
-        # 2. Partial list of countries should return just those
-        partial_na_cntry_list = list(na_countries)[:5] + ['SX', 'UM']
-        ns1_record['regions']['lhr__country']['meta']['country'] = \
-            partial_na_cntry_list
-        data6 = provider._data_for_A('A', ns1_record)
-        for c in partial_na_cntry_list:
-            self.assertTrue(f'NA-{c}' in data6['dynamic']['rules'][0]['geos'])
-
-        # Test out fallback-only pools and new-style notes
-        ns1_record = {
-            'answers': [{
-                'answer': ['1.1.1.1'],
-                'meta': {
-                    'priority': 1,
-                    'note': 'from:one__country pool:one fallback:two',
-                    'up': True,
-                },
-                'region': 'one_country',
-            }, {
-                'answer': ['2.2.2.2'],
-                'meta': {
-                    'priority': 2,
-                    'note': 'from:one__country pool:two fallback:three',
-                    'up': {},
-                },
-                'region': 'one_country',
-            }, {
-                'answer': ['3.3.3.3'],
-                'meta': {
-                    'priority': 3,
-                    'note': 'from:one__country pool:three fallback:',
-                    'up': False,
-                },
-                'region': 'one_country',
-            }, {
-                'answer': ['5.5.5.5'],
-                'meta': {
-                    'priority': 4,
-                    'note': 'from:--default--',
-                },
-                'region': 'one_country',
-            }, {
-                'answer': ['4.4.4.4'],
-                'meta': {
-                    'priority': 1,
-                    'note': 'from:four__country pool:four fallback:',
-                    'up': {},
-                },
-                'region': 'four_country',
-            }, {
-                'answer': ['5.5.5.5'],
-                'meta': {
-                    'priority': 2,
-                    'note': 'from:--default--',
-                },
-                'region': 'four_country',
-            }],
-            'domain': 'unit.tests',
-            'filters': filters,
-            'regions': {
-                'one__country': {
-                    'meta': {
-                        'note': 'rule-order:1 fallback:two',
-                        'country': ['CA'],
-                        'us_state': ['OR'],
-                    },
-                },
-                'four__country': {
-                    'meta': {
-                        'note': 'rule-order:2',
-                        'country': ['CA'],
-                        'us_state': ['OR'],
-                    },
-                },
-                catchall_pool_name: {
-                    'meta': {
-                        'note': 'rule-order:3',
-                    },
-                }
-            },
-            'tier': 3,
-            'ttl': 42,
-        }
-        data = provider._data_for_dynamic('A', ns1_record)
-        self.assertEquals({
-            'dynamic': {
'pools': { - 'iad': { - 'fallback': None, - 'values': [{ - 'value': 'iad.unit.tests.', - 'weight': 12, - }], - }, - }, - 'rules': [{ - '_order': '1', - 'pool': 'iad', - }], - }, - 'ttl': 43, - 'type': 'CNAME', - 'value': 'value.unit.tests.', - }, data) - - def test_data_for_invalid_dynamic_CNAME(self): - provider = Ns1Provider('test', 'api-key') - - # Potential setup created outside of octoDNS, so it could be missing - # notes and region names can be arbitrary - filters = provider._get_updated_filter_chain(False, False) - ns1_record = { - 'answers': [{ - 'answer': ['iad.unit.tests.'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'up': {}, - }, - 'region': 'global', - }, { - 'answer': ['value.unit.tests.'], - 'meta': { - 'priority': 2, - 'up': {}, - }, - 'region': 'global', - }], - 'domain': 'foo.unit.tests', - 'filters': filters, - 'regions': { - 'global': {}, - }, - 'tier': 3, - 'ttl': 44, - 'type': 'CNAME', - } - data = provider._data_for_CNAME('CNAME', ns1_record) - self.assertEquals({ - 'ttl': 44, - 'type': 'CNAME', - 'value': None, - }, data) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_dynamic_explicit_countries(self, monitors_for_mock, - monitors_sync_mock): - provider = Ns1Provider('test', 'api-key') - record_data = { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': 'iad.unit.tests.', - 'status': 'up', - }], - }, - 'lhr': { - 'values': [{ - 'value': 'lhr.unit.tests.', - 'status': 'up', - }] - } - }, - 'rules': [ - { - 'geos': ['NA-US'], - 'pool': 'iad', - }, - { - 'geos': ['NA'], - 'pool': 'lhr', - }, - ], - }, - 'ttl': 33, - 'type': 'CNAME', - 'value': 'value.unit.tests.', - } - record = Record.new(self.zone, 'foo', record_data) - - ns1_record, _ = provider._params_for_dynamic(record) - regions = [ - r for r in ns1_record['regions'].values() - if 'US' in r['meta']['country'] - ] - self.assertEquals(len(regions), 1) - - ns1_record['domain'] = record.fqdn[:-1] - data = provider._data_for_dynamic(record._type, ns1_record)['dynamic'] - self.assertEquals(data['rules'][0]['geos'], ['NA-US']) - self.assertEquals(data['rules'][1]['geos'], ['NA']) - - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.retrieve') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_extra_changes(self, monitors_for_mock, zones_retrieve_mock, - records_retrieve_mock): - provider = Ns1Provider('test', 'api-key') - - desired = Zone('unit.tests.', []) - - def reset(): - monitors_for_mock.reset_mock() - provider._client.reset_caches() - records_retrieve_mock.reset_mock() - zones_retrieve_mock.reset_mock() - - # Empty zone and no changes - reset() - - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Non-existent zone. 
No changes - reset() - zones_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - - # Simple record, ignored, filter update lookups ignored - reset() - zones_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - - simple = Record.new(desired, '', { - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - desired.add_record(simple) - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Dynamic record, inspectable - dynamic = Record.new(desired, 'dyn', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - } - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - desired.add_record(dynamic) - - # untouched, but everything in sync so no change needed - reset() - # Generate what we expect to have - provider.record_filters[dynamic.fqdn[:-1]] = { - dynamic._type: provider._get_updated_filter_chain(False, False) - } - gend = provider._monitor_gen(dynamic, '1.2.3.4') - gend.update({ - 'id': 'mid', # need to add an id - 'notify_list': 'xyz', # need to add a notify list (for now) - }) - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - update = Update(dynamic, dynamic) - - # If we don't have a notify list we're broken and we'll expect to see - # an Update - reset() - del gend['notify_list'] - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertEquals(1, len(extra)) - extra = list(extra)[0] - self.assertIsInstance(extra, Update) - self.assertEquals(dynamic, extra.new) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - # Add notify_list back and change the healthcheck protocol, we'll still - # expect to see an update - reset() - gend['notify_list'] = 'xyz' - dynamic._octodns['healthcheck']['protocol'] = 'HTTPS' - del gend['notify_list'] - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertEquals(1, len(extra)) - extra = list(extra)[0] - self.assertIsInstance(extra, Update) - self.assertEquals(dynamic, extra.new) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - # If it's in the changed list, it'll be ignored - reset() - extra = provider._extra_changes(desired, [update]) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Test changes in filters - - # No change in filters - reset() - ns1_zone = { - 'records': [{ - "domain": "dyn.unit.tests", - "zone": "unit.tests", - "type": "A", - "tier": 3, - "filters": provider._BASIC_FILTER_CHAIN - }], - } - monitors_for_mock.side_effect = [{}] - zones_retrieve_mock.side_effect = [ns1_zone] - records_retrieve_mock.side_effect = ns1_zone['records'] - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - - # filters need an update - reset() - ns1_zone = { - 'records': [{ - "domain": "dyn.unit.tests", - "zone": "unit.tests", - "type": "A", - "tier": 3, - "filters": provider._BASIC_FILTER_CHAIN[:-1] - }], - } - monitors_for_mock.side_effect = [{}] - zones_retrieve_mock.side_effect = [ns1_zone] - records_retrieve_mock.side_effect 
= ns1_zone['records'] - ns1_record = ns1_zone['records'][0] - provider.record_filters[ns1_record['domain']] = { - ns1_record['type']: ns1_record['filters'] - } - extra = provider._extra_changes(desired, []) - self.assertTrue(extra) - - # disabled=False in filters doesn't trigger an update - reset() - ns1_zone = { - 'records': [{ - "domain": "dyn.unit.tests", - "zone": "unit.tests", - "type": "A", - "tier": 3, - "filters": provider._BASIC_FILTER_CHAIN - }], - } - ns1_zone['records'][0]['filters'][0]['disabled'] = False - monitors_for_mock.side_effect = [{}] - zones_retrieve_mock.side_effect = [ns1_zone] - records_retrieve_mock.side_effect = ns1_zone['records'] - ns1_record = ns1_zone['records'][0] - provider.record_filters[ns1_record['domain']] = { - ns1_record['type']: ns1_record['filters'] - } - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - - # disabled=True in filters does trigger an update - ns1_zone['records'][0]['filters'][0]['disabled'] = True - extra = provider._extra_changes(desired, []) - self.assertTrue(extra) - - DESIRED = Zone('unit.tests.', []) - - SIMPLE = Record.new(DESIRED, 'sim', { - 'ttl': 33, - 'type': 'A', - 'value': '1.2.3.4', - }) - - # Dynamic record, inspectable - DYNAMIC = Record.new(DESIRED, 'dyn', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - } - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - - def test_has_dynamic(self): - provider = Ns1Provider('test', 'api-key') - - simple_update = Update(self.SIMPLE, self.SIMPLE) - dynamic_update = Update(self.DYNAMIC, self.DYNAMIC) - - self.assertFalse(provider._has_dynamic([simple_update])) - self.assertTrue(provider._has_dynamic([dynamic_update])) - self.assertTrue(provider._has_dynamic([simple_update, dynamic_update])) - - @patch('octodns.provider.ns1.Ns1Client.zones_retrieve') - @patch('octodns.provider.ns1.Ns1Provider._apply_Update') - def test_apply_monitor_regions(self, apply_update_mock, - zones_retrieve_mock): - provider = Ns1Provider('test', 'api-key') - - simple_update = Update(self.SIMPLE, self.SIMPLE) - simple_plan = Plan(self.DESIRED, self.DESIRED, [simple_update], True) - dynamic_update = Update(self.DYNAMIC, self.DYNAMIC) - dynamic_update = Update(self.DYNAMIC, self.DYNAMIC) - dynamic_plan = Plan(self.DESIRED, self.DESIRED, [dynamic_update], - True) - both_plan = Plan(self.DESIRED, self.DESIRED, [simple_update, - dynamic_update], True) - - # always return foo, we aren't testing this part here - zones_retrieve_mock.side_effect = [ - 'foo', - 'foo', - 'foo', - 'foo', - ] - - # Doesn't blow up, and calls apply once - apply_update_mock.reset_mock() - provider._apply(simple_plan) - apply_update_mock.assert_has_calls([call('foo', simple_update)]) - - # Blows up and apply not called - apply_update_mock.reset_mock() - with self.assertRaises(Ns1Exception) as ctx: - provider._apply(dynamic_plan) - self.assertTrue('monitor_regions not set' in str(ctx.exception)) - apply_update_mock.assert_not_called() - - # Blows up and apply not called even though there's a simple - apply_update_mock.reset_mock() - with self.assertRaises(Ns1Exception) as ctx: - provider._apply(both_plan) - self.assertTrue('monitor_regions not set' in str(ctx.exception)) - apply_update_mock.assert_not_called() - - # with monitor_regions set - provider.monitor_regions = ['lga'] - - 
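
test_apply_monitor_regions, which this point sits in, hinges on a simple precondition: _apply refuses plans containing dynamic records until monitor_regions is configured. A minimal sketch of that kind of guard (hypothetical names, not the provider's actual code):

    class Ns1ConfigError(Exception):
        # Stand-in for the provider's Ns1Exception.
        pass

    def check_dynamic_preconditions(changes, monitor_regions):
        # Dynamic records need monitors, and monitors need regions to
        # run from; refuse the whole plan up front rather than failing
        # partway through an apply.
        has_dynamic = any(
            getattr(c.new, 'dynamic', False) for c in changes
        )
        if has_dynamic and not monitor_regions:
            raise Ns1ConfigError('monitor_regions not set')
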
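
The retry behavior pinned down in TestNs1Client below usually amounts to a catch-sleep-retry loop around each API call, honoring the wait period the rate-limit error reports. A rough sketch under those assumptions (generic stand-in exception; the real one comes from the ns1 package):

    from time import sleep

    class RateLimited(Exception):
        # Stand-in for ns1's rate-limit error; carries the advised wait.
        def __init__(self, message, period=0):
            super().__init__(message)
            self.period = period

    def with_retries(func, tries=4):
        # Retry on rate limiting, sleeping the advised period; the
        # final attempt lets the exception escape to the caller.
        for attempt in range(tries):
            try:
                return func()
            except RateLimited as e:
                if attempt == tries - 1:
                    raise
                sleep(e.period)
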
-        apply_update_mock.reset_mock()
-        provider._apply(both_plan)
-        apply_update_mock.assert_has_calls([
-            call('foo', dynamic_update),
-            call('foo', simple_update),
-        ])
-
-
-class TestNs1Client(TestCase):
-
-    @patch('ns1.rest.zones.Zones.retrieve')
-    def test_retry_behavior(self, zone_retrieve_mock):
-        client = Ns1Client('dummy-key')
-
-        # No retry required, just calls and is returned
-        client.reset_caches()
-        zone_retrieve_mock.reset_mock()
-        zone_retrieve_mock.side_effect = ['foo']
-        self.assertEquals('foo', client.zones_retrieve('unit.tests'))
-        zone_retrieve_mock.assert_has_calls([call('unit.tests')])
-
-        # One retry required
-        client.reset_caches()
-        zone_retrieve_mock.reset_mock()
-        zone_retrieve_mock.side_effect = [
-            RateLimitException('boo', period=0),
-            'foo'
-        ]
-        self.assertEquals('foo', client.zones_retrieve('unit.tests'))
-        zone_retrieve_mock.assert_has_calls([call('unit.tests')])
-
-        # Two retries required
-        client.reset_caches()
-        zone_retrieve_mock.reset_mock()
-        zone_retrieve_mock.side_effect = [
-            RateLimitException('boo', period=0),
-            RateLimitException('boo', period=0),
-            'foo'
-        ]
-        self.assertEquals('foo', client.zones_retrieve('unit.tests'))
-        zone_retrieve_mock.assert_has_calls([call('unit.tests')])
-
-        # Exhaust our retries
-        client.reset_caches()
-        zone_retrieve_mock.reset_mock()
-        zone_retrieve_mock.side_effect = [
-            RateLimitException('first', period=0),
-            RateLimitException('boo', period=0),
-            RateLimitException('boo', period=0),
-            RateLimitException('last', period=0),
-        ]
-        with self.assertRaises(RateLimitException) as ctx:
-            client.zones_retrieve('unit.tests')
-        self.assertEquals('last', str(ctx.exception))
-
-    def test_client_config(self):
-        with self.assertRaises(TypeError):
-            Ns1Client()
-
-        client = Ns1Client('dummy-key')
-        self.assertEquals(
-            client._client.config.get('keys'),
-            {'default': {'key': u'dummy-key', 'desc': 'imported API key'}})
-        self.assertEquals(client._client.config.get('follow_pagination'), True)
-        self.assertEquals(
-            client._client.config.get('rate_limit_strategy'), None)
-        self.assertEquals(client._client.config.get('parallelism'), None)
-
-        client = Ns1Client('dummy-key', parallelism=11)
-        self.assertEquals(
-            client._client.config.get('rate_limit_strategy'), 'concurrent')
-        self.assertEquals(client._client.config.get('parallelism'), 11)
-
-        client = Ns1Client('dummy-key', client_config={
-            'endpoint': 'my.endpoint.com', 'follow_pagination': False})
-        self.assertEquals(
-            client._client.config.get('endpoint'), 'my.endpoint.com')
-        self.assertEquals(
-            client._client.config.get('follow_pagination'), False)
-
-    @patch('ns1.rest.data.Source.list')
-    @patch('ns1.rest.data.Source.create')
-    def test_datasource_id(self, datasource_create_mock, datasource_list_mock):
-        client = Ns1Client('dummy-key')
-
-        # First invocation with an empty list triggers a create
-        datasource_list_mock.reset_mock()
-        datasource_create_mock.reset_mock()
-        datasource_list_mock.side_effect = [[]]
-        datasource_create_mock.side_effect = [{
-            'id': 'foo',
-        }]
-        self.assertEquals('foo', client.datasource_id)
-        name = 'octoDNS NS1 Data Source'
-        source_type = 'nsone_monitoring'
-        datasource_create_mock.assert_has_calls([call(name=name,
-                                                      sourcetype=source_type)])
-        datasource_list_mock.assert_called_once()
-
-        # 2nd invocation is cached
-        datasource_list_mock.reset_mock()
-        datasource_create_mock.reset_mock()
-        self.assertEquals('foo', client.datasource_id)
-        datasource_create_mock.assert_not_called()
-        datasource_list_mock.assert_not_called()
-
-        # Reset the client's cache
-        client._datasource_id = None
-
-        # First
invocation with a match in the list finds it and doesn't call - # create - datasource_list_mock.reset_mock() - datasource_create_mock.reset_mock() - datasource_list_mock.side_effect = [[{ - 'id': 'other', - 'name': 'not a match', - }, { - 'id': 'bar', - 'name': name, - }]] - self.assertEquals('bar', client.datasource_id) - datasource_create_mock.assert_not_called() - datasource_list_mock.assert_called_once() - - @patch('ns1.rest.data.Feed.delete') - @patch('ns1.rest.data.Feed.create') - @patch('ns1.rest.data.Feed.list') - def test_feeds_for_monitors(self, datafeed_list_mock, - datafeed_create_mock, - datafeed_delete_mock): - client = Ns1Client('dummy-key') - - # pre-cache datasource_id - client._datasource_id = 'foo' - - # Populate the cache and check the results - datafeed_list_mock.reset_mock() - datafeed_list_mock.side_effect = [[{ - 'config': { - 'jobid': 'the-job', - }, - 'id': 'the-feed', - }, { - 'config': { - 'jobid': 'the-other-job', - }, - 'id': 'the-other-feed', - }]] - expected = { - 'the-job': 'the-feed', - 'the-other-job': 'the-other-feed', - } - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_list_mock.assert_called_once() - - # 2nd call uses cache - datafeed_list_mock.reset_mock() - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_list_mock.assert_not_called() - - # create a feed and make sure it's in the cache/map - datafeed_create_mock.reset_mock() - datafeed_create_mock.side_effect = [{ - 'id': 'new-feed', - }] - client.datafeed_create(client.datasource_id, 'new-name', { - 'jobid': 'new-job', - }) - datafeed_create_mock.assert_has_calls([call('foo', 'new-name', { - 'jobid': 'new-job', - })]) - new_expected = expected.copy() - new_expected['new-job'] = 'new-feed' - self.assertEquals(new_expected, client.feeds_for_monitors) - datafeed_create_mock.assert_called_once() - - # Delete a feed and make sure it's out of the cache/map - datafeed_delete_mock.reset_mock() - client.datafeed_delete(client.datasource_id, 'new-feed') - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_delete_mock.assert_called_once() - - @patch('ns1.rest.monitoring.Monitors.delete') - @patch('ns1.rest.monitoring.Monitors.update') - @patch('ns1.rest.monitoring.Monitors.create') - @patch('ns1.rest.monitoring.Monitors.list') - def test_monitors(self, monitors_list_mock, monitors_create_mock, - monitors_update_mock, monitors_delete_mock): - client = Ns1Client('dummy-key') - - one = { - 'id': 'one', - 'key': 'value', - } - two = { - 'id': 'two', - 'key': 'other-value', - } - - # Populate the cache and check the results - monitors_list_mock.reset_mock() - monitors_list_mock.side_effect = [[one, two]] - expected = { - 'one': one, - 'two': two, - } - self.assertEquals(expected, client.monitors) - monitors_list_mock.assert_called_once() - - # 2nd round pulls it from cache - monitors_list_mock.reset_mock() - self.assertEquals(expected, client.monitors) - monitors_list_mock.assert_not_called() - - # Create a monitor, make sure it's in the list - monitors_create_mock.reset_mock() - monitor = { - 'id': 'new-id', - 'key': 'new-value', - } - monitors_create_mock.side_effect = [monitor] - self.assertEquals(monitor, client.monitors_create(param='eter')) - monitors_create_mock.assert_has_calls([call({}, param='eter')]) - new_expected = expected.copy() - new_expected['new-id'] = monitor - self.assertEquals(new_expected, client.monitors) - - # Update a monitor, make sure it's updated in the cache - monitors_update_mock.reset_mock() - monitor = { - 'id': 'new-id', - 'key': 
'changed-value', - } - monitors_update_mock.side_effect = [monitor] - self.assertEquals(monitor, client.monitors_update('new-id', - key='changed-value')) - monitors_update_mock \ - .assert_has_calls([call('new-id', {}, key='changed-value')]) - new_expected['new-id'] = monitor - self.assertEquals(new_expected, client.monitors) - - # Delete a monitor, make sure it's out of the list - monitors_delete_mock.reset_mock() - monitors_delete_mock.side_effect = ['deleted'] - self.assertEquals('deleted', client.monitors_delete('new-id')) - monitors_delete_mock.assert_has_calls([call('new-id')]) - self.assertEquals(expected, client.monitors) - - @patch('ns1.rest.monitoring.NotifyLists.delete') - @patch('ns1.rest.monitoring.NotifyLists.create') - @patch('ns1.rest.monitoring.NotifyLists.list') - def test_notifylists(self, notifylists_list_mock, notifylists_create_mock, - notifylists_delete_mock): - client = Ns1Client('dummy-key') - - def reset(): - notifylists_create_mock.reset_mock() - notifylists_delete_mock.reset_mock() - notifylists_list_mock.reset_mock() - - reset() - notifylists_list_mock.side_effect = [{}] - expected = { - 'id': 'nl-id', - 'name': 'bar', - } - notifylists_create_mock.side_effect = [expected] - notify_list = [{ - 'config': { - 'sourceid': 'foo', - }, - 'type': 'datafeed', - }] - got = client.notifylists_create(name='some name', - notify_list=notify_list) - self.assertEquals(expected, got) - notifylists_list_mock.assert_called_once() - notifylists_create_mock.assert_has_calls([ - call({'name': 'some name', 'notify_list': notify_list}) - ]) - notifylists_delete_mock.assert_not_called() - - reset() - client.notifylists_delete('nlid') - notifylists_list_mock.assert_not_called() - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_has_calls([call('nlid')]) - - # Delete again, this time with a cache item that needs cleaned out and - # another that needs to be ignored - reset() - client._notifylists_cache = { - 'another': { - 'id': 'notid', - 'name': 'another', - }, - # This one comes 2nd on purpose - 'the-one': { - 'id': 'nlid', - 'name': 'the-one', - }, - } - client.notifylists_delete('nlid') - notifylists_list_mock.assert_not_called() - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_has_calls([call('nlid')]) - # Only another left - self.assertEquals(['another'], list(client._notifylists_cache.keys())) - - reset() - expected = ['one', 'two', 'three'] - notifylists_list_mock.side_effect = [expected] - nls = client.notifylists_list() - self.assertEquals(expected, nls) - notifylists_list_mock.assert_has_calls([call()]) - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - @patch('ns1.rest.records.Records.delete') - @patch('ns1.rest.records.Records.update') - @patch('ns1.rest.records.Records.create') - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.create') - @patch('ns1.rest.zones.Zones.delete') - @patch('ns1.rest.zones.Zones.retrieve') - def test_client_caching(self, zone_retrieve_mock, zone_delete_mock, - zone_create_mock, record_retrieve_mock, - record_create_mock, record_update_mock, - record_delete_mock): - client = Ns1Client('dummy-key') - - def reset(): - zone_retrieve_mock.reset_mock() - zone_delete_mock.reset_mock() - zone_create_mock.reset_mock() - record_retrieve_mock.reset_mock() - record_create_mock.reset_mock() - record_update_mock.reset_mock() - record_delete_mock.reset_mock() - # Testing caches so we don't reset those - - # Initial zone get fetches 
and caches - reset() - zone_retrieve_mock.side_effect = ['foo'] - self.assertEquals('foo', client.zones_retrieve('unit.tests')) - zone_retrieve_mock.assert_has_calls([call('unit.tests')]) - self.assertEquals({ - 'unit.tests': 'foo', - }, client._zones_cache) - - # Subsequent zone get does not fetch and returns from cache - reset() - self.assertEquals('foo', client.zones_retrieve('unit.tests')) - zone_retrieve_mock.assert_not_called() - - # Zone create stores in cache - reset() - zone_create_mock.side_effect = ['bar'] - self.assertEquals('bar', client.zones_create('sub.unit.tests')) - zone_create_mock.assert_has_calls([call('sub.unit.tests')]) - self.assertEquals({ - 'sub.unit.tests': 'bar', - 'unit.tests': 'foo', - }, client._zones_cache) - - # Initial record get fetches and caches - reset() - record_retrieve_mock.side_effect = ['baz'] - self.assertEquals('baz', client.records_retrieve('unit.tests', - 'a.unit.tests', 'A')) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'a.unit.tests', 'A')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - } - } - }, client._records_cache) - - # Subsequent record get does not fetch and returns from cache - reset() - self.assertEquals('baz', client.records_retrieve('unit.tests', - 'a.unit.tests', 'A')) - record_retrieve_mock.assert_not_called() - - # Record create stores in cache - reset() - record_create_mock.side_effect = ['boo'] - self.assertEquals('boo', client.records_create('unit.tests', - 'aaaa.unit.tests', - 'AAAA', key='val')) - record_create_mock.assert_has_calls([call('unit.tests', - 'aaaa.unit.tests', 'AAAA', - key='val')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - }, - 'aaaa.unit.tests': { - 'AAAA': 'boo' - }, - } - }, client._records_cache) - - # Record delete removes from cache and removes zone - reset() - record_delete_mock.side_effect = [{}] - self.assertEquals({}, client.records_delete('unit.tests', - 'aaaa.unit.tests', - 'AAAA')) - record_delete_mock.assert_has_calls([call('unit.tests', - 'aaaa.unit.tests', 'AAAA')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - }, - 'aaaa.unit.tests': {}, - } - }, client._records_cache) - self.assertEquals({ - 'sub.unit.tests': 'bar', - }, client._zones_cache) - - # Delete the other record, no zone this time, record should still go - # away - reset() - record_delete_mock.side_effect = [{}] - self.assertEquals({}, client.records_delete('unit.tests', - 'a.unit.tests', 'A')) - record_delete_mock.assert_has_calls([call('unit.tests', 'a.unit.tests', - 'A')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': {}, - 'aaaa.unit.tests': {}, - } - }, client._records_cache) - self.assertEquals({ - 'sub.unit.tests': 'bar', - }, client._zones_cache) - - # Record update removes zone and caches result - record_update_mock.side_effect = ['done'] - self.assertEquals('done', client.records_update('sub.unit.tests', - 'aaaa.sub.unit.tests', - 'AAAA', key='val')) - record_update_mock.assert_has_calls([call('sub.unit.tests', - 'aaaa.sub.unit.tests', - 'AAAA', key='val')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': {}, - 'aaaa.unit.tests': {}, - }, - 'sub.unit.tests': { - 'aaaa.sub.unit.tests': { - 'AAAA': 'done', - }, - } - }, client._records_cache) - self.assertEquals({}, client._zones_cache) + def test_missing(self): + with self.assertRaises(ModuleNotFoundError): + from octodns.provider.ns1 import Ns1Provider + Ns1Provider diff --git a/tests/test_octodns_provider_route53.py 
b/tests/test_octodns_provider_route53.py index 680c9ce..23e5748 100644 --- a/tests/test_octodns_provider_route53.py +++ b/tests/test_octodns_provider_route53.py @@ -5,3532 +5,12 @@ from __future__ import absolute_import, division, print_function, \ unicode_literals -from botocore.exceptions import ClientError -from botocore.stub import ANY, Stubber from unittest import TestCase -from mock import patch - -from octodns.record import Create, Delete, Record, Update -from octodns.provider.route53 import Route53Provider, _Route53DynamicValue, \ - _Route53GeoDefault, _Route53GeoRecord, Route53ProviderException, \ - _Route53Record, _mod_keyer, _octal_replace -from octodns.zone import Zone - -from helpers import GeoProvider - - -class DummyR53Record(object): - - def __init__(self, health_check_id): - self.health_check_id = health_check_id - - -class TestOctalReplace(TestCase): - - def test_basic(self): - for expected, s in ( - ('', ''), - ('abc', 'abc'), - ('123', '123'), - ('abc123', 'abc123'), - ('*', '\\052'), - ('abc*', 'abc\\052'), - ('*abc', '\\052abc'), - ('123*', '123\\052'), - ('*123', '\\052123'), - ('**', '\\052\\052'), - ): - self.assertEquals(expected, _octal_replace(s)) - - -dynamic_rrsets = [{ - 'Name': '_octodns-default-pool.unit.tests.', - 'ResourceRecords': [{'Value': '1.1.2.1'}, - {'Value': '1.1.2.2'}], - 'TTL': 60, - 'Type': 'A', -}, { - 'HealthCheckId': '76', - 'Name': '_octodns-ap-southeast-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.4.1.1'}], - 'SetIdentifier': 'ap-southeast-1-000', - 'TTL': 60, - 'Type': 'A', - 'Weight': 2 -}, { - 'Name': '_octodns-ap-southeast-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.4.1.2'}], - 'SetIdentifier': 'ap-southeast-1-001', - 'TTL': 60, - 'Type': 'A', - 'Weight': 2 -}, { - 'HealthCheckId': 'ab', - 'Name': '_octodns-eu-central-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.3.1.1'}], - 'SetIdentifier': 'eu-central-1-000', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1 -}, { - 'HealthCheckId': '1e', - 'Name': '_octodns-eu-central-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.3.1.2'}], - 'SetIdentifier': 'eu-central-1-001', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1 -}, { - 'HealthCheckId': '2a', - 'Name': '_octodns-us-east-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.5.1.1'}], - 'SetIdentifier': 'us-east-1-000', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1 -}, { - 'HealthCheckId': '61', - 'Name': '_octodns-us-east-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.5.1.2'}], - 'SetIdentifier': 'us-east-1-001', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1, -}, { - 'AliasTarget': {'DNSName': '_octodns-default-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-us-east-1-pool.unit.tests.', - 'SetIdentifier': 'us-east-1-Secondary-default', - 'Type': 'A' -}, { - 'AliasTarget': { - 'DNSName': '_octodns-us-east-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2' - }, - 'Failover': 'PRIMARY', - 'Name': '_octodns-us-east-1-pool.unit.tests.', - 'SetIdentifier': 'us-east-1-Primary', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-eu-central-1-pool.unit.tests.', - 'SetIdentifier': 'eu-central-1-Secondary-default', - 'Type': 'A' -}, { - 'AliasTarget': { - 'DNSName': '_octodns-eu-central-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2' - }, - 
'Failover': 'PRIMARY', - 'Name': '_octodns-eu-central-1-pool.unit.tests.', - 'SetIdentifier': 'eu-central-1-Primary', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-ap-southeast-1-pool.unit.tests.', - 'SetIdentifier': 'ap-southeast-1-Secondary-default', - 'Type': 'A' -}, { - 'AliasTarget': { - 'DNSName': '_octodns-ap-southeast-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2' - }, - 'Failover': 'PRIMARY', - 'Name': '_octodns-ap-southeast-1-pool.unit.tests.', - 'SetIdentifier': 'ap-southeast-1-Primary', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'GeoLocation': {'CountryCode': 'JP'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '1-ap-southeast-1-AS-JP', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'GeoLocation': {'CountryCode': 'CN'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '1-ap-southeast-1-AS-CN', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-eu-central-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'GeoLocation': {'ContinentCode': 'NA-US-FL'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '2-eu-central-1-NA-US-FL', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-eu-central-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'GeoLocation': {'ContinentCode': 'EU'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '2-eu-central-1-EU', - 'Type': 'A', -}, { - 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'Z2'}, - 'GeoLocation': {'CountryCode': '*'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '3-us-east-1-None', - 'Type': 'A', -}] -dynamic_health_checks = { - '76': { - 'HealthCheckConfig': { - 'Disabled': False, - 'Inverted': False, - } - }, - 'ab': { - 'HealthCheckConfig': { - 'Disabled': True, - 'Inverted': True, - } - }, -} - -dynamic_record_data = { - 'dynamic': { - 'pools': { - 'ap-southeast-1': { - 'fallback': 'us-east-1', - 'values': [{ - 'weight': 2, 'value': '1.4.1.1', 'status': 'obey', - }, { - 'weight': 2, 'value': '1.4.1.2', 'status': 'up', - }] - }, - 'eu-central-1': { - 'fallback': 'us-east-1', - 'values': [{ - 'weight': 1, 'value': '1.3.1.1', 'status': 'down', - }, { - 'weight': 1, 'value': '1.3.1.2', 'status': 'up', - }], - }, - 'us-east-1': { - 'values': [{ - 'weight': 1, 'value': '1.5.1.1', 'status': 'up', - }, { - 'weight': 1, 'value': '1.5.1.2', 'status': 'up', - }], - } - }, - 'rules': [{ - 'geos': ['AS-CN', 'AS-JP'], - 'pool': 'ap-southeast-1', - }, { - 'geos': ['EU', 'NA-US-FL'], - 'pool': 'eu-central-1', - }, { - 'pool': 'us-east-1', - }], - }, - 'ttl': 60, - 'type': 'A', - 'values': [ - '1.1.2.1', - '1.1.2.2', - ], -} class TestRoute53Provider(TestCase): - expected = Zone('unit.tests.', []) - for name, data in ( - ('simple', - {'ttl': 60, 'type': 'A', 'values': ['1.2.3.4', '2.2.3.4']}), - ('', - {'ttl': 61, 'type': 'A', 'values': ['2.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - 'NA-US': ['5.2.3.4', '6.2.3.4'], - 'NA-US-CA': ['7.2.3.4']}}), - ('cname', {'ttl': 62, 'type': 'CNAME', 'value': 'unit.tests.'}), - ('txt', {'ttl': 63, 'type': 'TXT', 'values': ['Hello World!', - 'Goodbye World?']}), - ('', {'ttl': 64, 'type': 'MX', - 
'values': [{ - 'preference': 10, - 'exchange': 'smtp-1.unit.tests.', - }, { - 'preference': 20, - 'exchange': 'smtp-2.unit.tests.', - }]}), - ('naptr', {'ttl': 65, 'type': 'NAPTR', - 'value': { - 'order': 10, - 'preference': 20, - 'flags': 'U', - 'service': 'SIP+D2U', - 'regexp': '!^.*$!sip:info@bar.example.com!', - 'replacement': '.', - }}), - ('_srv._tcp', {'ttl': 66, 'type': 'SRV', 'value': { - 'priority': 10, - 'weight': 20, - 'port': 30, - 'target': 'cname.unit.tests.' - }}), - ('', - {'ttl': 67, 'type': 'NS', 'values': ['8.2.3.4.', '9.2.3.4.']}), - ('sub', - {'ttl': 68, 'type': 'NS', 'values': ['5.2.3.4.', '6.2.3.4.']}), - ('', - {'ttl': 69, 'type': 'CAA', 'value': { - 'flags': 0, - 'tag': 'issue', - 'value': 'ca.unit.tests' - }}), - ): - record = Record.new(expected, name, data) - expected.add_record(record) - caller_ref = f'{Route53Provider.HEALTH_CHECK_VERSION}:A:unit.tests.:1324' - - health_checks = [{ - 'Id': '42', - 'CallerReference': caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': 'ignored-also', - 'CallerReference': 'something-else', - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '5.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 42, - }, { - 'Id': '43', - 'CallerReference': caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '5.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '44', - 'CallerReference': caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '7.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '45', - # won't match anything based on type - 'CallerReference': caller_ref.replace(':A:', ':AAAA:'), - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '7.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - - def _get_stubbed_provider(self): - provider = Route53Provider('test', 'abc', '123') - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - return (provider, stubber) - - def _get_stubbed_delegation_set_provider(self): - provider = Route53Provider('test', 'abc', '123', - delegation_set_id="ABCDEFG123456") - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - return (provider, stubber) - - def _get_stubbed_fallback_auth_provider(self): - provider = Route53Provider('test') - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - return (provider, stubber) - - # with fallback boto 
makes an unstubbed call to the 169. metadata api, this - # stubs that bit out - @patch('botocore.credentials.CredentialResolver.load_credentials') - def test_process_desired_zone(self, fetch_metadata_token_mock): - provider, stubber = self._get_stubbed_fallback_auth_provider() - fetch_metadata_token_mock.side_effect = [None] - - # No records, essentially a no-op - desired = Zone('unit.tests.', []) - got = provider._process_desired_zone(desired) - self.assertEquals(desired.records, got.records) - - # Record without any geos - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'dynamic': { - 'pools': { - 'one': { - 'values': [{ - 'value': '2.2.3.4', - }], - }, - }, - 'rules': [{ - 'pool': 'one', - }], - }, - }) - desired.add_record(record) - got = provider._process_desired_zone(desired) - self.assertEquals(desired.records, got.records) - self.assertEquals(1, len(list(got.records)[0].dynamic.rules)) - self.assertFalse('geos' in list(got.records)[0].dynamic.rules[0].data) - - # Record where all geos are supported - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'dynamic': { - 'pools': { - 'one': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - 'two': { - 'values': [{ - 'value': '2.2.3.4', - }], - }, - }, - 'rules': [{ - 'geos': ['EU', 'NA-US-OR'], - 'pool': 'two', - }, { - 'pool': 'one', - }], - }, - }) - desired.add_record(record) - got = provider._process_desired_zone(desired) - self.assertEquals(2, len(list(got.records)[0].dynamic.rules)) - self.assertEquals(['EU', 'NA-US-OR'], - list(got.records)[0].dynamic.rules[0].data['geos']) - self.assertFalse('geos' in list(got.records)[0].dynamic.rules[1].data) - - # Record with NA-CA-* only rule which is removed - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'dynamic': { - 'pools': { - 'one': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - 'two': { - 'values': [{ - 'value': '2.2.3.4', - }], - }, - }, - 'rules': [{ - 'geos': ['NA-CA-BC'], - 'pool': 'two', - }, { - 'pool': 'one', - }], - }, - }) - desired.add_record(record) - got = provider._process_desired_zone(desired) - self.assertEquals(1, len(list(got.records)[0].dynamic.rules)) - self.assertFalse('geos' in list(got.records)[0].dynamic.rules[0].data) - - # Record with NA-CA-* rule combined with other geos, filtered - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'dynamic': { - 'pools': { - 'one': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - 'two': { - 'values': [{ - 'value': '2.2.3.4', - }], - }, - }, - 'rules': [{ - 'geos': ['EU', 'NA-CA-NB', 'NA-US-OR'], - 'pool': 'two', - }, { - 'pool': 'one', - }], - }, - }) - desired.add_record(record) - got = provider._process_desired_zone(desired) - self.assertEquals(2, len(list(got.records)[0].dynamic.rules)) - self.assertEquals(['EU', 'NA-US-OR'], - list(got.records)[0].dynamic.rules[0].data['geos']) - self.assertFalse('geos' in list(got.records)[0].dynamic.rules[1].data) - - # with fallback boto makes an unstubbed call to the 169. 
metadata api, this - # stubs that bit out - @patch('botocore.credentials.CredentialResolver.load_credentials') - def test_populate_with_fallback(self, fetch_metadata_token_mock): - provider, stubber = self._get_stubbed_fallback_auth_provider() - fetch_metadata_token_mock.side_effect = [None] - - got = Zone('unit.tests.', []) - with self.assertRaises(ClientError): - stubber.add_client_error('list_hosted_zones') - provider.populate(got) - - def test_populate(self): - provider, stubber = self._get_stubbed_provider() - - got = Zone('unit.tests.', []) - with self.assertRaises(ClientError): - stubber.add_client_error('list_hosted_zones') - provider.populate(got) - - with self.assertRaises(ClientError): - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, - {}) - stubber.add_client_error('list_resource_record_sets', - expected_params={'HostedZoneId': u'z42'}) - provider.populate(got) - stubber.assert_no_pending_responses() - - # list_hosted_zones has been cached from now on so we don't have to - # worry about stubbing it - - list_resource_record_sets_resp_p1 = { - 'ResourceRecordSets': [{ - 'Name': 'simple.unit.tests.', - 'Type': 'A', - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }, { - 'Value': '2.2.3.4', - }], - 'TTL': 60, - }, { - 'Name': 'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }, { - 'Value': '3.2.3.4', - }], - 'TTL': 61, - }, { - 'Name': 'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'AF', - }, - 'ResourceRecords': [{ - 'Value': '4.2.3.4', - }], - 'TTL': 61, - }, { - 'Name': 'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': 'US', - }, - 'ResourceRecords': [{ - 'Value': '5.2.3.4', - }, { - 'Value': '6.2.3.4', - }], - 'TTL': 61, - }, { - 'Name': 'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': 'US', - 'SubdivisionCode': 'CA', - }, - 'ResourceRecords': [{ - 'Value': '7.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': True, - 'NextRecordName': 'next_name', - 'NextRecordType': 'next_type', - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp_p1, - {'HostedZoneId': 'z42'}) - - list_resource_record_sets_resp_p2 = { - 'ResourceRecordSets': [{ - 'Name': 'cname.unit.tests.', - 'Type': 'CNAME', - 'ResourceRecords': [{ - 'Value': 'unit.tests.', - }], - 'TTL': 62, - }, { - 'Name': 'txt.unit.tests.', - 'Type': 'TXT', - 'ResourceRecords': [{ - 'Value': '"Hello World!"', - }, { - 'Value': '"Goodbye World?"', - }], - 'TTL': 63, - }, { - 'Name': 'unit.tests.', - 'Type': 'MX', - 'ResourceRecords': [{ - 'Value': '10 smtp-1.unit.tests.', - }, { - 'Value': '20 smtp-2.unit.tests.', - }], - 'TTL': 64, - }, { - 'Name': 'naptr.unit.tests.', - 'Type': 'NAPTR', - 'ResourceRecords': [{ - 'Value': '10 20 "U" "SIP+D2U" ' - '"!^.*$!sip:info@bar.example.com!" 
.', - }], - 'TTL': 65, - }, { - 'Name': '_srv._tcp.unit.tests.', - 'Type': 'SRV', - 'ResourceRecords': [{ - 'Value': '10 20 30 cname.unit.tests.', - }], - 'TTL': 66, - }, { - 'Name': 'unit.tests.', - 'Type': 'NS', - 'ResourceRecords': [{ - 'Value': 'ns1.unit.tests.', - }], - 'TTL': 67, - }, { - 'Name': 'sub.unit.tests.', - 'Type': 'NS', - 'GeoLocation': { - 'ContinentCode': 'AF', - }, - 'ResourceRecords': [{ - 'Value': '5.2.3.4.', - }, { - 'Value': '6.2.3.4.', - }], - 'TTL': 68, - }, { - 'Name': 'soa.unit.tests.', - 'Type': 'SOA', - 'ResourceRecords': [{ - 'Value': 'ns1.unit.tests.', - }], - 'TTL': 69, - }, { - 'Name': 'unit.tests.', - 'Type': 'CAA', - 'ResourceRecords': [{ - 'Value': '0 issue "ca.unit.tests"', - }], - 'TTL': 69, - }, { - 'AliasTarget': { - 'HostedZoneId': 'Z119WBBTVP5WFX', - 'EvaluateTargetHealth': False, - 'DNSName': 'unit.tests.' - }, - 'Type': 'A', - 'Name': 'alias.unit.tests.' - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp_p2, - {'HostedZoneId': 'z42', - 'StartRecordName': 'next_name', - 'StartRecordType': 'next_type'}) - - # Load everything - provider.populate(got) - # Make sure we got what we expected - changes = self.expected.changes(got, GeoProvider()) - self.assertEquals(0, len(changes)) - stubber.assert_no_pending_responses() - - # Populate a zone that doesn't exist - nonexistent = Zone('does.not.exist.', []) - provider.populate(nonexistent) - self.assertEquals(set(), nonexistent.records) - - def test_sync(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, - {}) - list_resource_record_sets_resp = { - 'ResourceRecordSets': [], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - plan = provider.plan(self.expected) - self.assertEquals(9, len(plan.changes)) - self.assertTrue(plan.exists) - for change in plan.changes: - self.assertIsInstance(change, Create) - stubber.assert_no_pending_responses() - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - self.assertEquals(9, provider.apply(plan)) - stubber.assert_no_pending_responses() - - # Delete by monkey patching in a populate that includes an extra record - def add_extra_populate(existing, target, lenient): - for record in self.expected.records: - existing.add_record(record) - record = Record.new(existing, 'extra', - {'ttl': 99, 'type': 'A', - 'values': ['9.9.9.9']}) - existing.add_record(record) - - provider.populate = add_extra_populate - change_resource_record_sets_params = { - 'ChangeBatch': { - 'Changes': [{ - 'Action': 'DELETE', 'ResourceRecordSet': { - 'Name': 'extra.unit.tests.', - 'ResourceRecords': [{'Value': u'9.9.9.9'}], - 'TTL': 99, - 'Type': 'A' - }}], - u'Comment': ANY - }, - 'HostedZoneId': u'z42' - } - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': 
'2017-01-29T01:02:03Z', - }}, change_resource_record_sets_params) - plan = provider.plan(self.expected) - self.assertEquals(1, len(plan.changes)) - self.assertIsInstance(plan.changes[0], Delete) - self.assertEquals(1, provider.apply(plan)) - stubber.assert_no_pending_responses() - - # Update by monkey patching in a populate that modifies the A record - # with geos - def mod_geo_populate(existing, target, lenient): - for record in self.expected.records: - if record._type != 'A' or not record.geo: - existing.add_record(record) - record = Record.new(existing, '', { - 'ttl': 61, - 'type': 'A', - 'values': ['8.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - 'NA-US': ['5.2.3.4', '6.2.3.4'], - 'NA-US-KY': ['7.2.3.4'] - } - }) - existing.add_record(record) - - provider.populate = mod_geo_populate - change_resource_record_sets_params = { - 'ChangeBatch': { - 'Changes': [{ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'GeoLocation': {'CountryCode': 'US', - 'SubdivisionCode': 'KY'}, - 'HealthCheckId': u'44', - 'Name': 'unit.tests.', - 'ResourceRecords': [{'Value': '7.2.3.4'}], - 'SetIdentifier': 'NA-US-KY', - 'TTL': 61, - 'Type': 'A' - } - }, { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'GeoLocation': {'ContinentCode': 'AF'}, - 'Name': 'unit.tests.', - 'HealthCheckId': u'42', - 'ResourceRecords': [{'Value': '4.2.3.4'}], - 'SetIdentifier': 'AF', - 'TTL': 61, - 'Type': 'A' - } - }, { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'GeoLocation': {'CountryCode': 'US'}, - 'HealthCheckId': u'43', - 'Name': 'unit.tests.', - 'ResourceRecords': [{'Value': '5.2.3.4'}, - {'Value': '6.2.3.4'}], - 'SetIdentifier': 'NA-US', - 'TTL': 61, - 'Type': 'A' - } - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'GeoLocation': {'CountryCode': 'US', - 'SubdivisionCode': 'CA'}, - 'HealthCheckId': u'44', - 'Name': 'unit.tests.', - 'ResourceRecords': [{'Value': '7.2.3.4'}], - 'SetIdentifier': 'NA-US-CA', - 'TTL': 61, - 'Type': 'A' - } - }, { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'GeoLocation': {'CountryCode': '*'}, - 'Name': 'unit.tests.', - 'ResourceRecords': [{'Value': '2.2.3.4'}, - {'Value': '3.2.3.4'}], - 'SetIdentifier': 'default', - 'TTL': 61, - 'Type': 'A' - } - }], - 'Comment': ANY - }, - 'HostedZoneId': 'z42' - } - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, change_resource_record_sets_params) - plan = provider.plan(self.expected) - self.assertEquals(1, len(plan.changes)) - self.assertIsInstance(plan.changes[0], Update) - self.assertEquals(1, provider.apply(plan)) - stubber.assert_no_pending_responses() - - # Update converting to non-geo by monkey patching in a populate that - # modifies the A record with geos - def mod_add_geo_populate(existing, target, lenient): - for record in self.expected.records: - if record._type != 'A' or record.geo: - existing.add_record(record) - record = Record.new(existing, 'simple', { - 'ttl': 61, - 'type': 'A', - 'values': ['1.2.3.4', '2.2.3.4'], - 'geo': { - 'OC': ['3.2.3.4', '4.2.3.4'], - } - }) - existing.add_record(record) - - provider.populate = mod_add_geo_populate - change_resource_record_sets_params = { - 'ChangeBatch': { - 'Changes': [{ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'GeoLocation': {'ContinentCode': 'OC'}, - 'Name': 'simple.unit.tests.', - 'ResourceRecords': [{'Value': '3.2.3.4'}, - {'Value': '4.2.3.4'}], - 'SetIdentifier': 'OC', - 'TTL': 61, - 'Type': 'A'} - }, { - 'Action': 'DELETE', - 'ResourceRecordSet': { - 
'GeoLocation': {'CountryCode': '*'}, - 'Name': 'simple.unit.tests.', - 'ResourceRecords': [{'Value': '1.2.3.4'}, - {'Value': '2.2.3.4'}], - 'SetIdentifier': 'default', - 'TTL': 61, - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'simple.unit.tests.', - 'ResourceRecords': [{'Value': '1.2.3.4'}, - {'Value': '2.2.3.4'}], - 'TTL': 60, - 'Type': 'A'} - }], - 'Comment': ANY - }, - 'HostedZoneId': 'z42' - } - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, change_resource_record_sets_params) - plan = provider.plan(self.expected) - self.assertEquals(1, len(plan.changes)) - self.assertIsInstance(plan.changes[0], Update) - self.assertEquals(1, provider.apply(plan)) - stubber.assert_no_pending_responses() - - def test_sync_create(self): - provider, stubber = self._get_stubbed_provider() - - got = Zone('unit.tests.', []) - - list_hosted_zones_resp = { - 'HostedZones': [], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, - {}) - - plan = provider.plan(self.expected) - self.assertEquals(9, len(plan.changes)) - self.assertFalse(plan.exists) - for change in plan.changes: - self.assertIsInstance(change, Create) - stubber.assert_no_pending_responses() - - create_hosted_zone_resp = { - 'HostedZone': { - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }, - 'ChangeInfo': { - 'Id': 'a12', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - 'Comment': 'hrm', - }, - 'DelegationSet': { - 'Id': 'b23', - 'CallerReference': 'blip', - 'NameServers': [ - 'n12.unit.tests.', - ], - }, - 'Location': 'us-east-1', - } - stubber.add_response('create_hosted_zone', - create_hosted_zone_resp, { - 'Name': got.name, - 'CallerReference': ANY, - }) - - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - self.assertEquals(9, provider.apply(plan)) - stubber.assert_no_pending_responses() - - def test_sync_create_with_delegation_set(self): - provider, stubber = self._get_stubbed_delegation_set_provider() - - got = Zone('unit.tests.', []) - - list_hosted_zones_resp = { - 'HostedZones': [], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, - {}) - - plan = provider.plan(self.expected) - self.assertEquals(9, len(plan.changes)) - self.assertFalse(plan.exists) - for change in plan.changes: - self.assertIsInstance(change, Create) - stubber.assert_no_pending_responses() - - create_hosted_zone_resp = { - 'HostedZone': { - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }, - 'ChangeInfo': { - 'Id': 'a12', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - 'Comment': 'hrm', - }, - 
'DelegationSet': { - 'Id': 'b23', - 'CallerReference': 'blip', - 'NameServers': [ - 'n12.unit.tests.', - ], - }, - 'Location': 'us-east-1', - } - stubber.add_response('create_hosted_zone', - create_hosted_zone_resp, { - 'Name': got.name, - 'CallerReference': ANY, - 'DelegationSetId': 'ABCDEFG123456' - }) - - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - self.assertEquals(9, provider.apply(plan)) - stubber.assert_no_pending_responses() - - def test_health_checks_pagination(self): - provider, stubber = self._get_stubbed_provider() - - health_checks_p1 = [{ - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '43', - 'CallerReference': 'abc123', - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '9.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - stubber.add_response('list_health_checks', - { - 'HealthChecks': health_checks_p1, - 'IsTruncated': True, - 'MaxItems': '2', - 'Marker': '', - 'NextMarker': 'moar', - }) - - health_checks_p2 = [{ - 'Id': '44', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '8.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - stubber.add_response('list_health_checks', - { - 'HealthChecks': health_checks_p2, - 'IsTruncated': False, - 'MaxItems': '2', - 'Marker': 'moar', - }, {'Marker': 'moar'}) - - health_checks = provider.health_checks - self.assertEquals({ - '42': health_checks_p1[0], - '44': health_checks_p2[0], - }, health_checks) - stubber.assert_no_pending_responses() - - # get without create - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'A', - 'values': ['2.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - } - }) - value = record.geo['AF'].values[0] - id = provider.get_health_check_id(record, value, 'obey', True) - self.assertEquals('42', id) - - def test_health_check_status_support(self): - provider, stubber = self._get_stubbed_provider() - - health_checks = [{ - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 
'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '1.1.1.1', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '43', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': True, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '2.2.2.2', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '44', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': True, - 'EnableSNI': True, - 'Inverted': True, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '3.3.3.3', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - stubber.add_response('list_health_checks', - { - 'HealthChecks': health_checks, - 'IsTruncated': False, - 'MaxItems': '20', - 'Marker': '', - }) - - health_checks = provider.health_checks - - # get without create - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'A', - 'value': '5.5.5.5', - 'dynamic': { - 'pools': { - 'main': { - 'values': [{ - 'value': '6.6.6.6', - }] - } - }, - 'rules': [{ - 'pool': 'main', - }] - } - }) - self.assertEquals('42', - provider.get_health_check_id(record, '1.1.1.1', - 'obey', False)) - self.assertEquals(None, - provider.get_health_check_id(record, '2.2.2.2', - 'up', False)) - self.assertEquals('44', - provider.get_health_check_id(record, '3.3.3.3', - 'down', False)) - - # If we're not allowed to create we won't find a health check for - # 1.1.1.1 with status up or down - self.assertFalse(provider.get_health_check_id(record, '1.1.1.1', - 'up', False)) - self.assertFalse(provider.get_health_check_id(record, '1.1.1.1', - 'down', False)) - - def test_health_check_create(self): - provider, stubber = self._get_stubbed_provider() - - # No match based on type - caller_ref = f'{Route53Provider.HEALTH_CHECK_VERSION}:AAAA:foo1234' - health_checks = [{ - 'Id': '42', - # No match based on version - 'CallerReference': '9999:A:foo1234', - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '43', - 'CallerReference': caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - stubber.add_response('list_health_checks', { - 'HealthChecks': health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - health_check_config = { - 'Disabled': False, - 'EnableSNI': False, - 'Inverted': False, - 'FailureThreshold': 6, - 'FullyQualifiedDomainName': 'foo.bar.com', - 'IPAddress': '4.2.3.4', - 'MeasureLatency': True, - 'Port': 8080, - 'RequestInterval': 10, - 'ResourcePath': '/_status', - 'Type': 'HTTP' - } - stubber.add_response('create_health_check', { - 'HealthCheck': { - 'Id': '42', - 'CallerReference': self.caller_ref, - 
'HealthCheckConfig': health_check_config, - 'HealthCheckVersion': 1, - }, - 'Location': 'http://url', - }, { - 'CallerReference': ANY, - 'HealthCheckConfig': health_check_config, - }) - stubber.add_response('change_tags_for_resource', {}) - - health_check_config = { - 'Disabled': False, - 'EnableSNI': False, - 'Inverted': False, - 'FailureThreshold': 6, - 'FullyQualifiedDomainName': '4.2.3.4', - 'IPAddress': '4.2.3.4', - 'MeasureLatency': True, - 'Port': 8080, - 'RequestInterval': 10, - 'ResourcePath': '/_status', - 'Type': 'HTTP' - } - stubber.add_response('create_health_check', { - 'HealthCheck': { - 'Id': '43', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': health_check_config, - 'HealthCheckVersion': 1, - }, - 'Location': 'http://url', - }, { - 'CallerReference': ANY, - 'HealthCheckConfig': health_check_config, - }) - stubber.add_response('change_tags_for_resource', {}) - - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'A', - 'values': ['2.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - }, - 'octodns': { - 'healthcheck': { - 'host': 'foo.bar.com', - 'path': '/_status', - 'port': 8080, - 'protocol': 'HTTP', - }, - } - }) - - # if not allowed to create returns none - value = record.geo['AF'].values[0] - id = provider.get_health_check_id(record, value, 'obey', False) - self.assertFalse(id) - - # when allowed to create we do - id = provider.get_health_check_id(record, value, 'obey', True) - self.assertEquals('42', id) - - # when allowed to create and when host is None - record._octodns['healthcheck']['host'] = None - id = provider.get_health_check_id(record, value, 'obey', True) - self.assertEquals('43', id) - stubber.assert_no_pending_responses() - - # A CNAME style healthcheck, without a value - - health_check_config = { - 'Disabled': False, - 'EnableSNI': False, - 'Inverted': False, - 'FailureThreshold': 6, - 'FullyQualifiedDomainName': 'target-1.unit.tests.', - 'MeasureLatency': True, - 'Port': 8080, - 'RequestInterval': 10, - 'ResourcePath': '/_status', - 'Type': 'HTTP' - } - stubber.add_response('create_health_check', { - 'HealthCheck': { - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': health_check_config, - 'HealthCheckVersion': 1, - }, - 'Location': 'http://url', - }, { - 'CallerReference': ANY, - 'HealthCheckConfig': health_check_config, - }) - stubber.add_response('change_tags_for_resource', {}) - - id = provider.get_health_check_id(record, 'target-1.unit.tests.', - 'obey', True) - self.assertEquals('42', id) - stubber.assert_no_pending_responses() - - # TCP health check - - health_check_config = { - 'Disabled': False, - 'EnableSNI': False, - 'Inverted': False, - 'FailureThreshold': 6, - 'MeasureLatency': True, - 'Port': 8080, - 'RequestInterval': 10, - 'Type': 'TCP' - } - stubber.add_response('create_health_check', { - 'HealthCheck': { - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': health_check_config, - 'HealthCheckVersion': 1, - }, - 'Location': 'http://url', - }, { - 'CallerReference': ANY, - 'HealthCheckConfig': health_check_config, - }) - stubber.add_response('change_tags_for_resource', {}) - - record._octodns['healthcheck']['protocol'] = 'TCP' - id = provider.get_health_check_id(record, 'target-1.unit.tests.', - 'obey', True) - self.assertEquals('42', id) - stubber.assert_no_pending_responses() - - def test_health_check_provider_options(self): - provider, stubber = self._get_stubbed_provider() - record = Record.new(self.expected, 'a', { - 'ttl': 61, - 'type': 'A', - 'value': 
'1.2.3.4', - 'octodns': { - 'healthcheck': { - }, - 'route53': { - 'healthcheck': { - 'measure_latency': True, - 'request_interval': 10, - } - } - } - }) - latency = provider._healthcheck_measure_latency(record) - interval = provider._healthcheck_request_interval(record) - self.assertTrue(latency) - self.assertEquals(10, interval) - - record_default = Record.new(self.expected, 'a', { - 'ttl': 61, - 'type': 'A', - 'value': '1.2.3.4', - }) - latency = provider._healthcheck_measure_latency(record_default) - interval = provider._healthcheck_request_interval(record_default) - self.assertTrue(latency) - self.assertEquals(10, interval) - - record = Record.new(self.expected, 'a', { - 'ttl': 61, - 'type': 'A', - 'value': '1.2.3.4', - 'octodns': { - 'healthcheck': { - }, - 'route53': { - 'healthcheck': { - 'measure_latency': False, - 'request_interval': 30, - } - } - } - }) - latency = provider._healthcheck_measure_latency(record) - interval = provider._healthcheck_request_interval(record) - self.assertFalse(latency) - self.assertEquals(30, interval) - - record_invalid = Record.new(self.expected, 'a', { - 'ttl': 61, - 'type': 'A', - 'value': '1.2.3.4', - 'octodns': { - 'healthcheck': { - }, - 'route53': { - 'healthcheck': { - 'request_interval': 20, - } - } - } - }) - with self.assertRaises(Route53ProviderException): - interval = provider._healthcheck_request_interval(record_invalid) - - def test_create_health_checks_provider_options(self): - provider, stubber = self._get_stubbed_provider() - - health_check_config = { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'FailureThreshold': 6, - 'FullyQualifiedDomainName': 'a.unit.tests', - 'IPAddress': '1.2.3.4', - 'MeasureLatency': False, - 'Port': 443, - 'RequestInterval': 30, - 'ResourcePath': '/_dns', - 'Type': 'HTTPS' - } - - stubber.add_response('list_health_checks', { - 'HealthChecks': [], - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('create_health_check', { - 'HealthCheck': { - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': health_check_config, - 'HealthCheckVersion': 1, - }, - 'Location': 'http://url', - }, { - 'CallerReference': ANY, - 'HealthCheckConfig': health_check_config, - }) - stubber.add_response('change_tags_for_resource', {}) - stubber.add_response('change_tags_for_resource', {}) - - record = Record.new(self.expected, 'a', { - 'ttl': 61, - 'type': 'A', - 'value': '2.2.3.4', - 'geo': { - 'AF': ['1.2.3.4'], - }, - 'octodns': { - 'healthcheck': { - }, - 'route53': { - 'healthcheck': { - 'measure_latency': False, - 'request_interval': 30 - } - } - } - }) - - value = record.geo['AF'].values[0] - id = provider.get_health_check_id(record, value, 'obey', True) - ml = provider.health_checks[id]['HealthCheckConfig']['MeasureLatency'] - ri = provider.health_checks[id]['HealthCheckConfig']['RequestInterval'] - self.assertFalse(ml) - self.assertEquals(30, ri) - - def test_health_check_gc(self): - provider, stubber = self._get_stubbed_provider() - - stubber.add_response('list_health_checks', { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'A', - 'values': ['2.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - 'NA-US': ['5.2.3.4', '6.2.3.4'], - # removed one geo - } - }) - - # gc no longer in_use records (directly) - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': '44', - }) - provider._gc_health_checks(record, [ - 
DummyR53Record('42'), - DummyR53Record('43'), - ]) - stubber.assert_no_pending_responses() - - # gc through _mod_Create - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': '44', - }) - change = Create(record) - provider._mod_Create(change, 'z43', []) - stubber.assert_no_pending_responses() - - # gc through _mod_Update - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': '44', - }) - # first record is ignored for our purposes, we have to pass something - change = Update(record, record) - provider._mod_Create(change, 'z43', []) - stubber.assert_no_pending_responses() - - # gc through _mod_Delete, expect 3 to go away, can't check order - # b/c it's not deterministic - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': ANY, - }) - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': ANY, - }) - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': ANY, - }) - change = Delete(record) - provider._mod_Delete(change, 'z43', []) - stubber.assert_no_pending_responses() - - # gc only AAAA, leave the A's alone - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': '45', - }) - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'AAAA', - 'value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b' - }) - provider._gc_health_checks(record, []) - stubber.assert_no_pending_responses() - - def test_legacy_health_check_gc(self): - provider, stubber = self._get_stubbed_provider() - - old_caller_ref = '0000:A:3333' - health_checks = [{ - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '43', - 'CallerReference': old_caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }, { - 'Id': '44', - 'CallerReference': old_caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'other.unit.tests', - 'IPAddress': '4.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }] - - stubber.add_response('list_health_checks', { - 'HealthChecks': health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - # No changes to the record itself - record = Record.new(self.expected, '', { - 'ttl': 61, - 'type': 'A', - 'values': ['2.2.3.4', '3.2.3.4'], - 'geo': { - 'AF': ['4.2.3.4'], - 'NA-US': ['5.2.3.4', '6.2.3.4'], - 'NA-US-CA': ['7.2.3.4'] - } - }) - - # Expect to delete the legacy hc for our record, but not touch the new - # one or the other legacy record - stubber.add_response('delete_health_check', {}, { - 'HealthCheckId': '43', - }) - - provider._gc_health_checks(record, [ - DummyR53Record('42'), - ]) - - def test_no_extra_changes(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - 
}], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) - - # empty is empty - desired = Zone('unit.tests.', []) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - # single record w/o geo is empty - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - }) - desired.add_record(record) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - # short-circuit for unknown zone - other = Zone('other.tests.', []) - extra = provider._extra_changes(desired=other, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - def test_no_changes_with_get_zones_by_name(self): - provider = Route53Provider( - 'test', 'abc', '123', get_zones_by_name=True) - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - list_hosted_zones_by_name_resp_1 = { - 'HostedZones': [{ - 'Id': 'z42', - 'Name': 'unit.tests.', - 'CallerReference': 'abc', - 'Config': { - 'Comment': 'string', - 'PrivateZone': False - }, - 'ResourceRecordSetCount': 123, - }, ], - 'DNSName': 'unit.tests.', - 'HostedZoneId': 'z42', - 'IsTruncated': False, - 'MaxItems': 'string' - } - - list_hosted_zones_by_name_resp_2 = { - 'HostedZones': [{ - 'Id': 'z43', - 'Name': 'unit2.tests.', - 'CallerReference': 'abc', - 'Config': { - 'Comment': 'string', - 'PrivateZone': False - }, - 'ResourceRecordSetCount': 123, - }, ], - 'DNSName': 'unit2.tests.', - 'HostedZoneId': 'z43', - 'IsTruncated': False, - 'MaxItems': 'string' - } - - stubber.add_response( - 'list_hosted_zones_by_name', - list_hosted_zones_by_name_resp_1, - {'DNSName': 'unit.tests.', 'MaxItems': '1'} - ) - - # empty is empty - desired = Zone('unit.tests.', []) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - stubber.add_response( - 'list_hosted_zones_by_name', - list_hosted_zones_by_name_resp_2, - {'DNSName': 'unit2.tests.', 'MaxItems': '1'} - ) - - # empty is empty - desired = Zone('unit2.tests.', []) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - def test_zone_not_found_get_zones_by_name(self): - provider = Route53Provider( - 'test', 'abc', '123', get_zones_by_name=True) - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - list_hosted_zones_by_name_resp = { - 'HostedZones': [{ - 'Id': 'z43', - 'Name': 'bad.tests.', - 'CallerReference': 'abc', - 'Config': { - 'Comment': 'string', - 'PrivateZone': False - }, - 'ResourceRecordSetCount': 123, - }, ], - 'DNSName': 'unit.tests.', - 'HostedZoneId': 'z42', - 'IsTruncated': False, - 'MaxItems': 'string' - } - - stubber.add_response( - 'list_hosted_zones_by_name', - list_hosted_zones_by_name_resp, - {'DNSName': 'unit.tests.', 'MaxItems': '1'} - ) - - # empty is empty - desired = Zone('unit.tests.', []) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals([], extra) - stubber.assert_no_pending_responses() - - def test_plan_apply_with_get_zones_by_name_zone_not_exists(self): - provider = Route53Provider( - 'test', 'abc', '123', get_zones_by_name=True) - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - # this is an empty 
response - # zone name not found - list_hosted_zones_by_name_resp = { - 'HostedZones': [], - 'DNSName': 'unit.tests.', - 'HostedZoneId': 'z42', - 'IsTruncated': False, - 'MaxItems': 'string' - } - - stubber.add_response( - 'list_hosted_zones_by_name', - list_hosted_zones_by_name_resp, - {'DNSName': 'unit.tests.', 'MaxItems': '1'} - ) - - plan = provider.plan(self.expected) - self.assertEquals(9, len(plan.changes)) - - create_hosted_zone_resp = { - 'HostedZone': { - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }, - 'ChangeInfo': { - 'Id': 'a12', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - 'Comment': 'hrm', - }, - 'DelegationSet': { - 'Id': 'b23', - 'CallerReference': 'blip', - 'NameServers': [ - 'n12.unit.tests.', - ], - }, - 'Location': 'us-east-1', - } - stubber.add_response('create_hosted_zone', - create_hosted_zone_resp, { - 'Name': 'unit.tests.', - 'CallerReference': ANY, - }) - - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - self.assertEquals(9, provider.apply(plan)) - stubber.assert_no_pending_responses() - - def test_plan_apply_with_get_zones_by_name_zone_exists(self): - provider = Route53Provider( - 'test', 'abc', '123', get_zones_by_name=True) - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - list_hosted_zones_by_name_resp = { - 'HostedZones': [{ - 'Id': 'z42', - 'Name': 'unit.tests.', - 'CallerReference': 'abc', - 'Config': { - 'Comment': 'string', - 'PrivateZone': False - }, - 'ResourceRecordSetCount': 123, - }, ], - 'DNSName': 'unit.tests.', - 'HostedZoneId': 'z42', - 'IsTruncated': False, - 'MaxItems': 'string' - } - - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - - stubber.add_response( - 'list_hosted_zones_by_name', - list_hosted_zones_by_name_resp, - {'DNSName': 'unit.tests.', 'MaxItems': '1'} - ) - - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - plan = provider.plan(self.expected) - self.assertEquals(10, len(plan.changes)) - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - self.assertEquals(10, provider.apply(plan)) - stubber.assert_no_pending_responses() - - def test_extra_change_no_health_check(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 
'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) - - # record with geo and no health check returns change - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'geo': { - 'NA': ['2.2.3.4'], - } - }) - desired.add_record(record) - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(1, len(extra)) - stubber.assert_no_pending_responses() - - def test_extra_change_has_wrong_health_check(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) - - # record with geo and no health check returns change - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'geo': { - 'NA': ['2.2.3.4'], - } - }) - desired.add_record(record) - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - 'HealthCheckId': '42', - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - stubber.add_response('list_health_checks', { - 'HealthChecks': [{ - 'Id': '42', - 'CallerReference': 'foo', - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', - 'IPAddress': '2.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }], - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(1, len(extra)) - stubber.assert_no_pending_responses() - - for change in (Create(record), Update(record, record), Delete(record)): - extra = provider._extra_changes(desired=desired, changes=[change]) - self.assertEquals(0, len(extra)) - stubber.assert_no_pending_responses() - - def test_extra_change_has_health_check(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) - - # record with geo and no health check returns change - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'geo': { - 'NA': ['2.2.3.4'], - } - }) - desired.add_record(record) - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - # other name - 'Name': 
'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }], - 'TTL': 61, - }, { - # matching name, other type - 'Name': 'a.unit.tests.', - 'Type': 'AAAA', - 'ResourceRecords': [{ - 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b' - }], - 'TTL': 61, - }, { - # default geo - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }], - 'TTL': 61, - }, { - # match w/correct geo - 'Name': 'a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'ContinentCode': 'NA', - }, - 'ResourceRecords': [{ - 'Value': '2.2.3.4', - }], - 'TTL': 61, - 'HealthCheckId': '42', - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - stubber.add_response('list_health_checks', { - 'HealthChecks': [{ - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'a.unit.tests', - 'IPAddress': '2.2.3.4', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }], - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(0, len(extra)) - stubber.assert_no_pending_responses() - - # change b/c of healthcheck path - record._octodns['healthcheck'] = { - 'path': '/_ready' - } - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(1, len(extra)) - stubber.assert_no_pending_responses() - - def test_extra_change_dynamic_has_health_check(self): - provider, stubber = self._get_stubbed_provider() - - list_hosted_zones_resp = { - 'HostedZones': [{ - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) - - # record with geo and no health check returns change - desired = Zone('unit.tests.', []) - record = Record.new(desired, 'a', { - 'ttl': 30, - 'type': 'A', - 'value': '1.2.3.4', - 'dynamic': { - 'pools': { - 'one': { - 'values': [{ - 'value': '2.2.3.4', - }], - }, - }, - 'rules': [{ - 'pool': 'one', - }], - }, - }) - desired.add_record(record) - list_resource_record_sets_resp = { - 'ResourceRecordSets': [{ - # Not dynamic value and other name - 'Name': 'unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }], - 'TTL': 61, - # All the non-matches have a different Id so we'll fail if they - # match - 'HealthCheckId': '33', - }, { - # Not dynamic value, matching name, other type - 'Name': 'a.unit.tests.', - 'Type': 'AAAA', - 'ResourceRecords': [{ - 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b' - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # default value pool - 'Name': '_octodns-default-value.a.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # different record - 'Name': '_octodns-two-value.other.unit.tests.', - 'Type': 'A', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': '1.2.3.4', - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # same everything, but 
different type
- 'Name': '_octodns-one-value.a.unit.tests.',
- 'Type': 'AAAA',
- 'ResourceRecords': [{
- 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
- }],
- 'TTL': 61,
- 'HealthCheckId': '33',
- }, {
- # same everything, sub
- 'Name': '_octodns-one-value.sub.a.unit.tests.',
- 'Type': 'A',
- 'ResourceRecords': [{
- 'Value': '1.2.3.4',
- }],
- 'TTL': 61,
- 'HealthCheckId': '33',
- }, {
- # match
- 'Name': '_octodns-one-value.a.unit.tests.',
- 'Type': 'A',
- 'ResourceRecords': [{
- 'Value': '2.2.3.4',
- }],
- 'TTL': 61,
- 'HealthCheckId': '42',
- }],
- 'IsTruncated': False,
- 'MaxItems': '100',
- }
- stubber.add_response('list_resource_record_sets',
- list_resource_record_sets_resp,
- {'HostedZoneId': 'z42'})
- stubber.add_response('list_health_checks', {
- 'HealthChecks': [{
- 'Id': '42',
- 'CallerReference': self.caller_ref,
- 'HealthCheckConfig': {
- 'Disabled': False,
- 'EnableSNI': True,
- 'Inverted': False,
- 'Type': 'HTTPS',
- 'FullyQualifiedDomainName': 'a.unit.tests',
- 'IPAddress': '2.2.3.4',
- 'ResourcePath': '/_dns',
- 'Type': 'HTTPS',
- 'Port': 443,
- 'MeasureLatency': True,
- 'RequestInterval': 10,
- },
- 'HealthCheckVersion': 2,
- }],
- 'IsTruncated': False,
- 'MaxItems': '100',
- 'Marker': '',
- })
- extra = provider._extra_changes(desired=desired, changes=[])
- self.assertEquals(0, len(extra))
- stubber.assert_no_pending_responses()
-
- # change b/c of healthcheck path
- record._octodns['healthcheck'] = {
- 'path': '/_ready'
- }
- extra = provider._extra_changes(desired=desired, changes=[])
- self.assertEquals(1, len(extra))
- stubber.assert_no_pending_responses()
-
- # change b/c of healthcheck host
- record._octodns['healthcheck'] = {
- 'host': 'foo.bar.io'
- }
- extra = provider._extra_changes(desired=desired, changes=[])
- self.assertEquals(1, len(extra))
- stubber.assert_no_pending_responses()
-
- def test_extra_change_dynamic_status_up(self):
- provider, stubber = self._get_stubbed_provider()
-
- zone = Zone('unit.tests.', [])
- record = Record.new(zone, 'a', {
- 'ttl': 30,
- 'type': 'A',
- 'value': '1.1.1.1',
- 'dynamic': {
- 'pools': {
- 'one': {
- 'values': [{
- 'status': 'up',
- 'value': '1.2.3.4',
- }],
- },
- },
- 'rules': [{
- 'pool': 'one',
- }],
- },
- })
-
- # status up and no health check so we're good
- rrset = {
- 'ResourceRecords': [{'Value': '1.2.3.4'}],
- }
- statuses = {'1.2.3.4': 'up'}
- self.assertFalse(
- provider._extra_changes_update_needed(record, rrset, statuses)
- )
-
- # status up and has a health check so update needed
- rrset = {
- 'ResourceRecords': [{'Value': '1.2.3.4'}],
- 'HealthCheckId': 'foo',
- }
- statuses = {'1.2.3.4': 'up'}
- self.assertTrue(
- provider._extra_changes_update_needed(record, rrset, statuses)
- )
-
- def test_extra_change_dynamic_has_health_check_cname(self):
- provider, stubber = self._get_stubbed_provider()
-
- list_hosted_zones_resp = {
- 'HostedZones': [{
- 'Name': 'unit.tests.',
- 'Id': 'z42',
- 'CallerReference': 'abc',
- }],
- 'Marker': 'm',
- 'IsTruncated': False,
- 'MaxItems': '100',
- }
- stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {})
-
- # dynamic CNAME record whose health check is up to date needs no
- # extra change
- desired = Zone('unit.tests.', [])
- record = Record.new(desired, 'cname', {
- 'ttl': 30,
- 'type': 'CNAME',
- 'value': 'cname.unit.tests.',
- 'dynamic': {
- 'pools': {
- 'one': {
- 'values': [{
- 'value': 'one.cname.unit.tests.',
- }],
- },
- },
- 'rules': [{
- 'pool': 'one',
- }],
- },
- })
- desired.add_record(record)
- list_resource_record_sets_resp = {
- 'ResourceRecordSets': [{
- # Not
dynamic value and other name - 'Name': 'unit.tests.', - 'Type': 'CNAME', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': 'cname.unit.tests.', - }], - 'TTL': 61, - # All the non-matches have a different Id so we'll fail if they - # match - 'HealthCheckId': '33', - }, { - # Not dynamic value, matching name, other type - 'Name': 'cname.unit.tests.', - 'Type': 'AAAA', - 'ResourceRecords': [{ - 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b' - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # default value pool - 'Name': '_octodns-default-value.cname.unit.tests.', - 'Type': 'CNAME', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': 'cname.unit.tests.', - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # different record - 'Name': '_octodns-two-value.other.unit.tests.', - 'Type': 'CNAME', - 'GeoLocation': { - 'CountryCode': '*', - }, - 'ResourceRecords': [{ - 'Value': 'cname.unit.tests.', - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # same everything, but different type - 'Name': '_octodns-one-value.cname.unit.tests.', - 'Type': 'AAAA', - 'ResourceRecords': [{ - 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b' - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # same everything, sub - 'Name': '_octodns-one-value.sub.cname.unit.tests.', - 'Type': 'CNAME', - 'ResourceRecords': [{ - 'Value': 'cname.unit.tests.', - }], - 'TTL': 61, - 'HealthCheckId': '33', - }, { - # match - 'Name': '_octodns-one-value.cname.unit.tests.', - 'Type': 'CNAME', - 'ResourceRecords': [{ - 'Value': 'one.cname.unit.tests.', - }], - 'TTL': 61, - 'HealthCheckId': '42', - }], - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_resource_record_sets', - list_resource_record_sets_resp, - {'HostedZoneId': 'z42'}) - - stubber.add_response('list_health_checks', { - 'HealthChecks': [{ - 'Id': '42', - 'CallerReference': self.caller_ref, - 'HealthCheckConfig': { - 'Disabled': False, - 'EnableSNI': True, - 'Inverted': False, - 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'one.cname.unit.tests.', - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', - 'Port': 443, - 'MeasureLatency': True, - 'RequestInterval': 10, - }, - 'HealthCheckVersion': 2, - }], - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(0, len(extra)) - stubber.assert_no_pending_responses() - - # change b/c of healthcheck path - record._octodns['healthcheck'] = { - 'path': '/_ready' - } - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(1, len(extra)) - stubber.assert_no_pending_responses() - - # no change b/c healthcheck host ignored for dynamic cname - record._octodns['healthcheck'] = { - 'host': 'foo.bar.io' - } - extra = provider._extra_changes(desired=desired, changes=[]) - self.assertEquals(0, len(extra)) - stubber.assert_no_pending_responses() - - def _get_test_plan(self, max_changes): - - provider = Route53Provider('test', 'abc', '123', max_changes) - - # Use the stubber - stubber = Stubber(provider._conn) - stubber.activate() - - got = Zone('unit.tests.', []) - - list_hosted_zones_resp = { - 'HostedZones': [], - 'Marker': 'm', - 'IsTruncated': False, - 'MaxItems': '100', - } - stubber.add_response('list_hosted_zones', list_hosted_zones_resp, - {}) - - create_hosted_zone_resp = { - 'HostedZone': { - 'Name': 'unit.tests.', - 'Id': 'z42', - 'CallerReference': 'abc', - }, - 'ChangeInfo': { - 'Id': 'a12', - 'Status': 'PENDING', - 
'SubmittedAt': '2017-01-29T01:02:03Z', - 'Comment': 'hrm', - }, - 'DelegationSet': { - 'Id': 'b23', - 'CallerReference': 'blip', - 'NameServers': [ - 'n12.unit.tests.', - ], - }, - 'Location': 'us-east-1', - } - stubber.add_response('create_hosted_zone', - create_hosted_zone_resp, { - 'Name': got.name, - 'CallerReference': ANY, - }) - - stubber.add_response('list_health_checks', - { - 'HealthChecks': self.health_checks, - 'IsTruncated': False, - 'MaxItems': '100', - 'Marker': '', - }) - - stubber.add_response('change_resource_record_sets', - {'ChangeInfo': { - 'Id': 'id', - 'Status': 'PENDING', - 'SubmittedAt': '2017-01-29T01:02:03Z', - }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY}) - - plan = provider.plan(self.expected) - - return provider, plan - - # _get_test_plan() returns a plan with 11 modifications, 17 RRs - - @patch('octodns.provider.route53.Route53Provider._load_records') - @patch('octodns.provider.route53.Route53Provider._really_apply') - def test_apply_1(self, really_apply_mock, _): - - # 18 RRs with max of 19 should only get applied in one call - provider, plan = self._get_test_plan(19) - provider.apply(plan) - really_apply_mock.assert_called_once() - - @patch('octodns.provider.route53.Route53Provider._load_records') - @patch('octodns.provider.route53.Route53Provider._really_apply') - def test_apply_2(self, really_apply_mock, _): - - # 18 RRs with max of 17 should only get applied in two calls - provider, plan = self._get_test_plan(18) - provider.apply(plan) - self.assertEquals(2, really_apply_mock.call_count) - - @patch('octodns.provider.route53.Route53Provider._load_records') - @patch('octodns.provider.route53.Route53Provider._really_apply') - def test_apply_3(self, really_apply_mock, _): - - # with a max of seven modifications, three calls - provider, plan = self._get_test_plan(7) - provider.apply(plan) - self.assertEquals(3, really_apply_mock.call_count) - - @patch('octodns.provider.route53.Route53Provider._load_records') - @patch('octodns.provider.route53.Route53Provider._really_apply') - def test_apply_4(self, really_apply_mock, _): - - # with a max of 11 modifications, two calls - provider, plan = self._get_test_plan(11) - provider.apply(plan) - self.assertEquals(2, really_apply_mock.call_count) - - @patch('octodns.provider.route53.Route53Provider._load_records') - @patch('octodns.provider.route53.Route53Provider._really_apply') - def test_apply_bad(self, really_apply_mock, _): - - # with a max of 1 modifications, fail - provider, plan = self._get_test_plan(1) - with self.assertRaises(Exception) as ctx: - provider.apply(plan) - self.assertTrue('modifications' in str(ctx.exception)) - - def test_semicolon_fixup(self): - provider = Route53Provider('test', 'abc', '123') - - self.assertEquals({ - 'type': 'TXT', - 'ttl': 30, - 'values': [ - 'abcd\\; ef\\;g', - 'hij\\; klm\\;n', - ], - }, provider._data_for_quoted({ - 'ResourceRecords': [{ - 'Value': '"abcd; ef;g"', - }, { - 'Value': '"hij\\; klm\\;n"', - }], - 'TTL': 30, - 'Type': 'TXT', - })) - - def test_client_max_attempts(self): - provider = Route53Provider('test', 'abc', '123', - client_max_attempts=42) - # NOTE: this will break if boto ever changes the impl details... 
- self.assertEquals({
- 'mode': 'legacy',
- 'total_max_attempts': 43,
- }, provider._conn._client_config.retries)
-
- def test_data_for_dynamic(self):
- provider = Route53Provider('test', 'abc', '123')
- provider._health_checks = dynamic_health_checks
-
- data = provider._data_for_dynamic('', 'A', dynamic_rrsets)
- self.assertEquals(dynamic_record_data, data)
-
- @patch('octodns.provider.route53.Route53Provider._get_zone_id')
- @patch('octodns.provider.route53.Route53Provider._load_records')
- def test_dynamic_populate(self, load_records_mock, get_zone_id_mock):
- provider = Route53Provider('test', 'abc', '123')
- provider._health_checks = {}
-
- get_zone_id_mock.side_effect = ['z44']
- load_records_mock.side_effect = [dynamic_rrsets]
-
- got = Zone('unit.tests.', [])
- provider.populate(got)
-
- self.assertEquals(1, len(got.records))
- record = list(got.records)[0]
- self.assertEquals('', record.name)
- self.assertEquals('A', record._type)
- self.assertEquals([
- '1.1.2.1',
- '1.1.2.2',
- ], record.values)
- self.assertTrue(record.dynamic)
-
- self.assertEquals({
- 'ap-southeast-1': {
- 'fallback': 'us-east-1',
- 'values': [{
- 'weight': 2, 'value': '1.4.1.1', 'status': 'up',
- }, {
- 'weight': 2, 'value': '1.4.1.2', 'status': 'up',
- }]
- },
- 'eu-central-1': {
- 'fallback': 'us-east-1',
- 'values': [{
- 'weight': 1, 'value': '1.3.1.1', 'status': 'up',
- }, {
- 'weight': 1, 'value': '1.3.1.2', 'status': 'up',
- }],
- },
- 'us-east-1': {
- 'fallback': None,
- 'values': [{
- 'weight': 1, 'value': '1.5.1.1', 'status': 'up',
- }, {
- 'weight': 1, 'value': '1.5.1.2', 'status': 'up',
- }],
- }
- }, {k: v.data for k, v in record.dynamic.pools.items()})
-
- self.assertEquals([
- {
- 'geos': ['AS-CN', 'AS-JP'],
- 'pool': 'ap-southeast-1',
- }, {
- 'geos': ['EU', 'NA-US-FL'],
- 'pool': 'eu-central-1',
- }, {
- 'pool': 'us-east-1',
- }], [r.data for r in record.dynamic.rules])
-
-
-class DummyProvider(object):
-
- def get_health_check_id(self, *args, **kwargs):
- return None
-
-
-class TestRoute53Records(TestCase):
- existing = Zone('unit.tests.', [])
- record_a = Record.new(existing, '', {
- 'geo': {
- 'NA-US': ['2.2.2.2', '3.3.3.3'],
- 'OC': ['4.4.4.4', '5.5.5.5']
- },
- 'ttl': 99,
- 'type': 'A',
- 'values': ['9.9.9.9']
- })
-
- def test_value_fors(self):
- route53_record = _Route53Record(None, self.record_a, False)
-
- for value in (None, '', 'foo', 'bar', '1.2.3.4'):
- converted = route53_record._value_convert_value(value,
- self.record_a)
- self.assertEquals(value, converted)
-
- record_txt = Record.new(self.existing, 'txt', {
- 'ttl': 98,
- 'type': 'TXT',
- 'value': 'Not Important',
- })
-
- # We don't really have to test the details of chunked_value as that's
- # tested elsewhere; we just need to make sure that it's plumbed up and
- # working
- self.assertEquals('"Not Important"', route53_record
- ._value_convert_quoted(record_txt.values[0],
- record_txt))
-
- def test_route53_record(self):
- a = _Route53Record(None, self.record_a, False)
- self.assertEquals(a, a)
- b = _Route53Record(None, Record.new(self.existing, '',
- {'ttl': 32, 'type': 'A',
- 'values': ['8.8.8.8',
- '1.1.1.1']}),
- False)
- self.assertEquals(b, b)
- c = _Route53Record(None, Record.new(self.existing, 'other',
- {'ttl': 99, 'type': 'A',
- 'values': ['9.9.9.9']}),
- False)
- self.assertEquals(c, c)
- d = _Route53Record(None, Record.new(self.existing, '',
- {'ttl': 42, 'type': 'MX',
- 'value': {
- 'preference': 10,
- 'exchange': 'foo.bar.'}}),
- False)
- self.assertEquals(d, d)
-
- # Same fqdn & type is same record
- self.assertEquals(a, b)
- # Same name & different type is not the same
- self.assertNotEquals(a, d)
- # Different name & same type is not the same
- self.assertNotEquals(a, c)
-
- # Same everything, different class is not the same
- e = _Route53GeoDefault(None, self.record_a, False)
- self.assertNotEquals(a, e)
-
- provider = DummyProvider()
- f = _Route53GeoRecord(provider, self.record_a, 'NA-US',
- self.record_a.geo['NA-US'], False)
- self.assertEquals(f, f)
- g = _Route53GeoRecord(provider, self.record_a, 'OC',
- self.record_a.geo['OC'], False)
- self.assertEquals(g, g)
-
- # Geo and non-geo are not the same, using Geo as primary to get its
- # __cmp__
- self.assertNotEquals(f, a)
- # Same everything, different geos is not the same
- self.assertNotEquals(f, g)
-
- # Make sure it doesn't blow up
- a.__repr__()
- e.__repr__()
- f.__repr__()
-
- def test_route53_record_ordering(self):
- # Matches
- a = _Route53Record(None, self.record_a, False)
- b = _Route53Record(None, self.record_a, False)
- self.assertTrue(a == b)
- self.assertFalse(a != b)
- self.assertFalse(a < b)
- self.assertTrue(a <= b)
- self.assertFalse(a > b)
- self.assertTrue(a >= b)
-
- # Changing the fqdn changes the ordering
- fqdn = _Route53Record(None, self.record_a, False,
- fqdn_override='other')
- self.assertFalse(a == fqdn)
- self.assertTrue(a != fqdn)
- self.assertFalse(a < fqdn)
- self.assertFalse(a <= fqdn)
- self.assertTrue(a > fqdn)
- self.assertTrue(a >= fqdn)
-
- provider = DummyProvider()
- geo_a = _Route53GeoRecord(provider, self.record_a, 'NA-US',
- self.record_a.geo['NA-US'], False)
- geo_b = _Route53GeoRecord(provider, self.record_a, 'NA-US',
- self.record_a.geo['NA-US'], False)
- self.assertTrue(geo_a == geo_b)
- self.assertFalse(geo_a != geo_b)
- self.assertFalse(geo_a < geo_b)
- self.assertTrue(geo_a <= geo_b)
- self.assertFalse(geo_a > geo_b)
- self.assertTrue(geo_a >= geo_b)
-
- # Other base
- geo_fqdn = _Route53GeoRecord(provider, self.record_a, 'NA-US',
- self.record_a.geo['NA-US'], False)
- geo_fqdn.fqdn = 'other'
- self.assertFalse(geo_a == geo_fqdn)
- self.assertTrue(geo_a != geo_fqdn)
- self.assertFalse(geo_a < geo_fqdn)
- self.assertFalse(geo_a <= geo_fqdn)
- self.assertTrue(geo_a > geo_fqdn)
- self.assertTrue(geo_a >= geo_fqdn)
-
- # Other class
- self.assertFalse(a == geo_a)
- self.assertTrue(a != geo_a)
- self.assertFalse(a < geo_a)
- self.assertFalse(a <= geo_a)
- self.assertTrue(a > geo_a)
- self.assertTrue(a >= geo_a)
-
- def test_dynamic_value_delete(self):
- provider = DummyProvider()
- geo = _Route53DynamicValue(provider, self.record_a, 'iad', '2.2.2.2',
- 1, 'obey', 0, False)
-
- rrset = {
- 'HealthCheckId': 'x12346z',
- 'Name': '_octodns-iad-value.unit.tests.',
- 'ResourceRecords': [{
- 'Value': '2.2.2.2'
- }],
- 'SetIdentifier': 'iad-000',
- 'TTL': 99,
- 'Type': 'A',
- 'Weight': 1,
- }
-
- candidates = [
- # Empty, will test no SetIdentifier
- {},
- # Non-matching
- {
- 'SetIdentifier': 'not-a-match',
- },
- # Same set-id, different name
- {
- 'Name': 'not-a-match',
- 'SetIdentifier': 'x12346z',
- },
- rrset,
- ]
-
- # Provide a matching rrset so that we'll just use it for the delete
- # rather than building up an almost identical one. The way we'll know
- # that we got the one we passed in is that it'll have a HealthCheckId;
- # a freshly built one wouldn't, since DummyProvider stubs out the
- # lookup for them
- mod = geo.mod('DELETE', candidates)
- self.assertEquals('x12346z', mod['ResourceRecordSet']['HealthCheckId'])
-
- # If we don't provide the candidate rrsets we get back exactly what we
- # put in minus the healthcheck
- del rrset['HealthCheckId']
- mod = geo.mod('DELETE', [])
- self.assertEquals(rrset, mod['ResourceRecordSet'])
-
- def test_geo_delete(self):
- provider = DummyProvider()
- geo = _Route53GeoRecord(provider, self.record_a, 'NA-US',
- self.record_a.geo['NA-US'], False)
-
- rrset = {
- 'GeoLocation': {
- 'CountryCode': 'US'
- },
- 'HealthCheckId': 'x12346z',
- 'Name': 'unit.tests.',
- 'ResourceRecords': [{
- 'Value': '2.2.2.2'
- }, {
- 'Value': '3.3.3.3'
- }],
- 'SetIdentifier': 'NA-US',
- 'TTL': 99,
- 'Type': 'A'
- }
-
- candidates = [
- # Empty, will test no SetIdentifier
- {},
- {
- 'SetIdentifier': 'not-a-match',
- },
- # Same set-id, different name
- {
- 'Name': 'not-a-match',
- 'SetIdentifier': 'x12346z',
- },
- rrset,
- ]
-
- # Provide a matching rrset so that we'll just use it for the delete
- # rather than building up an almost identical one. The way we'll know
- # that we got the one we passed in is that it'll have a HealthCheckId;
- # a freshly built one wouldn't, since DummyProvider stubs out the
- # lookup for them
- mod = geo.mod('DELETE', candidates)
- self.assertEquals('x12346z', mod['ResourceRecordSet']['HealthCheckId'])
-
- # If we don't provide the candidate rrsets we get back exactly what we
- # put in minus the healthcheck
- del rrset['HealthCheckId']
- mod = geo.mod('DELETE', [])
- self.assertEquals(rrset, mod['ResourceRecordSet'])
-
- def test_new_dynamic(self):
- provider = Route53Provider('test', 'abc', '123')
-
- # Just so boto won't try and make any calls
- stubber = Stubber(provider._conn)
- stubber.activate()
-
- # We'll assume we create all healthchecks here; this functionality is
- # thoroughly tested elsewhere
- provider._health_checks = {}
- # When asked for a healthcheck return dummy info
- provider.get_health_check_id = lambda r, v, s, c: 'hc42'
-
- zone = Zone('unit.tests.', [])
- record = Record.new(zone, '', dynamic_record_data)
-
- # Convert a record into _Route53Records
- route53_records = _Route53Record.new(provider, record, 'z45',
- creating=True)
- self.assertEquals(18, len(route53_records))
-
- expected_mods = [r.mod('CREATE', []) for r in route53_records]
- # Sort so that we get a consistent order and don't rely on set ordering
- expected_mods.sort(key=_mod_keyer)
-
- # Convert the route53_records into mods
- self.assertEquals([{
- 'Action': 'CREATE',
- 'ResourceRecordSet': {
- 'HealthCheckId': 'hc42',
- 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
- 'ResourceRecords': [{'Value': '1.4.1.1'}],
- 'SetIdentifier': 'ap-southeast-1-000',
- 'TTL': 60,
- 'Type': 'A',
- 'Weight': 2}
- }, {
- 'Action': 'CREATE',
- 'ResourceRecordSet': {
- 'HealthCheckId': 'hc42',
- 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
- 'ResourceRecords': [{'Value': '1.4.1.2'}],
- 'SetIdentifier': 'ap-southeast-1-001',
- 'TTL': 60,
- 'Type': 'A',
- 'Weight': 2}
- }, {
- 'Action': 'CREATE',
- 'ResourceRecordSet': {
- 'Name': '_octodns-default-pool.unit.tests.',
- 'ResourceRecords': [
- {'Value': '1.1.2.1'},
- {'Value': '1.1.2.2'}],
- 'TTL': 60,
- 'Type': 'A'}
- }, {
- 'Action': 'CREATE',
- 'ResourceRecordSet': {
- 'HealthCheckId': 'hc42',
- 'Name': '_octodns-eu-central-1-value.unit.tests.',
- 'ResourceRecords': [{'Value': '1.3.1.1'}],
- 'SetIdentifier': 'eu-central-1-000',
- 'TTL': 60,
- 'Type': 'A',
- 'Weight': 1}
- }, {
- 'Action': 'CREATE',
- 'ResourceRecordSet': {
- 'HealthCheckId': 'hc42',
- 'Name': '_octodns-eu-central-1-value.unit.tests.',
- 'ResourceRecords': [{'Value': '1.3.1.2'}],
- 'SetIdentifier':
'eu-central-1-001', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'HealthCheckId': 'hc42', - 'Name': '_octodns-us-east-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.5.1.1'}], - 'SetIdentifier': 'us-east-1-000', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'HealthCheckId': 'hc42', - 'Name': '_octodns-us-east-1-value.unit.tests.', - 'ResourceRecords': [{'Value': '1.5.1.2'}], - 'SetIdentifier': 'us-east-1-001', - 'TTL': 60, - 'Type': 'A', - 'Weight': 1} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-ap-southeast-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'PRIMARY', - 'Name': '_octodns-ap-southeast-1-pool.unit.tests.', - 'SetIdentifier': 'ap-southeast-1-Primary', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-eu-central-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'PRIMARY', - 'Name': '_octodns-eu-central-1-pool.unit.tests.', - 'SetIdentifier': 'eu-central-1-Primary', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-us-east-1-value.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'PRIMARY', - 'Name': '_octodns-us-east-1-pool.unit.tests.', - 'SetIdentifier': 'us-east-1-Primary', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-ap-southeast-1-pool.unit.tests.', - 'SetIdentifier': 'ap-southeast-1-Secondary-us-east-1', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-eu-central-1-pool.unit.tests.', - 'SetIdentifier': 'eu-central-1-Secondary-us-east-1', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-default-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'Failover': 'SECONDARY', - 'Name': '_octodns-us-east-1-pool.unit.tests.', - 'SetIdentifier': 'us-east-1-Secondary-default', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'GeoLocation': { - 'CountryCode': 'CN'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '0-ap-southeast-1-AS-CN', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'GeoLocation': { - 'CountryCode': 'JP'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '0-ap-southeast-1-AS-JP', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-eu-central-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'GeoLocation': { - 'ContinentCode': 'EU'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '1-eu-central-1-EU', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': 
'_octodns-eu-central-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'GeoLocation': { - 'CountryCode': 'US', - 'SubdivisionCode': 'FL'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '1-eu-central-1-NA-US-FL', - 'Type': 'A'} - }, { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'AliasTarget': { - 'DNSName': '_octodns-us-east-1-pool.unit.tests.', - 'EvaluateTargetHealth': True, - 'HostedZoneId': 'z45'}, - 'GeoLocation': { - 'CountryCode': '*'}, - 'Name': 'unit.tests.', - 'SetIdentifier': '2-us-east-1-None', - 'Type': 'A'} - }], expected_mods) - - for route53_record in route53_records: - # Smoke test stringification - route53_record.__repr__() - - -class TestModKeyer(TestCase): - - def test_mod_keyer(self): - - # First "column" is the action priority for C/R/U - - # Deletes come first - self.assertEquals((0, 0, 'something'), _mod_keyer({ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'Name': 'something', - } - })) - - # Creates come next - self.assertEquals((1, 0, 'another'), _mod_keyer({ - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'another', - } - })) - - # Upserts are the same as creates - self.assertEquals((1, 0, 'last'), _mod_keyer({ - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'Name': 'last', - } - })) - - # Second "column" value records tested above - - # AliasTarget primary second (to value) - self.assertEquals((0, -1, 'thing'), _mod_keyer({ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'AliasTarget': 'some-target', - 'Failover': 'PRIMARY', - 'Name': 'thing', - } - })) - - self.assertEquals((1, 1, 'thing'), _mod_keyer({ - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'AliasTarget': 'some-target', - 'Failover': 'PRIMARY', - 'Name': 'thing', - } - })) - - # AliasTarget secondary third - self.assertEquals((0, -2, 'thing'), _mod_keyer({ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'AliasTarget': 'some-target', - 'Failover': 'SECONDARY', - 'Name': 'thing', - } - })) - - self.assertEquals((1, 2, 'thing'), _mod_keyer({ - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'AliasTarget': 'some-target', - 'Failover': 'SECONDARY', - 'Name': 'thing', - } - })) - - # GeoLocation fourth - self.assertEquals((0, -3, 'some-id'), _mod_keyer({ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'GeoLocation': 'some-target', - 'SetIdentifier': 'some-id', - } - })) - - self.assertEquals((1, 3, 'some-id'), _mod_keyer({ - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'GeoLocation': 'some-target', - 'SetIdentifier': 'some-id', - } - })) - - # The third "column" has already been tested above, Name/SetIdentifier + def test_missing(self): + with self.assertRaises(ModuleNotFoundError): + from octodns.provider.route53 import Route53Provider + Route53Provider
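+ # the bare name reference keeps linters from flagging the import as
+ # unused; it is only evaluated if the import above unexpectedly
+ # succeeds, in which case the assertRaises will fail the test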