.*)$',
+ re.IGNORECASE)
+
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = False
+ SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CNAME', 'MX', 'NS',
+ 'SRV', 'SSHFP', 'CAA', 'TXT'))
+ BASE = 'https://dnsapi.mythic-beasts.com/'
+
def __init__(self, identifier, passwords, *args, **kwargs):
    """Create a Mythic Beasts provider.

    identifier -- provider id, used in log messages
    passwords  -- dict mapping zone names to their per-zone API passwords
    """
    self.log = getLogger('MythicBeastsProvider[{}]'.format(identifier))

    assert isinstance(passwords, dict), 'Passwords must be a dictionary'

    self.log.debug(
        '__init__: id=%s, registered zones; %s',
        identifier,
        passwords.keys())
    super(MythicBeastsProvider, self).__init__(identifier, *args, **kwargs)

    self._passwords = passwords
    # one shared HTTP session for all API calls
    self._sess = Session()
+
def _request(self, method, path, data=None):
    """Make an API request, translating error statuses into exceptions.

    method -- HTTP method
    path   -- full URL to request
    data   -- form payload dict; expected to carry 'domain' and
              'command' keys, which are used in error reporting

    Raises MythicBeastsUnauthorizedException on 401 and
    MythicBeastsRecordException on 400; any other non-2xx status is
    raised via requests' raise_for_status.
    """
    self.log.debug('_request: method=%s, path=%s data=%s',
                   method, path, data)

    resp = self._sess.request(method, path, data=data)
    self.log.debug(
        '_request: status=%d data=%s',
        resp.status_code,
        resp.text[:20])

    # Guard against data=None (the declared default) so error handling
    # can't raise a TypeError that masks the real API failure.
    payload = data or {}
    if resp.status_code == 401:
        raise MythicBeastsUnauthorizedException(payload.get('domain'))

    if resp.status_code == 400:
        raise MythicBeastsRecordException(
            payload.get('domain'),
            payload.get('command')
        )
    resp.raise_for_status()
    return resp
+
def _post(self, data=None):
    """Convenience wrapper: POST *data* to the provider API endpoint."""
    return self._request('POST', self.BASE, data=data)
+
def records(self, zone):
    """Fetch the raw record listing for *zone* via the LIST command."""
    assert zone in self._passwords, 'Missing password for domain: {}' \
        .format(remove_trailing_dot(zone))

    payload = {
        'domain': remove_trailing_dot(zone),
        'password': self._passwords[zone],
        'showall': 0,
        'command': 'LIST',
    }
    return self._post(payload)
+
+ @staticmethod
+ def _data_for_single(_type, data):
+ return {
+ 'type': _type,
+ 'value': data['raw_values'][0]['value'],
+ 'ttl': data['raw_values'][0]['ttl']
+ }
+
+ @staticmethod
+ def _data_for_multiple(_type, data):
+ return {
+ 'type': _type,
+ 'values':
+ [raw_values['value'] for raw_values in data['raw_values']],
+ 'ttl':
+ max([raw_values['ttl'] for raw_values in data['raw_values']]),
+ }
+
+ @staticmethod
+ def _data_for_TXT(_type, data):
+ return {
+ 'type': _type,
+ 'values':
+ [
+ str(raw_values['value']).replace(';', '\\;')
+ for raw_values in data['raw_values']
+ ],
+ 'ttl':
+ max([raw_values['ttl'] for raw_values in data['raw_values']]),
+ }
+
@staticmethod
def _data_for_MX(_type, data):
    """Parse raw MX strings into preference/exchange pairs.

    Relative exchange names are made absolute with the zone name.
    """
    raw = data['raw_values']
    ttl = max(entry['ttl'] for entry in raw)

    values = []
    for entry in raw:
        match = MythicBeastsProvider.RE_MX.match(entry['value'])
        assert match is not None, 'Unable to parse MX data'

        exchange = match.group('exchange')
        if not exchange.endswith('.'):
            exchange = '{}.{}'.format(exchange, data['zone'])

        values.append({
            'preference': match.group('preference'),
            'exchange': exchange,
        })

    return {
        'type': _type,
        'values': values,
        'ttl': ttl,
    }
+
@staticmethod
def _data_for_CNAME(_type, data):
    """Build CNAME data, qualifying a relative target with the zone."""
    entry = data['raw_values'][0]
    value = entry['value']
    if not value.endswith('.'):
        value = '{}.{}'.format(value, data['zone'])

    return MythicBeastsProvider._data_for_single(_type, {
        'raw_values': [{'value': value, 'ttl': entry['ttl']}],
    })
+
@staticmethod
def _data_for_ANAME(_type, data):
    """Map Mythic Beasts ANAME records onto octoDNS ALIAS records."""
    entry = data['raw_values'][0]
    return MythicBeastsProvider._data_for_single('ALIAS', {
        'raw_values': [{'value': entry['value'], 'ttl': entry['ttl']}],
    })
+
@staticmethod
def _data_for_SRV(_type, data):
    """Parse raw SRV strings; relative targets get the zone appended."""
    raw = data['raw_values']
    ttl = max(entry['ttl'] for entry in raw)

    values = []
    for entry in raw:
        match = MythicBeastsProvider.RE_SRV.match(entry['value'])
        assert match is not None, 'Unable to parse SRV data'

        target = match.group('target')
        if not target.endswith('.'):
            target = '{}.{}'.format(target, data['zone'])

        values.append({
            'priority': match.group('priority'),
            'weight': match.group('weight'),
            'port': match.group('port'),
            'target': target,
        })

    return {
        'type': _type,
        'values': values,
        'ttl': ttl,
    }
+
@staticmethod
def _data_for_SSHFP(_type, data):
    """Parse raw SSHFP strings into algorithm/type/fingerprint dicts."""
    raw = data['raw_values']
    ttl = max(entry['ttl'] for entry in raw)

    values = []
    for entry in raw:
        match = MythicBeastsProvider.RE_SSHFP.match(entry['value'])
        assert match is not None, 'Unable to parse SSHFP data'

        values.append({
            'algorithm': match.group('algorithm'),
            'fingerprint_type': match.group('fingerprint_type'),
            'fingerprint': match.group('fingerprint'),
        })

    return {
        'type': _type,
        'values': values,
        'ttl': ttl,
    }
+
@staticmethod
def _data_for_CAA(_type, data):
    """Parse a single raw CAA string into flags/tag/value."""
    entry = data['raw_values'][0]
    match = MythicBeastsProvider.RE_CAA.match(entry['value'])
    assert match is not None, 'Unable to parse CAA data'

    value = {
        'flags': match.group('flags'),
        'tag': match.group('tag'),
        'value': match.group('value'),
    }

    return MythicBeastsProvider._data_for_single(
        'CAA',
        {'raw_values': [{'value': value, 'ttl': entry['ttl']}]})
+
# NS, A and AAAA records all use the generic multi-value conversion.
_data_for_NS = _data_for_multiple
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
+
def populate(self, zone, target=False, lenient=False):
    """Load records for *zone* from the API into the zone object.

    Returns True: reaching the parsing stage means the zone responded
    (records() raises for unknown/unauthorized zones).
    """
    self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
                   target, lenient)

    resp = self.records(zone.name)

    before = len(zone.records)
    # Accumulate raw values keyed by type then name, so multi-value
    # records are merged before Record construction.
    # (Removed a dead `exists = False` that was immediately overwritten.)
    data = defaultdict(lambda: defaultdict(lambda: {
        'raw_values': [],
        'name': None,
        'zone': None,
    }))

    exists = True
    for line in resp.content.splitlines():
        match = MythicBeastsProvider.RE_POPLINE.match(line.decode('utf-8'))

        if match is None:
            self.log.debug('failed to match line: %s', line)
            continue

        if match.group(1) == '@':
            # '@' (the zone apex) maps to octoDNS's empty name
            _name = ''
        else:
            _name = match.group('name')

        _type = match.group('type')
        _ttl = int(match.group('ttl'))
        _value = match.group('value').strip()

        if hasattr(self, '_data_for_{}'.format(_type)):
            if _name not in data[_type]:
                data[_type][_name] = {
                    'raw_values': [{'value': _value, 'ttl': _ttl}],
                    'name': _name,
                    'zone': zone.name,
                }
            else:
                data[_type][_name]['raw_values'].append(
                    {'value': _value, 'ttl': _ttl}
                )
        else:
            self.log.debug('skipping %s as not supported', _type)

    for _type in data:
        for _name in data[_type]:
            data_for = getattr(self, '_data_for_{}'.format(_type))

            record = Record.new(
                zone,
                _name,
                data_for(_type, data[_type][_name]),
                source=self
            )
            zone.add_record(record, lenient=lenient)

    self.log.debug('populate: found %s records, exists=%s',
                   len(zone.records) - before, exists)

    return exists
+
def _compile_commands(self, action, record):
    """Build Mythic Beasts primary-DNS command strings for a change.

    action -- 'ADD' or 'DELETE'
    record -- the octoDNS record being applied

    Returns a list of command strings, one per record value.
    """
    commands = []

    hostname = remove_trailing_dot(record.fqdn)
    ttl = record.ttl
    _type = record._type

    # octoDNS calls it ALIAS; Mythic Beasts calls it ANAME
    if _type == 'ALIAS':
        _type = 'ANAME'

    if hasattr(record, 'values'):
        values = record.values
    else:
        values = [record.value]

    base = '{} {} {} {}'.format(action, hostname, ttl, _type)

    # Unescape TXT records
    if _type == 'TXT':
        values = [value.replace('\\;', ';') for value in values]

    # Handle specific types or default
    if _type == 'SSHFP':
        # BUG FIX: previously only values[0] was emitted, silently
        # dropping any additional SSHFP values; emit one command per
        # value like the other multi-value types.
        for value in values:
            data = value.data
            commands.append('{} {} {} {}'.format(
                base,
                data['algorithm'],
                data['fingerprint_type'],
                data['fingerprint']
            ))

    elif _type == 'SRV':
        for value in values:
            data = value.data
            commands.append('{} {} {} {} {}'.format(
                base,
                data['priority'],
                data['weight'],
                data['port'],
                data['target']))

    elif _type == 'MX':
        for value in values:
            data = value.data
            commands.append('{} {} {}'.format(
                base,
                data['preference'],
                data['exchange']))

    else:
        if hasattr(self, '_data_for_{}'.format(_type)):
            for value in values:
                commands.append('{} {}'.format(base, value))
        else:
            self.log.debug('skipping %s as not supported', _type)

    return commands
+
def _apply_Create(self, change):
    """Apply a Create by POSTing one ADD command per record value."""
    zone = change.new.zone
    domain = remove_trailing_dot(zone.name)
    password = self._passwords[zone.name]

    for command in self._compile_commands('ADD', change.new):
        self._post({
            'domain': domain,
            'origin': '.',
            'password': password,
            'command': command,
        })
    return True
+
def _apply_Update(self, change):
    """Apply an Update as a Delete of the existing record followed by a
    Create of the new one (the API has no in-place update).

    Returns True for consistency with _apply_Create/_apply_Delete.
    """
    self._apply_Delete(change)
    self._apply_Create(change)
    return True
+
def _apply_Delete(self, change):
    """Apply a Delete by POSTing one DELETE command per record value."""
    zone = change.existing.zone
    domain = remove_trailing_dot(zone.name)
    password = self._passwords[zone.name]

    for command in self._compile_commands('DELETE', change.existing):
        self._post({
            'domain': domain,
            'origin': '.',
            'password': password,
            'command': command,
        })
    return True
+
def _apply(self, plan):
    """Dispatch each planned change to its _apply_<ChangeType> handler."""
    desired = plan.desired
    changes = plan.changes
    self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                   len(changes))

    for change in changes:
        handler = getattr(
            self, '_apply_{}'.format(change.__class__.__name__))
        handler(change)
diff --git a/octodns/provider/ns1.py b/octodns/provider/ns1.py
index 5fdf5b0..6cea185 100644
--- a/octodns/provider/ns1.py
+++ b/octodns/provider/ns1.py
@@ -7,38 +7,411 @@ from __future__ import absolute_import, division, print_function, \
from logging import getLogger
from itertools import chain
-from collections import OrderedDict, defaultdict
-from nsone import NSONE
-from nsone.rest.errors import RateLimitException, ResourceException
-from incf.countryutils import transformations
+from collections import Mapping, OrderedDict, defaultdict
+from ns1 import NS1
+from ns1.rest.errors import RateLimitException, ResourceException
+from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
+from uuid import uuid4
-from ..record import Record
+from six import text_type
+
+from ..record import Record, Update
from .base import BaseProvider
class Ns1Exception(Exception):
    '''Provider-specific error, e.g. an advanced record whose filter
    chain this provider did not generate and cannot round-trip.'''
    pass
+
+
class Ns1Client(object):
    '''Thin caching/retrying wrapper around the NS1 SDK client.

    Lazily caches the octoDNS datasource id, the monitor list and the
    monitor->feed mapping, and retries any request that hits NS1's rate
    limiting (see _try).
    '''
    log = getLogger('NS1Client')

    def __init__(self, api_key, parallelism=None, retry_count=4,
                 client_config=None):
        self.log.debug('__init__: parallelism=%s, retry_count=%d, '
                       'client_config=%s', parallelism, retry_count,
                       client_config)
        self.retry_count = retry_count

        client = NS1(apiKey=api_key)

        # NS1 rate limits via a "token bucket" scheme, and provides information
        # about rate limiting in headers on responses. Token bucket can be
        # thought of as an initially "full" bucket, where, if not full, tokens
        # are added at some rate. This allows "bursting" requests until the
        # bucket is empty, after which, you are limited to the rate of token
        # replenishment.
        # There are a couple of "strategies" built into the SDK to avoid 429s
        # from rate limiting. Since octodns operates concurrently via
        # `max_workers`, a concurrent strategy seems appropriate.
        # This strategy does nothing until the remaining requests are equal to
        # or less than our `parallelism`, after which, each process will sleep
        # for the token replenishment interval times parallelism.
        # For example, if we can make 10 requests in 60 seconds, a token is
        # replenished every 6 seconds. If parallelism is 3, we will burst 7
        # requests, and subsequently each process will sleep for 18 seconds
        # before making another request.
        # In general, parallelism should match the number of workers.
        if parallelism is not None:
            client.config['rate_limit_strategy'] = 'concurrent'
            client.config['parallelism'] = parallelism

        # The list of records for a zone is paginated at around ~2.5k records,
        # this tells the client to handle any of that transparently and ensure
        # we get the full list of records.
        client.config['follow_pagination'] = True

        # additional options or overrides
        if isinstance(client_config, Mapping):
            for k, v in client_config.items():
                client.config[k] = v

        self._client = client

        # per-resource SDK endpoints
        self._records = client.records()
        self._zones = client.zones()
        self._monitors = client.monitors()
        self._notifylists = client.notifylists()
        self._datasource = client.datasource()
        self._datafeed = client.datafeed()

        # lazily-populated caches, see the properties below
        self._datasource_id = None
        self._feeds_for_monitors = None
        self._monitors_cache = None

    @property
    def datasource_id(self):
        '''Id of the octoDNS NS1 datasource, created on first use.'''
        if self._datasource_id is None:
            name = 'octoDNS NS1 Data Source'
            source = None
            for candidate in self.datasource_list():
                if candidate['name'] == name:
                    # Found it
                    source = candidate
                    break

            if source is None:
                self.log.info('datasource_id: creating datasource %s', name)
                # We need to create it
                source = self.datasource_create(name=name,
                                                sourcetype='nsone_monitoring')
                self.log.info('datasource_id: id=%s', source['id'])

            self._datasource_id = source['id']

        return self._datasource_id

    @property
    def feeds_for_monitors(self):
        '''Mapping of monitor (job) id -> datafeed id, cached.'''
        if self._feeds_for_monitors is None:
            self.log.debug('feeds_for_monitors: fetching & building')
            self._feeds_for_monitors = {
                f['config']['jobid']: f['id']
                for f in self.datafeed_list(self.datasource_id)
            }

        return self._feeds_for_monitors

    @property
    def monitors(self):
        '''Mapping of monitor id -> monitor, cached.'''
        if self._monitors_cache is None:
            self.log.debug('monitors: fetching & building')
            self._monitors_cache = \
                {m['id']: m for m in self.monitors_list()}
        return self._monitors_cache

    def datafeed_create(self, sourceid, name, config):
        ret = self._try(self._datafeed.create, sourceid, name, config)
        self.feeds_for_monitors[config['jobid']] = ret['id']
        return ret

    def datafeed_delete(self, sourceid, feedid):
        ret = self._try(self._datafeed.delete, sourceid, feedid)
        # BUG FIX: go through the property (not the raw attribute, which
        # may still be None) so the cache is built before filtering the
        # deleted feed out of it.
        self._feeds_for_monitors = {
            k: v for k, v in self.feeds_for_monitors.items() if v != feedid
        }
        return ret

    def datafeed_list(self, sourceid):
        return self._try(self._datafeed.list, sourceid)

    def datasource_create(self, **body):
        return self._try(self._datasource.create, **body)

    def datasource_list(self):
        return self._try(self._datasource.list)

    def monitors_create(self, **params):
        body = {}
        ret = self._try(self._monitors.create, body, **params)
        self.monitors[ret['id']] = ret
        return ret

    def monitors_delete(self, jobid):
        ret = self._try(self._monitors.delete, jobid)
        self.monitors.pop(jobid)
        return ret

    def monitors_list(self):
        return self._try(self._monitors.list)

    def monitors_update(self, job_id, **params):
        body = {}
        ret = self._try(self._monitors.update, job_id, body, **params)
        self.monitors[ret['id']] = ret
        return ret

    def notifylists_delete(self, nlid):
        return self._try(self._notifylists.delete, nlid)

    def notifylists_create(self, **body):
        return self._try(self._notifylists.create, body)

    def notifylists_list(self):
        return self._try(self._notifylists.list)

    def records_create(self, zone, domain, _type, **params):
        return self._try(self._records.create, zone, domain, _type, **params)

    def records_delete(self, zone, domain, _type):
        return self._try(self._records.delete, zone, domain, _type)

    def records_retrieve(self, zone, domain, _type):
        return self._try(self._records.retrieve, zone, domain, _type)

    def records_update(self, zone, domain, _type, **params):
        return self._try(self._records.update, zone, domain, _type, **params)

    def zones_create(self, name):
        return self._try(self._zones.create, name)

    def zones_retrieve(self, name):
        return self._try(self._zones.retrieve, name)

    def _try(self, method, *args, **kwargs):
        '''Call *method*, retrying up to retry_count times on 429s.'''
        tries = self.retry_count
        while True:  # We'll raise to break after our tries expire
            try:
                return method(*args, **kwargs)
            except RateLimitException as e:
                if tries <= 1:
                    raise
                # decrement before logging so "%d remaining" is accurate
                tries -= 1
                period = float(e.period)
                # Logger.warn is a deprecated alias; use warning()
                self.log.warning('rate limit encountered, pausing '
                                 'for %ds and trying again, %d remaining',
                                 period, tries)
                sleep(period)
+
+
class Ns1Provider(BaseProvider):
'''
Ns1 provider
- nsone:
+ ns1:
+ # Required
class: octodns.provider.ns1.Ns1Provider
api_key: env/NS1_API_KEY
+ # Only required if using dynamic records
+ monitor_regions:
+ - lga
+ # Optional. Default: None. If set, back off in advance to avoid 429s
+ # from rate-limiting. Generally this should be set to the number
+ # of processes or workers hitting the API, e.g. the value of
+ # `max_workers`.
+ parallelism: 11
+ # Optional. Default: 4. Number of times to retry if a 429 response
+ # is received.
+ retry_count: 4
+ # Optional. Default: None. Additional options or overrides passed to
+ # the NS1 SDK config, as key-value pairs.
+ client_config:
+ endpoint: my.nsone.endpoint # Default: api.nsone.net
+ ignore-ssl-errors: true # Default: false
+ follow_pagination: false # Default: true
'''
SUPPORTS_GEO = True
- SUPPORTS_DYNAMIC = False
+ SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
- def __init__(self, id, api_key, *args, **kwargs):
- self.log = getLogger('Ns1Provider[{}]'.format(id))
- self.log.debug('__init__: id=%s, api_key=***', id)
- super(Ns1Provider, self).__init__(id, *args, **kwargs)
- self._client = NSONE(apiKey=api_key)
+ def _update_filter(self, filter, with_disabled):
+ if with_disabled:
+ filter['disabled'] = False
+ return (dict(sorted(filter.items(), key=lambda t: t[0])))
+ return filter
- def _data_for_A(self, _type, record):
def _UP_FILTER(self, with_disabled):
    """NS1 'up' filter: drop answers whose feeds report them down."""
    spec = {
        'config': {},
        'filter': 'up'
    }
    return self._update_filter(spec, with_disabled)
+
def _REGION_FILTER(self, with_disabled):
    """NS1 georegion geofence filter."""
    spec = {
        'config': {
            'remove_no_georegion': True
        },
        'filter': u'geofence_regional'
    }
    return self._update_filter(spec, with_disabled)
+
def _COUNTRY_FILTER(self, with_disabled):
    """NS1 country geofence filter."""
    spec = {
        'config': {
            'remove_no_location': True
        },
        'filter': u'geofence_country'
    }
    return self._update_filter(spec, with_disabled)
+
# In the NS1 UI/portal, this filter is called "SELECT FIRST GROUP" though
# the filter name in the NS1 api is 'select_first_region'
def _SELECT_FIRST_REGION_FILTER(self, with_disabled):
    """NS1 select-first-region ("SELECT FIRST GROUP") filter."""
    spec = {
        'config': {},
        'filter': u'select_first_region'
    }
    return self._update_filter(spec, with_disabled)
+
def _PRIORITY_FILTER(self, with_disabled):
    """NS1 priority (failover tier) filter."""
    spec = {
        'config': {
            'eliminate': u'1'
        },
        'filter': 'priority'
    }
    return self._update_filter(spec, with_disabled)
+
def _WEIGHTED_SHUFFLE_FILTER(self, with_disabled):
    """NS1 weighted-shuffle filter."""
    spec = {
        'config': {},
        'filter': u'weighted_shuffle'
    }
    return self._update_filter(spec, with_disabled)
+
def _SELECT_FIRST_N_FILTER(self, with_disabled):
    """NS1 select-first-N filter, limited to a single answer."""
    spec = {
        'config': {
            'N': u'1'
        },
        'filter': u'select_first_n'
    }
    return self._update_filter(spec, with_disabled)
+
def _BASIC_FILTER_CHAIN(self, with_disabled):
    """Filter chain shared by every dynamic record:
    up -> select-first-region -> priority -> weighted-shuffle ->
    select-first-N."""
    factories = (
        self._UP_FILTER,
        self._SELECT_FIRST_REGION_FILTER,
        self._PRIORITY_FILTER,
        self._WEIGHTED_SHUFFLE_FILTER,
        self._SELECT_FIRST_N_FILTER,
    )
    return [factory(with_disabled) for factory in factories]
+
def _FILTER_CHAIN_WITH_REGION(self, with_disabled):
    """Basic chain with the georegion fence inserted after 'up'."""
    chain = self._BASIC_FILTER_CHAIN(with_disabled)
    chain.insert(1, self._REGION_FILTER(with_disabled))
    return chain
+
def _FILTER_CHAIN_WITH_COUNTRY(self, with_disabled):
    """Basic chain with the country fence inserted after 'up'."""
    chain = self._BASIC_FILTER_CHAIN(with_disabled)
    chain.insert(1, self._COUNTRY_FILTER(with_disabled))
    return chain
+
def _FILTER_CHAIN_WITH_REGION_AND_COUNTRY(self, with_disabled):
    """Basic chain with both geofences (region then country) after 'up'."""
    chain = self._BASIC_FILTER_CHAIN(with_disabled)
    chain[1:1] = [
        self._REGION_FILTER(with_disabled),
        self._COUNTRY_FILTER(with_disabled),
    ]
    return chain
+
# Map NS1 georegion identifiers to the continent codes octoDNS uses.
_REGION_TO_CONTINENT = {
    'AFRICA': 'AF',
    'ASIAPAC': 'AS',
    'EUROPE': 'EU',
    'SOUTH-AMERICA': 'SA',
    'US-CENTRAL': 'NA',
    'US-EAST': 'NA',
    'US-WEST': 'NA',
}
# Inverse mapping; NA fans out to the three US georegions.
_CONTINENT_TO_REGIONS = {
    'AF': ('AFRICA',),
    'AS': ('ASIAPAC',),
    'EU': ('EUROPE',),
    'SA': ('SOUTH-AMERICA',),
    # TODO: what about CA, MX, and all the other NA countries?
    'NA': ('US-CENTRAL', 'US-EAST', 'US-WEST'),
}

# Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS:
# NS1 has no Oceania georegion, so OC is expressed as this country list.
_CONTINENT_TO_LIST_OF_COUNTRIES = {
    'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU',
           'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN',
           'TK', 'TO', 'TV', 'WF', 'WS'},
}
+
def __init__(self, id, api_key, retry_count=4, monitor_regions=None,
             parallelism=None, client_config=None, *args, **kwargs):
    """Create the provider and its wrapped NS1 client.

    monitor_regions is only required when managing dynamic records.
    """
    self.log = getLogger('Ns1Provider[{}]'.format(id))
    self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, '
                   'monitor_regions=%s, parallelism=%s, client_config=%s',
                   id, retry_count, monitor_regions, parallelism,
                   client_config)
    super(Ns1Provider, self).__init__(id, *args, **kwargs)
    self.monitor_regions = monitor_regions
    self._client = Ns1Client(api_key, parallelism, retry_count,
                             client_config)
+
def _valid_filter_config(self, filter_cfg, domain):
    """True when filter_cfg matches exactly what we would generate for
    the geo features (region/country fences) it contains."""
    with_disabled = self._disabled_flag_in_filters(filter_cfg, domain)
    has_region = self._REGION_FILTER(with_disabled) in filter_cfg
    has_country = self._COUNTRY_FILTER(with_disabled) in filter_cfg
    expected = self._get_updated_filter_chain(
        has_region, has_country, with_disabled)
    return filter_cfg == expected
+
def _get_updated_filter_chain(self, has_region, has_country,
                              with_disabled=True):
    """Pick the filter chain matching the geo features in use."""
    if has_region:
        if has_country:
            return self._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(
                with_disabled)
        return self._FILTER_CHAIN_WITH_REGION(with_disabled)
    if has_country:
        return self._FILTER_CHAIN_WITH_COUNTRY(with_disabled)
    return self._BASIC_FILTER_CHAIN(with_disabled)
+
+ def _encode_notes(self, data):
+ return ' '.join(['{}:{}'.format(k, v)
+ for k, v in sorted(data.items())])
+
+ def _parse_notes(self, note):
+ data = {}
+ if note:
+ for piece in note.split(' '):
+ try:
+ k, v = piece.split(':', 1)
+ data[k] = v
+ except ValueError:
+ pass
+ return data
+
+ def _data_for_geo_A(self, _type, record):
# record meta (which would include geo information is only
# returned when getting a record's detail, not from zone detail
geo = defaultdict(list)
@@ -47,8 +420,6 @@ class Ns1Provider(BaseProvider):
'type': _type,
}
values, codes = [], []
- if 'answers' not in record:
- values = record['short_answers']
for answer in record.get('answers', []):
meta = answer.get('meta', {})
if meta:
@@ -60,8 +431,7 @@ class Ns1Provider(BaseProvider):
us_state = meta.get('us_state', [])
ca_province = meta.get('ca_province', [])
for cntry in country:
- cn = transformations.cc_to_cn(cntry)
- con = transformations.cn_to_ctca2(cn)
+ con = country_alpha2_to_continent_code(cntry)
key = '{}-{}'.format(con, cntry)
geo[key].extend(answer['answer'])
for state in us_state:
@@ -76,14 +446,178 @@ class Ns1Provider(BaseProvider):
else:
values.extend(answer['answer'])
codes.append([])
- values = [unicode(x) for x in values]
+ values = [text_type(x) for x in values]
geo = OrderedDict(
- {unicode(k): [unicode(x) for x in v] for k, v in geo.items()}
+ {text_type(k): [text_type(x) for x in v] for k, v in geo.items()}
)
data['values'] = values
data['geo'] = geo
return data
+ def _parse_dynamic_pool_name(self, pool_name):
+ if pool_name.startswith('catchall__'):
+ # Special case for the old-style catchall prefix
+ return pool_name[10:]
+ try:
+ pool_name, _ = pool_name.rsplit('__', 1)
+ except ValueError:
+ pass
+ return pool_name
+
def _data_for_dynamic_A(self, _type, record):
    """Convert an NS1 "advanced" record that octoDNS created back into
    octoDNS dynamic-record data (pools, rules and default values).

    Raises Ns1Exception when the record's filter chain is not one this
    provider generates, since arbitrary configs can't be round-tripped.
    """
    # First make sure we have the expected filters config
    if not self._valid_filter_config(record['filters'], record['domain']):
        self.log.error('_data_for_dynamic_A: %s %s has unsupported '
                       'filters', record['domain'], _type)
        raise Ns1Exception('Unrecognized advanced record')

    # All regions (pools) will include the list of default values
    # (eventually) at higher priorities, we'll just add them to this set to
    # we'll have the complete collection.
    default = set()
    # Fill out the pools by walking the answers and looking at their
    # region.
    pools = defaultdict(lambda: {'fallback': None, 'values': []})
    for answer in record['answers']:
        # region (group name in the UI) is the pool name
        pool_name = answer['region']
        # Get the actual pool name by removing the type
        pool_name = self._parse_dynamic_pool_name(pool_name)
        pool = pools[pool_name]

        meta = answer['meta']
        value = text_type(answer['answer'][0])
        if meta['priority'] == 1:
            # priority 1 means this answer is part of the pools own values
            value_dict = {
                'value': value,
                'weight': int(meta.get('weight', 1)),
            }
            # If we have the original pool name and the catchall pool name
            # in the answers, they point at the same pool. Add values only
            # once
            if value_dict not in pool['values']:
                pool['values'].append(value_dict)
        else:
            # It's a fallback, we only care about it if it's a
            # final/default
            notes = self._parse_notes(meta.get('note', ''))
            if notes.get('from', False) == '--default--':
                default.add(value)

    # The regions objects map to rules, but it's a bit fuzzy since they're
    # tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
    # that may eventually run into problems, but I don't have any use-cases
    # examples currently where it would
    rules = {}
    for pool_name, region in sorted(record['regions'].items()):
        # Get the actual pool name by removing the type
        pool_name = self._parse_dynamic_pool_name(pool_name)

        meta = region['meta']
        notes = self._parse_notes(meta.get('note', ''))

        # NOTE(review): a missing rule-order note raises KeyError here —
        # presumably only records we wrote (which always carry it) reach
        # this path; confirm.
        rule_order = notes['rule-order']
        try:
            rule = rules[rule_order]
        except KeyError:
            rule = {
                'pool': pool_name,
                '_order': rule_order,
            }
            rules[rule_order] = rule

        # The group notes field in the UI is a `note` on the region here,
        # that's where we can find our pool's fallback.
        if 'fallback' in notes:
            # set the fallback pool name
            pools[pool_name]['fallback'] = notes['fallback']

        geos = set()

        # continents are mapped (imperfectly) to regions, but what about
        # Canada/North America
        for georegion in meta.get('georegion', []):
            geos.add(self._REGION_TO_CONTINENT[georegion])

        # Countries are easy enough to map, we just have to find their
        # continent
        #
        # NOTE: Special handling for Oceania
        # NS1 doesn't support Oceania as a region. So the Oceania countries
        # will be present in meta['country']. If all the countries in the
        # Oceania countries list are found, set the region to OC and remove
        # individual oceania country entries

        oc_countries = set()
        for country in meta.get('country', []):
            # country_alpha2_to_continent_code fails for Pitcairn ('PN')
            if country == 'PN':
                con = 'OC'
            else:
                con = country_alpha2_to_continent_code(country)

            if con == 'OC':
                oc_countries.add(country)
            else:
                # Adding only non-OC countries here to geos
                geos.add('{}-{}'.format(con, country))

        if oc_countries:
            if oc_countries == self._CONTINENT_TO_LIST_OF_COUNTRIES['OC']:
                # All OC countries found, so add 'OC' to geos
                geos.add('OC')
            else:
                # Partial OC countries found, just add them as-is to geos
                for c in oc_countries:
                    geos.add('{}-{}'.format('OC', c))

        # States are easy too, just assume NA-US (CA providences aren't
        # supported by octoDNS currently)
        for state in meta.get('us_state', []):
            geos.add('NA-US-{}'.format(state))

        if geos:
            # There are geos, combine them with any existing geos for this
            # pool and recorded the sorted unique set of them
            rule['geos'] = sorted(set(rule.get('geos', [])) | geos)

    # Order and convert to a list
    default = sorted(default)
    # Convert to list and order
    # NOTE(review): _order is a string, so rules sort lexicographically
    # ('10' < '2') — confirm rule orders stay single-digit.
    rules = list(rules.values())
    rules.sort(key=lambda r: (r['_order'], r['pool']))

    return {
        'dynamic': {
            'pools': pools,
            'rules': rules,
        },
        'ttl': record['ttl'],
        'type': _type,
        'values': sorted(default),
    }
+
def _data_for_A(self, _type, record):
    """Convert an NS1 A record, routing "advanced" (tier > 1) records
    to the dynamic or legacy-geo handlers."""
    if record.get('tier', 1) > 1:
        # Advanced record; a `from:` note on the first answer marks it
        # as one of our dynamic records, otherwise it's old-style geo
        try:
            first_answer_note = record['answers'][0]['meta']['note']
        except (IndexError, KeyError):
            first_answer_note = ''
        if 'from:' in first_answer_note:
            return self._data_for_dynamic_A(_type, record)
        return self._data_for_geo_A(_type, record)

    # This is a basic record, just convert it
    return {
        'ttl': record['ttl'],
        'type': _type,
        'values': [text_type(x) for x in record['short_answers']]
    }
+
_data_for_AAAA = _data_for_A
def _data_for_SPF(self, _type, record):
@@ -188,18 +722,29 @@ class Ns1Provider(BaseProvider):
target, lenient)
try:
- nsone_zone = self._client.loadZone(zone.name[:-1])
- records = nsone_zone.data['records']
+ ns1_zone_name = zone.name[:-1]
+ ns1_zone = self._client.zones_retrieve(ns1_zone_name)
+
+ records = []
+ geo_records = []
# change answers for certain types to always be absolute
- for record in records:
+ for record in ns1_zone['records']:
if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR',
'SRV']:
for i, a in enumerate(record['short_answers']):
if not a.endswith('.'):
record['short_answers'][i] = '{}.'.format(a)
- geo_records = nsone_zone.search(has_geo=True)
+ if record.get('tier', 1) > 1:
+ # Need to get the full record data for geo records
+ record = self._client.records_retrieve(ns1_zone_name,
+ record['domain'],
+ record['type'])
+ geo_records.append(record)
+ else:
+ records.append(record)
+
exists = True
except ResourceException as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
@@ -218,49 +763,397 @@ class Ns1Provider(BaseProvider):
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
name = zone.hostname_from_fqdn(record['domain'])
- record = Record.new(zone, name, data_for(_type, record),
- source=self, lenient=lenient)
+ data = data_for(_type, record)
+ record = Record.new(zone, name, data, source=self, lenient=lenient)
zone_hash[(_type, name)] = record
[zone.add_record(r, lenient=lenient) for r in zone_hash.values()]
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
- def _params_for_A(self, record):
- params = {'answers': record.values, 'ttl': record.ttl}
- if hasattr(record, 'geo'):
- # purposefully set non-geo answers to have an empty meta,
- # so that we know we did this on purpose if/when troubleshooting
- params['answers'] = [{"answer": [x], "meta": {}}
- for x in record.values]
- has_country = False
- for iso_region, target in record.geo.items():
- key = 'iso_region_code'
- value = iso_region
- if not has_country and \
- len(value.split('-')) > 1: # pragma: nocover
- has_country = True
- for answer in target.values:
- params['answers'].append(
- {
- 'answer': [answer],
- 'meta': {key: [value]},
+ def _params_for_geo_A(self, record):
+ # purposefully set non-geo answers to have an empty meta,
+ # so that we know we did this on purpose if/when troubleshooting
+ params = {
+ 'answers': [{"answer": [x], "meta": {}} for x in record.values],
+ 'ttl': record.ttl,
+ }
+
+ has_country = False
+ for iso_region, target in record.geo.items():
+ key = 'iso_region_code'
+ value = iso_region
+ if not has_country and len(value.split('-')) > 1:
+ has_country = True
+ for answer in target.values:
+ params['answers'].append(
+ {
+ 'answer': [answer],
+ 'meta': {key: [value]},
+ },
+ )
+
+ params['filters'] = []
+ if has_country:
+ params['filters'].append(
+ {"filter": "shuffle", "config": {}}
+ )
+ params['filters'].append(
+ {"filter": "geotarget_country", "config": {}}
+ )
+ params['filters'].append(
+ {"filter": "select_first_n",
+ "config": {"N": 1}}
+ )
+
+ return params, None
+
+ def _monitors_for(self, record):
+ monitors = {}
+
+ if getattr(record, 'dynamic', False):
+ expected_host = record.fqdn[:-1]
+ expected_type = record._type
+
+ for monitor in self._client.monitors.values():
+ data = self._parse_notes(monitor['notes'])
+ if expected_host == data['host'] and \
+ expected_type == data['type']:
+                # This monitor belongs to this record
+ config = monitor['config']
+ value = config['host']
+ monitors[value] = monitor
+
+ return monitors
+
+ def _uuid(self):
+ return uuid4().hex
+
+ def _feed_create(self, monitor):
+ monitor_id = monitor['id']
+ self.log.debug('_feed_create: monitor=%s', monitor_id)
+ # TODO: looks like length limit is 64 char
+ name = '{} - {}'.format(monitor['name'], self._uuid()[:6])
+
+ # Create the data feed
+ config = {
+ 'jobid': monitor_id,
+ }
+ feed = self._client.datafeed_create(self._client.datasource_id, name,
+ config)
+ feed_id = feed['id']
+ self.log.debug('_feed_create: feed=%s', feed_id)
+
+ return feed_id
+
+ def _monitor_create(self, monitor):
+ self.log.debug('_monitor_create: monitor="%s"', monitor['name'])
+ # Create the notify list
+ notify_list = [{
+ 'config': {
+ 'sourceid': self._client.datasource_id,
+ },
+ 'type': 'datafeed',
+ }]
+ nl = self._client.notifylists_create(name=monitor['name'],
+ notify_list=notify_list)
+ nl_id = nl['id']
+ self.log.debug('_monitor_create: notify_list=%s', nl_id)
+
+ # Create the monitor
+ monitor['notify_list'] = nl_id
+ monitor = self._client.monitors_create(**monitor)
+ monitor_id = monitor['id']
+ self.log.debug('_monitor_create: monitor=%s', monitor_id)
+
+ return monitor_id, self._feed_create(monitor)
+
+ def _monitor_gen(self, record, value):
+ host = record.fqdn[:-1]
+ _type = record._type
+
+ ret = {
+ 'active': True,
+ 'config': {
+ 'connect_timeout': 2000,
+ 'host': value,
+ 'port': record.healthcheck_port,
+ 'response_timeout': 10000,
+ 'ssl': record.healthcheck_protocol == 'HTTPS',
+ },
+ 'frequency': 60,
+ 'job_type': 'tcp',
+ 'name': '{} - {} - {}'.format(host, _type, value),
+ 'notes': self._encode_notes({
+ 'host': host,
+ 'type': _type,
+ }),
+ 'policy': 'quorum',
+ 'rapid_recheck': False,
+ 'region_scope': 'fixed',
+ 'regions': self.monitor_regions,
+ }
+
+ if record.healthcheck_protocol != 'TCP':
+            # If it's HTTP(S) we need to send the request string
+ path = record.healthcheck_path
+ host = record.healthcheck_host
+ request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \
+ r'User-agent: NS1\r\n\r\n'.format(path=path, host=host)
+ ret['config']['send'] = request
+ # We'll also expect a HTTP response
+ ret['rules'] = [{
+ 'comparison': 'contains',
+ 'key': 'output',
+ 'value': '200 OK',
+ }]
+
+ return ret
+
+ def _monitor_is_match(self, expected, have):
+ # Make sure what we have matches what's in expected exactly. Anything
+ # else in have will be ignored.
+ for k, v in expected.items():
+ if have.get(k, '--missing--') != v:
+ return False
+
+ return True
+
+ def _monitor_sync(self, record, value, existing):
+ self.log.debug('_monitor_sync: record=%s, value=%s', record.fqdn,
+ value)
+ expected = self._monitor_gen(record, value)
+
+ if existing:
+ self.log.debug('_monitor_sync: existing=%s', existing['id'])
+ monitor_id = existing['id']
+
+ if not self._monitor_is_match(expected, existing):
+ self.log.debug('_monitor_sync: existing needs update')
+ # Update the monitor to match expected, everything else will be
+ # left alone and assumed correct
+ self._client.monitors_update(monitor_id, **expected)
+
+ feed_id = self._client.feeds_for_monitors.get(monitor_id)
+ if feed_id is None:
+ self.log.warn('_monitor_sync: %s (%s) missing feed, creating',
+ existing['name'], monitor_id)
+ feed_id = self._feed_create(existing)
+ else:
+ self.log.debug('_monitor_sync: needs create')
+ # We don't have an existing monitor create it (and related bits)
+ monitor_id, feed_id = self._monitor_create(expected)
+
+ return monitor_id, feed_id
+
+ def _monitors_gc(self, record, active_monitor_ids=None):
+ self.log.debug('_monitors_gc: record=%s, active_monitor_ids=%s',
+ record.fqdn, active_monitor_ids)
+
+ if active_monitor_ids is None:
+ active_monitor_ids = set()
+
+ for monitor in self._monitors_for(record).values():
+ monitor_id = monitor['id']
+ if monitor_id in active_monitor_ids:
+ continue
+
+ self.log.debug('_monitors_gc: deleting %s', monitor_id)
+
+ feed_id = self._client.feeds_for_monitors.get(monitor_id)
+ if feed_id:
+ self._client.datafeed_delete(self._client.datasource_id,
+ feed_id)
+
+ self._client.monitors_delete(monitor_id)
+
+ notify_list_id = monitor['notify_list']
+ self._client.notifylists_delete(notify_list_id)
+
+ def _add_answers_for_pool(self, answers, default_answers, pool_name,
+ pool_label, pool_answers, pools, priority):
+ current_pool_name = pool_name
+ seen = set()
+ while current_pool_name and current_pool_name not in seen:
+ seen.add(current_pool_name)
+ pool = pools[current_pool_name]
+ for answer in pool_answers[current_pool_name]:
+ answer = {
+ 'answer': answer['answer'],
+ 'meta': {
+ 'priority': priority,
+ 'note': self._encode_notes({
+ 'from': pool_label,
+ }),
+ 'up': {
+ 'feed': answer['feed_id'],
},
- )
- params['filters'] = []
- if has_country:
- params['filters'].append(
- {"filter": "shuffle", "config": {}}
- )
- params['filters'].append(
- {"filter": "geotarget_country", "config": {}}
- )
- params['filters'].append(
- {"filter": "select_first_n",
- "config": {"N": 1}}
- )
- self.log.debug("params for A: %s", params)
- return params
+ 'weight': answer['weight'],
+ },
+ 'region': pool_label, # the one we're answering
+ }
+ answers.append(answer)
+
+ current_pool_name = pool.data.get('fallback', None)
+ priority += 1
+
+ # Static/default
+ for answer in default_answers:
+ answer = {
+ 'answer': answer['answer'],
+ 'meta': {
+ 'priority': priority,
+ 'note': self._encode_notes({
+ 'from': '--default--',
+ }),
+ 'up': True,
+ 'weight': 1,
+ },
+ 'region': pool_label, # the one we're answering
+ }
+ answers.append(answer)
+
+ def _params_for_dynamic_A(self, record):
+ pools = record.dynamic.pools
+
+ # Convert rules to regions
+ has_country = False
+ has_region = False
+ regions = {}
+
+ for i, rule in enumerate(record.dynamic.rules):
+ pool_name = rule.data['pool']
+
+ notes = {
+ 'rule-order': i,
+ }
+
+ fallback = pools[pool_name].data.get('fallback', None)
+ if fallback:
+ notes['fallback'] = fallback
+
+ country = set()
+ georegion = set()
+ us_state = set()
+
+ for geo in rule.data.get('geos', []):
+ n = len(geo)
+ if n == 8:
+ # US state, e.g. NA-US-KY
+ us_state.add(geo[-2:])
+ # For filtering. State filtering is done by the country
+ # filter
+ has_country = True
+ elif n == 5:
+ # Country, e.g. EU-FR
+ country.add(geo[-2:])
+ has_country = True
+ else:
+ # Continent, e.g. AS
+ if geo in self._CONTINENT_TO_REGIONS:
+ georegion.update(self._CONTINENT_TO_REGIONS[geo])
+ has_region = True
+ else:
+ # No maps for geo in _CONTINENT_TO_REGIONS.
+ # Use the country list
+ self.log.debug('Converting geo {} to country list'.
+ format(geo))
+ for c in self._CONTINENT_TO_LIST_OF_COUNTRIES[geo]:
+ country.add(c)
+ has_country = True
+
+ meta = {
+ 'note': self._encode_notes(notes),
+ }
+
+ if georegion:
+ georegion_meta = dict(meta)
+ georegion_meta['georegion'] = sorted(georegion)
+ regions['{}__georegion'.format(pool_name)] = {
+ 'meta': georegion_meta,
+ }
+
+ if country or us_state:
+ # If there's country and/or states its a country pool,
+ # countries and states can coexist as they're handled by the
+ # same step in the filterchain (countries and georegions
+                # cannot as they're separate stages and run the risk of
+ # eliminating all options)
+ country_state_meta = dict(meta)
+ if country:
+ country_state_meta['country'] = sorted(country)
+ if us_state:
+ country_state_meta['us_state'] = sorted(us_state)
+ regions['{}__country'.format(pool_name)] = {
+ 'meta': country_state_meta,
+ }
+
+ if not georegion and not country and not us_state:
+ # If there's no targeting it's a catchall
+ regions['{}__catchall'.format(pool_name)] = {
+ 'meta': meta,
+ }
+
+ existing_monitors = self._monitors_for(record)
+ active_monitors = set()
+
+ # Build a list of primary values for each pool, including their
+ # feed_id (monitor)
+ pool_answers = defaultdict(list)
+ for pool_name, pool in sorted(pools.items()):
+ for value in pool.data['values']:
+ weight = value['weight']
+ value = value['value']
+ existing = existing_monitors.get(value)
+ monitor_id, feed_id = self._monitor_sync(record, value,
+ existing)
+ active_monitors.add(monitor_id)
+ pool_answers[pool_name].append({
+ 'answer': [value],
+ 'weight': weight,
+ 'feed_id': feed_id,
+ })
+
+ default_answers = [{
+ 'answer': [v],
+ 'weight': 1,
+ } for v in record.values]
+
+ # Build our list of answers
+ # The regions dictionary built above already has the required pool
+ # names. Iterate over them and add answers.
+ answers = []
+ for pool_name in sorted(regions.keys()):
+ priority = 1
+
+ # Dynamic/health checked
+ pool_label = pool_name
+ # Remove the pool type from the end of the name
+ pool_name = self._parse_dynamic_pool_name(pool_name)
+ self._add_answers_for_pool(answers, default_answers, pool_name,
+ pool_label, pool_answers, pools,
+ priority)
+
+ # Update filters as necessary
+ filters = self._get_updated_filter_chain(has_region, has_country)
+
+ return {
+ 'answers': answers,
+ 'filters': filters,
+ 'regions': regions,
+ 'ttl': record.ttl,
+ }, active_monitors
+
+ def _params_for_A(self, record):
+ if getattr(record, 'dynamic', False):
+ return self._params_for_dynamic_A(record)
+ elif hasattr(record, 'geo'):
+ return self._params_for_geo_A(record)
+
+ return {
+ 'answers': record.values,
+ 'ttl': record.ttl,
+ }, None
_params_for_AAAA = _params_for_A
_params_for_NS = _params_for_A
@@ -270,81 +1163,144 @@ class Ns1Provider(BaseProvider):
# escaped in values so we have to strip them here and add
# them when going the other way
values = [v.replace('\\;', ';') for v in record.values]
- return {'answers': values, 'ttl': record.ttl}
+ return {'answers': values, 'ttl': record.ttl}, None
_params_for_TXT = _params_for_SPF
def _params_for_CAA(self, record):
values = [(v.flags, v.tag, v.value) for v in record.values]
- return {'answers': values, 'ttl': record.ttl}
+ return {'answers': values, 'ttl': record.ttl}, None
+ # TODO: dynamic CNAME support
def _params_for_CNAME(self, record):
- return {'answers': [record.value], 'ttl': record.ttl}
+ return {'answers': [record.value], 'ttl': record.ttl}, None
_params_for_ALIAS = _params_for_CNAME
_params_for_PTR = _params_for_CNAME
def _params_for_MX(self, record):
values = [(v.preference, v.exchange) for v in record.values]
- return {'answers': values, 'ttl': record.ttl}
+ return {'answers': values, 'ttl': record.ttl}, None
def _params_for_NAPTR(self, record):
values = [(v.order, v.preference, v.flags, v.service, v.regexp,
v.replacement) for v in record.values]
- return {'answers': values, 'ttl': record.ttl}
+ return {'answers': values, 'ttl': record.ttl}, None
def _params_for_SRV(self, record):
values = [(v.priority, v.weight, v.port, v.target)
for v in record.values]
- return {'answers': values, 'ttl': record.ttl}
+ return {'answers': values, 'ttl': record.ttl}, None
- def _get_name(self, record):
- return record.fqdn[:-1] if record.name == '' else record.name
+ def _get_ns1_filters(self, ns1_zone_name):
+ ns1_filters = {}
+ ns1_zone = {}
- def _apply_Create(self, nsone_zone, change):
+ try:
+ ns1_zone = self._client.zones_retrieve(ns1_zone_name)
+ except ResourceException as e:
+ if e.message != self.ZONE_NOT_FOUND_MESSAGE:
+ raise
+
+ if 'records' in ns1_zone:
+ for ns1_record in ns1_zone['records']:
+ if ns1_record.get('tier', 1) > 1:
+ # Need to get the full record data for geo records
+ full_rec = self._client.records_retrieve(
+ ns1_zone_name,
+ ns1_record['domain'],
+ ns1_record['type'])
+ if 'filters' in full_rec:
+ filter_key = '{}.'.format(ns1_record['domain'])
+ ns1_filters[filter_key] = full_rec['filters']
+
+ return ns1_filters
+
+ def _disabled_flag_in_filters(self, filters, domain):
+ disabled_count = ['disabled' in f for f in filters].count(True)
+ if disabled_count and disabled_count != len(filters):
+ # Some filters have the disabled flag, and some don't. Disallow
+ exception_msg = 'Mixed disabled flag in filters for {}'.format(
+ domain)
+ raise Ns1Exception(exception_msg)
+ return disabled_count == len(filters)
+
+ def _extra_changes(self, desired, changes, **kwargs):
+ self.log.debug('_extra_changes: desired=%s', desired.name)
+ ns1_filters = self._get_ns1_filters(desired.name[:-1])
+ changed = set([c.record for c in changes])
+ extra = []
+ for record in desired.records:
+ if record in changed or not getattr(record, 'dynamic', False):
+                # Already changed, or not dynamic, no need to check it
+ continue
+
+ # Filter normalization
+ # Check if filters for existing domains need an update
+ # Needs an explicit check since there might be no change in the
+ # config at all. Filters however might still need an update
+ domain = '{}.{}'.format(record.name, record.zone.name)
+ if domain in ns1_filters:
+ domain_filters = ns1_filters[domain]
+ if not self._disabled_flag_in_filters(domain_filters, domain):
+ # 'disabled' entry absent in filter config. Need to update
+ # filters. Update record
+ self.log.info('_extra_changes: change in filters for %s',
+ domain)
+ extra.append(Update(record, record))
+ continue
+
+ for have in self._monitors_for(record).values():
+ value = have['config']['host']
+ expected = self._monitor_gen(record, value)
+ # TODO: find values which have missing monitors
+ if not self._monitor_is_match(expected, have):
+ self.log.info('_extra_changes: monitor mis-match for %s',
+ expected['name'])
+ extra.append(Update(record, record))
+ break
+ if not have.get('notify_list'):
+ self.log.info('_extra_changes: broken monitor no notify '
+ 'list %s (%s)', have['name'], have['id'])
+ extra.append(Update(record, record))
+ break
+
+ return extra
+
+ def _apply_Create(self, ns1_zone, change):
new = change.new
- name = self._get_name(new)
+ zone = new.zone.name[:-1]
+ domain = new.fqdn[:-1]
_type = new._type
- params = getattr(self, '_params_for_{}'.format(_type))(new)
- meth = getattr(nsone_zone, 'add_{}'.format(_type))
- try:
- meth(name, **params)
- except RateLimitException as e:
- period = float(e.period)
- self.log.warn('_apply_Create: rate limit encountered, pausing '
- 'for %ds and trying again', period)
- sleep(period)
- meth(name, **params)
+ params, active_monitor_ids = \
+ getattr(self, '_params_for_{}'.format(_type))(new)
+ self._client.records_create(zone, domain, _type, **params)
+ self._monitors_gc(new, active_monitor_ids)
- def _apply_Update(self, nsone_zone, change):
- existing = change.existing
- name = self._get_name(existing)
- _type = existing._type
- record = nsone_zone.loadRecord(name, _type)
+ def _apply_Update(self, ns1_zone, change):
new = change.new
- params = getattr(self, '_params_for_{}'.format(_type))(new)
- try:
- record.update(**params)
- except RateLimitException as e:
- period = float(e.period)
- self.log.warn('_apply_Update: rate limit encountered, pausing '
- 'for %ds and trying again', period)
- sleep(period)
- record.update(**params)
+ zone = new.zone.name[:-1]
+ domain = new.fqdn[:-1]
+ _type = new._type
+ params, active_monitor_ids = \
+ getattr(self, '_params_for_{}'.format(_type))(new)
+ self._client.records_update(zone, domain, _type, **params)
+ self._monitors_gc(new, active_monitor_ids)
- def _apply_Delete(self, nsone_zone, change):
+ def _apply_Delete(self, ns1_zone, change):
existing = change.existing
- name = self._get_name(existing)
+ zone = existing.zone.name[:-1]
+ domain = existing.fqdn[:-1]
_type = existing._type
- record = nsone_zone.loadRecord(name, _type)
- try:
- record.delete()
- except RateLimitException as e:
- period = float(e.period)
- self.log.warn('_apply_Delete: rate limit encountered, pausing '
- 'for %ds and trying again', period)
- sleep(period)
- record.delete()
+ self._client.records_delete(zone, domain, _type)
+ self._monitors_gc(existing)
+
+ def _has_dynamic(self, changes):
+ for change in changes:
+ if getattr(change.record, 'dynamic', False):
+ return True
+
+ return False
def _apply(self, plan):
desired = plan.desired
@@ -352,16 +1308,22 @@ class Ns1Provider(BaseProvider):
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
+ # Make sure that if we're going to make any dynamic changes that we
+ # have monitor_regions configured before touching anything so we can
+ # abort early and not half-apply
+ if self._has_dynamic(changes) and self.monitor_regions is None:
+ raise Ns1Exception('Monitored record, but monitor_regions not set')
+
domain_name = desired.name[:-1]
try:
- nsone_zone = self._client.loadZone(domain_name)
+ ns1_zone = self._client.zones_retrieve(domain_name)
except ResourceException as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
self.log.debug('_apply: no matching zone, creating')
- nsone_zone = self._client.createZone(domain_name)
+ ns1_zone = self._client.zones_create(domain_name)
for change in changes:
class_name = change.__class__.__name__
- getattr(self, '_apply_{}'.format(class_name))(nsone_zone,
+ getattr(self, '_apply_{}'.format(class_name))(ns1_zone,
change)
diff --git a/octodns/provider/ovh.py b/octodns/provider/ovh.py
index d968da4..54f62ac 100644
--- a/octodns/provider/ovh.py
+++ b/octodns/provider/ovh.py
@@ -9,6 +9,7 @@ import base64
import binascii
import logging
from collections import defaultdict
+from six import text_type
import ovh
from ovh import ResourceNotFoundError
@@ -39,8 +40,8 @@ class OvhProvider(BaseProvider):
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
- SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
- 'SPF', 'SRV', 'SSHFP', 'TXT'))
+ SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS',
+ 'PTR', 'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
@@ -64,7 +65,7 @@ class OvhProvider(BaseProvider):
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
- if e.message != self.ZONE_NOT_FOUND_MESSAGE:
+ if text_type(e) != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
@@ -138,6 +139,22 @@ class OvhProvider(BaseProvider):
'value': record['target']
}
+ @staticmethod
+ def _data_for_CAA(_type, records):
+ values = []
+ for record in records:
+ flags, tag, value = record['target'].split(' ', 2)
+ values.append({
+ 'flags': flags,
+ 'tag': tag,
+ 'value': value[1:-1]
+ })
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
@staticmethod
def _data_for_MX(_type, records):
values = []
@@ -243,6 +260,17 @@ class OvhProvider(BaseProvider):
'fieldType': record._type
}
+ @staticmethod
+ def _params_for_CAA(record):
+ for value in record.values:
+ yield {
+ 'target': '{} {} "{}"'.format(value.flags, value.tag,
+ value.value),
+ 'subDomain': record.name,
+ 'ttl': record.ttl,
+ 'fieldType': record._type
+ }
+
@staticmethod
def _params_for_MX(record):
for value in record.values:
@@ -322,10 +350,10 @@ class OvhProvider(BaseProvider):
'n': lambda _: True,
'g': lambda _: True}
- splitted = value.split('\\;')
+ splitted = [v for v in value.split('\\;') if v]
found_key = False
for splitted_value in splitted:
- sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
+ sub_split = [x.strip() for x in splitted_value.split("=", 1)]
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
@@ -343,7 +371,7 @@ class OvhProvider(BaseProvider):
@staticmethod
def _is_valid_dkim_key(key):
try:
- base64.decodestring(key)
+ base64.decodestring(bytearray(key, 'utf-8'))
except binascii.Error:
return False
return True
diff --git a/octodns/provider/plan.py b/octodns/provider/plan.py
index bae244f..af6863a 100644
--- a/octodns/provider/plan.py
+++ b/octodns/provider/plan.py
@@ -5,10 +5,11 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from StringIO import StringIO
from logging import DEBUG, ERROR, INFO, WARN, getLogger
from sys import stdout
+from six import StringIO, text_type
+
class UnsafePlan(Exception):
pass
@@ -26,7 +27,11 @@ class Plan(object):
delete_pcent_threshold=MAX_SAFE_DELETE_PCENT):
self.existing = existing
self.desired = desired
- self.changes = changes
+ # Sort changes to ensure we always have a consistent ordering for
+ # things that make assumptions about that. Many providers will do their
+ # own ordering to ensure things happen in a way that makes sense to
+ # them and/or is as safe as possible.
+ self.changes = sorted(changes)
self.exists = exists
self.update_pcent_threshold = update_pcent_threshold
self.delete_pcent_threshold = delete_pcent_threshold
@@ -122,7 +127,7 @@ class PlanLogger(_PlanOutput):
buf.write('* ')
buf.write(target.id)
buf.write(' (')
- buf.write(target)
+ buf.write(text_type(target))
buf.write(')\n* ')
if plan.exists is False:
@@ -135,7 +140,7 @@ class PlanLogger(_PlanOutput):
buf.write('\n* ')
buf.write('Summary: ')
- buf.write(plan)
+ buf.write(text_type(plan))
buf.write('\n')
else:
buf.write(hr)
@@ -147,11 +152,11 @@ class PlanLogger(_PlanOutput):
def _value_stringifier(record, sep):
try:
- values = [unicode(v) for v in record.values]
+ values = [text_type(v) for v in record.values]
except AttributeError:
values = [record.value]
for code, gv in sorted(getattr(record, 'geo', {}).items()):
- vs = ', '.join([unicode(v) for v in gv.values])
+ vs = ', '.join([text_type(v) for v in gv.values])
values.append('{}: {}'.format(code, vs))
return sep.join(values)
@@ -193,7 +198,7 @@ class PlanMarkdown(_PlanOutput):
fh.write(' | ')
# TTL
if existing:
- fh.write(unicode(existing.ttl))
+ fh.write(text_type(existing.ttl))
fh.write(' | ')
fh.write(_value_stringifier(existing, '; '))
fh.write(' | |\n')
@@ -201,7 +206,7 @@ class PlanMarkdown(_PlanOutput):
fh.write('| | | | ')
if new:
- fh.write(unicode(new.ttl))
+ fh.write(text_type(new.ttl))
fh.write(' | ')
fh.write(_value_stringifier(new, '; '))
fh.write(' | ')
@@ -210,7 +215,7 @@ class PlanMarkdown(_PlanOutput):
fh.write(' |\n')
fh.write('\nSummary: ')
- fh.write(unicode(plan))
+ fh.write(text_type(plan))
fh.write('\n\n')
else:
fh.write('## No changes were planned\n')
@@ -261,7 +266,7 @@ class PlanHtml(_PlanOutput):
# TTL
if existing:
fh.write(' | ')
- fh.write(unicode(existing.ttl))
+ fh.write(text_type(existing.ttl))
fh.write(' | \n ')
fh.write(_value_stringifier(existing, ' '))
fh.write(' | \n | \n \n')
@@ -270,7 +275,7 @@ class PlanHtml(_PlanOutput):
if new:
fh.write(' ')
- fh.write(unicode(new.ttl))
+ fh.write(text_type(new.ttl))
fh.write(' | \n ')
fh.write(_value_stringifier(new, ' '))
fh.write(' | \n ')
@@ -279,7 +284,7 @@ class PlanHtml(_PlanOutput):
fh.write(' | \n \n')
fh.write(' \n | Summary: ')
- fh.write(unicode(plan))
+ fh.write(text_type(plan))
fh.write(' | \n
\n\n')
else:
fh.write('No changes were planned')
diff --git a/octodns/provider/powerdns.py b/octodns/provider/powerdns.py
index 8d75163..bcb6980 100644
--- a/octodns/provider/powerdns.py
+++ b/octodns/provider/powerdns.py
@@ -19,8 +19,8 @@ class PowerDnsBaseProvider(BaseProvider):
'PTR', 'SPF', 'SSHFP', 'SRV', 'TXT'))
TIMEOUT = 5
- def __init__(self, id, host, api_key, port=8081, scheme="http",
- timeout=TIMEOUT, *args, **kwargs):
+ def __init__(self, id, host, api_key, port=8081,
+ scheme="http", timeout=TIMEOUT, *args, **kwargs):
super(PowerDnsBaseProvider, self).__init__(id, *args, **kwargs)
self.host = host
@@ -28,6 +28,8 @@ class PowerDnsBaseProvider(BaseProvider):
self.scheme = scheme
self.timeout = timeout
+ self._powerdns_version = None
+
sess = Session()
sess.headers.update({'X-API-Key': api_key})
self._sess = sess
@@ -36,7 +38,8 @@ class PowerDnsBaseProvider(BaseProvider):
self.log.debug('_request: method=%s, path=%s', method, path)
url = '{}://{}:{}/api/v1/servers/localhost/{}' \
- .format(self.scheme, self.host, self.port, path)
+ .format(self.scheme, self.host, self.port, path).rstrip("/")
+ # Strip trailing / from url.
resp = self._sess.request(method, url, json=data, timeout=self.timeout)
self.log.debug('_request: status=%d', resp.status_code)
resp.raise_for_status()
@@ -165,6 +168,42 @@ class PowerDnsBaseProvider(BaseProvider):
'ttl': rrset['ttl']
}
+ @property
+ def powerdns_version(self):
+ if self._powerdns_version is None:
+ try:
+ resp = self._get('')
+ except HTTPError as e:
+ if e.response.status_code == 401:
+ # Nicer error message for auth problems
+ raise Exception('PowerDNS unauthorized host={}'
+ .format(self.host))
+ raise
+
+ version = resp.json()['version']
+ self.log.debug('powerdns_version: got version %s from server',
+ version)
+ self._powerdns_version = [int(p) for p in version.split('.')]
+
+ return self._powerdns_version
+
+ @property
+ def soa_edit_api(self):
+ # >>> [4, 4, 3] >= [4, 3]
+ # True
+ # >>> [4, 3, 3] >= [4, 3]
+ # True
+ # >>> [4, 1, 3] >= [4, 3]
+ # False
+ if self.powerdns_version >= [4, 3]:
+ return 'DEFAULT'
+ return 'INCEPTION-INCREMENT'
+
+ @property
+ def check_status_not_found(self):
+ # >=4.2.x returns 404 when not found
+ return self.powerdns_version >= [4, 2]
+
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
@@ -174,11 +213,20 @@ class PowerDnsBaseProvider(BaseProvider):
resp = self._get('zones/{}'.format(zone.name))
self.log.debug('populate: loaded')
except HTTPError as e:
+ error = self._get_error(e)
if e.response.status_code == 401:
# Nicer error message for auth problems
raise Exception('PowerDNS unauthorized host={}'
.format(self.host))
- elif e.response.status_code == 422:
+ elif e.response.status_code == 404 \
+ and self.check_status_not_found:
+ # 404 means powerdns doesn't know anything about the requested
+ # domain. We'll just ignore it here and leave the zone
+ # untouched.
+ pass
+ elif e.response.status_code == 422 \
+ and error.startswith('Could not find domain ') \
+ and not self.check_status_not_found:
# 422 means powerdns doesn't know anything about the requested
# domain. We'll just ignore it here and leave the zone
# untouched.
@@ -338,23 +386,34 @@ class PowerDnsBaseProvider(BaseProvider):
self.log.debug('_apply: patched')
except HTTPError as e:
error = self._get_error(e)
- if e.response.status_code != 422 or \
- not error.startswith('Could not find domain '):
- self.log.error('_apply: status=%d, text=%s',
- e.response.status_code,
- e.response.text)
+ if not (
+ (
+ e.response.status_code == 404 and
+ self.check_status_not_found
+ ) or (
+ e.response.status_code == 422 and
+ error.startswith('Could not find domain ') and
+ not self.check_status_not_found
+ )
+ ):
+ self.log.error(
+ '_apply: status=%d, text=%s',
+ e.response.status_code,
+ e.response.text)
raise
+
self.log.info('_apply: creating zone=%s', desired.name)
- # 422 means powerdns doesn't know anything about the requested
- # domain. We'll try to create it with the correct records instead
- # of update. Hopefully all the mods are creates :-)
+ # 404 or 422 means powerdns doesn't know anything about the
+ # requested domain. We'll try to create it with the correct
+ # records instead of update. Hopefully all the mods are
+ # creates :-)
data = {
'name': desired.name,
'kind': 'Master',
'masters': [],
'nameservers': [],
'rrsets': mods,
- 'soa_edit_api': 'INCEPTION-INCREMENT',
+ 'soa_edit_api': self.soa_edit_api,
'serial': 0,
}
try:
@@ -391,13 +450,15 @@ class PowerDnsProvider(PowerDnsBaseProvider):
'''
def __init__(self, id, host, api_key, port=8081, nameserver_values=None,
- nameserver_ttl=600, *args, **kwargs):
+ nameserver_ttl=600,
+ *args, **kwargs):
self.log = logging.getLogger('PowerDnsProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, host=%s, port=%d, '
'nameserver_values=%s, nameserver_ttl=%d',
id, host, port, nameserver_values, nameserver_ttl)
super(PowerDnsProvider, self).__init__(id, host=host, api_key=api_key,
- port=port, *args, **kwargs)
+ port=port,
+ *args, **kwargs)
self.nameserver_values = nameserver_values
self.nameserver_ttl = nameserver_ttl
diff --git a/octodns/provider/rackspace.py b/octodns/provider/rackspace.py
index 5038929..7fed05b 100644
--- a/octodns/provider/rackspace.py
+++ b/octodns/provider/rackspace.py
@@ -7,13 +7,16 @@ from __future__ import absolute_import, division, print_function, \
from requests import HTTPError, Session, post
from collections import defaultdict
import logging
-import string
import time
from ..record import Record
from .base import BaseProvider
+def _value_keyer(v):
+ return (v.get('type', ''), v['name'], v.get('data', ''))
+
+
def add_trailing_dot(s):
assert s
assert s[-1] != '.'
@@ -28,12 +31,12 @@ def remove_trailing_dot(s):
def escape_semicolon(s):
assert s
- return string.replace(s, ';', '\\;')
+ return s.replace(';', '\\;')
def unescape_semicolon(s):
assert s
- return string.replace(s, '\\;', ';')
+ return s.replace('\\;', ';')
class RackspaceProvider(BaseProvider):
@@ -367,11 +370,9 @@ class RackspaceProvider(BaseProvider):
self._delete('domains/{}/records?{}'.format(domain_id, params))
if updates:
- data = {"records": sorted(updates, key=lambda v: v['name'])}
+ data = {"records": sorted(updates, key=_value_keyer)}
self._put('domains/{}/records'.format(domain_id), data=data)
if creates:
- data = {"records": sorted(creates, key=lambda v: v['type'] +
- v['name'] +
- v.get('data', ''))}
+ data = {"records": sorted(creates, key=_value_keyer)}
self._post('domains/{}/records'.format(domain_id), data=data)
diff --git a/octodns/provider/route53.py b/octodns/provider/route53.py
index b645772..0d5bab9 100644
--- a/octodns/provider/route53.py
+++ b/octodns/provider/route53.py
@@ -8,14 +8,18 @@ from __future__ import absolute_import, division, print_function, \
from boto3 import client
from botocore.config import Config
from collections import defaultdict
-from incf.countryutils.transformations import cca_to_ctca2
+from ipaddress import AddressValueError, ip_address
+from pycountry_convert import country_alpha2_to_continent_code
from uuid import uuid4
import logging
import re
-from ..record import Record, Update
-from .base import BaseProvider
+from six import text_type
+from ..equality import EqualityTupleMixin
+from ..record import Record, Update
+from ..record.geo import GeoCodes
+from .base import BaseProvider
octal_re = re.compile(r'\\(\d\d\d)')
@@ -26,29 +30,115 @@ def _octal_replace(s):
return octal_re.sub(lambda m: chr(int(m.group(1), 8)), s)
-class _Route53Record(object):
+class _Route53Record(EqualityTupleMixin):
@classmethod
- def new(self, provider, record, creating):
+ def _new_dynamic(cls, provider, record, hosted_zone_id, creating):
+ # Creates the RRSets that correspond to the given dynamic record
ret = set()
- if getattr(record, 'geo', False):
- ret.add(_Route53GeoDefault(provider, record, creating))
- for ident, geo in record.geo.items():
- ret.add(_Route53GeoRecord(provider, record, ident, geo,
- creating))
- else:
- ret.add(_Route53Record(provider, record, creating))
+
+ # HostedZoneId wants just the last bit, but the place we're getting
+ # this from looks like /hostedzone/Z424CArX3BB224
+ hosted_zone_id = hosted_zone_id.split('/', 2)[-1]
+
+ # Create the default pool which comes from the base `values` of the
+ # record object. It's only used if all other values fail their
+ # healthchecks, which hopefully never happens.
+ fqdn = record.fqdn
+ ret.add(_Route53Record(provider, record, creating,
+ '_octodns-default-pool.{}'.format(fqdn)))
+
+ # Pools
+ for pool_name, pool in record.dynamic.pools.items():
+
+ # Create the primary, this will be the rrset that geo targeted
+ # rrsets will point to when they want to use a pool of values. It's
+ # a primary and observes target health so if all the values for
+ # this pool go red, we'll use the fallback/SECONDARY just below
+ ret.add(_Route53DynamicPool(provider, hosted_zone_id, record,
+ pool_name, creating))
+
+ # Create the fallback for this pool
+ fallback = pool.data.get('fallback', False)
+ if fallback:
+ # We have an explicitly configured fallback, another pool to
+ # use if all our values go red. This RRSet configures that pool
+ # as the next best option
+ ret.add(_Route53DynamicPool(provider, hosted_zone_id, record,
+ pool_name, creating,
+ target_name=fallback))
+ else:
+ # We fallback on the default, no explicit fallback so if all of
+ # this pool's values go red we'll fallback to the base
+ # (non-health-checked) default pool of values
+ ret.add(_Route53DynamicPool(provider, hosted_zone_id, record,
+ pool_name, creating,
+ target_name='default'))
+
+ # Create the values for this pool. These are health checked and in
+ # general each unique value will have an associated healthcheck.
+ # The PRIMARY pool up above will point to these RRSets which will
+ # be served out according to their weights
+ for i, value in enumerate(pool.data['values']):
+ weight = value['weight']
+ value = value['value']
+ ret.add(_Route53DynamicValue(provider, record, pool_name,
+ value, weight, i, creating))
+
+ # Rules
+ for i, rule in enumerate(record.dynamic.rules):
+ pool_name = rule.data['pool']
+ geos = rule.data.get('geos', [])
+ if geos:
+ for geo in geos:
+ # Create a RRSet for each geo in each rule that uses the
+ # desired target pool
+ ret.add(_Route53DynamicRule(provider, hosted_zone_id,
+ record, pool_name, i,
+ creating, geo=geo))
+ else:
+ # There's no geos for this rule so it's the catchall that will
+ # just point things that don't match any geo rules to the
+ # specified pool
+ ret.add(_Route53DynamicRule(provider, hosted_zone_id, record,
+ pool_name, i, creating))
+
return ret
- def __init__(self, provider, record, creating):
- self.fqdn = record.fqdn
+ @classmethod
+ def _new_geo(cls, provider, record, creating):
+ # Creates the RRSets that correspond to the given geo record
+ ret = set()
+
+ ret.add(_Route53GeoDefault(provider, record, creating))
+ for ident, geo in record.geo.items():
+ ret.add(_Route53GeoRecord(provider, record, ident, geo,
+ creating))
+
+ return ret
+
+ @classmethod
+ def new(cls, provider, record, hosted_zone_id, creating):
+ # Creates the RRSets that correspond to the given record
+
+ if getattr(record, 'dynamic', False):
+ ret = cls._new_dynamic(provider, record, hosted_zone_id, creating)
+ return ret
+ elif getattr(record, 'geo', False):
+ return cls._new_geo(provider, record, creating)
+
+ # It's a simple record that translates into a single RRSet
+ return set((_Route53Record(provider, record, creating),))
+
+ def __init__(self, provider, record, creating, fqdn_override=None):
+ self.fqdn = fqdn_override or record.fqdn
self._type = record._type
self.ttl = record.ttl
values_for = getattr(self, '_values_for_{}'.format(self._type))
self.values = values_for(record)
- def mod(self, action):
+ def mod(self, action, existing_rrsets):
return {
'Action': action,
'ResourceRecordSet': {
@@ -59,7 +149,7 @@ class _Route53Record(object):
}
}
- # NOTE: we're using __hash__ and __cmp__ methods that consider
+ # NOTE: we're using __hash__ and ordering methods that consider
# _Route53Records equivalent if they have the same class, fqdn, and _type.
# Values are ignored. This is useful when computing diffs/changes.
@@ -67,22 +157,24 @@ class _Route53Record(object):
'sub-classes should never use this method'
return '{}:{}'.format(self.fqdn, self._type).__hash__()
- def __cmp__(self, other):
- '''sub-classes should call up to this and return its value if non-zero.
- When it's zero they should compute their own __cmp__'''
- if self.__class__ != other.__class__:
- return cmp(self.__class__, other.__class__)
- elif self.fqdn != other.fqdn:
- return cmp(self.fqdn, other.fqdn)
- elif self._type != other._type:
- return cmp(self._type, other._type)
- # We're ignoring ttl, it's not an actual differentiator
- return 0
+ def _equality_tuple(self):
+ '''Sub-classes should call up to this and return its value and add
+ any additional fields they need to have considered.'''
+ return (self.__class__.__name__, self.fqdn, self._type)
def __repr__(self):
return '_Route53Record<{} {} {} {}>'.format(self.fqdn, self._type,
self.ttl, self.values)
+ def _value_convert_value(self, value, record):
+ return value
+
+ _value_convert_A = _value_convert_value
+ _value_convert_AAAA = _value_convert_value
+ _value_convert_NS = _value_convert_value
+ _value_convert_CNAME = _value_convert_value
+ _value_convert_PTR = _value_convert_value
+
def _values_for_values(self, record):
return record.values
@@ -90,9 +182,11 @@ class _Route53Record(object):
_values_for_AAAA = _values_for_values
_values_for_NS = _values_for_values
+ def _value_convert_CAA(self, value, record):
+ return '{} {} "{}"'.format(value.flags, value.tag, value.value)
+
def _values_for_CAA(self, record):
- return ['{} {} "{}"'.format(v.flags, v.tag, v.value)
- for v in record.values]
+ return [self._value_convert_CAA(v, record) for v in record.values]
def _values_for_value(self, record):
return [record.value]
@@ -100,18 +194,28 @@ class _Route53Record(object):
_values_for_CNAME = _values_for_value
_values_for_PTR = _values_for_value
+ def _value_convert_MX(self, value, record):
+ return '{} {}'.format(value.preference, value.exchange)
+
def _values_for_MX(self, record):
- return ['{} {}'.format(v.preference, v.exchange)
- for v in record.values]
+ return [self._value_convert_MX(v, record) for v in record.values]
+
+ def _value_convert_NAPTR(self, value, record):
+ return '{} {} "{}" "{}" "{}" {}' \
+ .format(value.order, value.preference,
+ value.flags if value.flags else '',
+ value.service if value.service else '',
+ value.regexp if value.regexp else '',
+ value.replacement)
def _values_for_NAPTR(self, record):
- return ['{} {} "{}" "{}" "{}" {}'
- .format(v.order, v.preference,
- v.flags if v.flags else '',
- v.service if v.service else '',
- v.regexp if v.regexp else '',
- v.replacement)
- for v in record.values]
+ return [self._value_convert_NAPTR(v, record) for v in record.values]
+
+ def _value_convert_quoted(self, value, record):
+ return record.chunked_value(value)
+
+ _value_convert_SPF = _value_convert_quoted
+ _value_convert_TXT = _value_convert_quoted
def _values_for_quoted(self, record):
return record.chunked_values
@@ -119,15 +223,197 @@ class _Route53Record(object):
_values_for_SPF = _values_for_quoted
_values_for_TXT = _values_for_quoted
+ def _value_for_SRV(self, value, record):
+ return '{} {} {} {}'.format(value.priority, value.weight,
+ value.port, value.target)
+
def _values_for_SRV(self, record):
- return ['{} {} {} {}'.format(v.priority, v.weight, v.port,
- v.target)
- for v in record.values]
+ return [self._value_for_SRV(v, record) for v in record.values]
+
+
+class _Route53DynamicPool(_Route53Record):
+
+ def __init__(self, provider, hosted_zone_id, record, pool_name, creating,
+ target_name=None):
+ fqdn_override = '_octodns-{}-pool.{}'.format(pool_name, record.fqdn)
+ super(_Route53DynamicPool, self) \
+ .__init__(provider, record, creating, fqdn_override=fqdn_override)
+
+ self.hosted_zone_id = hosted_zone_id
+ self.pool_name = pool_name
+
+ self.target_name = target_name
+ if target_name:
+ # We're pointing down the chain
+ self.target_dns_name = '_octodns-{}-pool.{}'.format(target_name,
+ record.fqdn)
+ else:
+ # We're a primary, point at our values
+ self.target_dns_name = '_octodns-{}-value.{}'.format(pool_name,
+ record.fqdn)
+
+ @property
+ def mode(self):
+ return 'Secondary' if self.target_name else 'Primary'
+
+ @property
+ def identifer(self):
+ if self.target_name:
+ return '{}-{}-{}'.format(self.pool_name, self.mode,
+ self.target_name)
+ return '{}-{}'.format(self.pool_name, self.mode)
+
+ def mod(self, action, existing_rrsets):
+ return {
+ 'Action': action,
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': self.target_dns_name,
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': self.hosted_zone_id,
+ },
+ 'Failover': 'SECONDARY' if self.target_name else 'PRIMARY',
+ 'Name': self.fqdn,
+ 'SetIdentifier': self.identifer,
+ 'Type': self._type,
+ }
+ }
+
+ def __hash__(self):
+ return '{}:{}:{}'.format(self.fqdn, self._type,
+ self.identifer).__hash__()
+
+ def __repr__(self):
+ return '_Route53DynamicPool<{} {} {} {}>' \
+ .format(self.fqdn, self._type, self.mode, self.target_dns_name)
+
+
+class _Route53DynamicRule(_Route53Record):
+
+ def __init__(self, provider, hosted_zone_id, record, pool_name, index,
+ creating, geo=None):
+ super(_Route53DynamicRule, self).__init__(provider, record, creating)
+
+ self.hosted_zone_id = hosted_zone_id
+ self.geo = geo
+ self.pool_name = pool_name
+ self.index = index
+
+ self.target_dns_name = '_octodns-{}-pool.{}'.format(pool_name,
+ record.fqdn)
+
+ @property
+ def identifer(self):
+ return '{}-{}-{}'.format(self.index, self.pool_name, self.geo)
+
+ def mod(self, action, existing_rrsets):
+ rrset = {
+ 'AliasTarget': {
+ 'DNSName': self.target_dns_name,
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': self.hosted_zone_id,
+ },
+ 'GeoLocation': {
+ 'CountryCode': '*'
+ },
+ 'Name': self.fqdn,
+ 'SetIdentifier': self.identifer,
+ 'Type': self._type,
+ }
+
+ if self.geo:
+ geo = GeoCodes.parse(self.geo)
+
+ if geo['province_code']:
+ rrset['GeoLocation'] = {
+ 'CountryCode': geo['country_code'],
+ 'SubdivisionCode': geo['province_code'],
+ }
+ elif geo['country_code']:
+ rrset['GeoLocation'] = {
+ 'CountryCode': geo['country_code']
+ }
+ else:
+ rrset['GeoLocation'] = {
+ 'ContinentCode': geo['continent_code'],
+ }
+
+ return {
+ 'Action': action,
+ 'ResourceRecordSet': rrset,
+ }
+
+ def __hash__(self):
+ return '{}:{}:{}'.format(self.fqdn, self._type,
+ self.identifer).__hash__()
+
+ def __repr__(self):
+ return '_Route53DynamicRule<{} {} {} {} {}>' \
+ .format(self.fqdn, self._type, self.index, self.geo,
+ self.target_dns_name)
+
+
+class _Route53DynamicValue(_Route53Record):
+
+ def __init__(self, provider, record, pool_name, value, weight, index,
+ creating):
+ fqdn_override = '_octodns-{}-value.{}'.format(pool_name, record.fqdn)
+ super(_Route53DynamicValue, self).__init__(provider, record, creating,
+ fqdn_override=fqdn_override)
+
+ self.pool_name = pool_name
+ self.index = index
+ value_convert = getattr(self, '_value_convert_{}'.format(record._type))
+ self.value = value_convert(value, record)
+ self.weight = weight
+
+ self.health_check_id = provider.get_health_check_id(record, self.value,
+ creating)
+
+ @property
+ def identifer(self):
+ return '{}-{:03d}'.format(self.pool_name, self.index)
+
+ def mod(self, action, existing_rrsets):
+
+ if action == 'DELETE':
+ # When deleting records try and find the original rrset so that
+ # we're 100% sure to have the complete & accurate data (this mostly
+ # ensures we have the right health check id when there's multiple
+ # potential matches)
+ for existing in existing_rrsets:
+ if self.fqdn == existing.get('Name') and \
+ self.identifer == existing.get('SetIdentifier', None):
+ return {
+ 'Action': action,
+ 'ResourceRecordSet': existing,
+ }
+
+ return {
+ 'Action': action,
+ 'ResourceRecordSet': {
+ 'HealthCheckId': self.health_check_id,
+ 'Name': self.fqdn,
+ 'ResourceRecords': [{'Value': self.value}],
+ 'SetIdentifier': self.identifer,
+ 'TTL': self.ttl,
+ 'Type': self._type,
+ 'Weight': self.weight,
+ }
+ }
+
+ def __hash__(self):
+ return '{}:{}:{}'.format(self.fqdn, self._type,
+ self.identifer).__hash__()
+
+ def __repr__(self):
+ return '_Route53DynamicValue<{} {} {} {}>' \
+ .format(self.fqdn, self._type, self.identifer, self.value)
class _Route53GeoDefault(_Route53Record):
- def mod(self, action):
+ def mod(self, action, existing_rrsets):
return {
'Action': action,
'ResourceRecordSet': {
@@ -156,18 +442,35 @@ class _Route53GeoRecord(_Route53Record):
super(_Route53GeoRecord, self).__init__(provider, record, creating)
self.geo = geo
- self.health_check_id = provider.get_health_check_id(record, ident,
- geo, creating)
+ value = geo.values[0]
+ self.health_check_id = provider.get_health_check_id(record, value,
+ creating)
- def mod(self, action):
+ def mod(self, action, existing_rrsets):
geo = self.geo
+ set_identifier = geo.code
+ fqdn = self.fqdn
+
+ if action == 'DELETE':
+ # When deleting records try and find the original rrset so that
+ # we're 100% sure to have the complete & accurate data (this mostly
+ # ensures we have the right health check id when there's multiple
+ # potential matches)
+ for existing in existing_rrsets:
+ if fqdn == existing.get('Name') and \
+ set_identifier == existing.get('SetIdentifier', None):
+ return {
+ 'Action': action,
+ 'ResourceRecordSet': existing,
+ }
+
rrset = {
'Name': self.fqdn,
'GeoLocation': {
'CountryCode': '*'
},
'ResourceRecords': [{'Value': v} for v in geo.values],
- 'SetIdentifier': geo.code,
+ 'SetIdentifier': set_identifier,
'TTL': self.ttl,
'Type': self._type,
}
@@ -198,11 +501,9 @@ class _Route53GeoRecord(_Route53Record):
return '{}:{}:{}'.format(self.fqdn, self._type,
self.geo.code).__hash__()
- def __cmp__(self, other):
- ret = super(_Route53GeoRecord, self).__cmp__(other)
- if ret != 0:
- return ret
- return cmp(self.geo.code, other.geo.code)
+ def _equality_tuple(self):
+ return super(_Route53GeoRecord, self)._equality_tuple() + \
+ (self.geo.code,)
def __repr__(self):
return '_Route53GeoRecord<{} {} {} {} {}>'.format(self.fqdn,
@@ -215,6 +516,81 @@ class Route53ProviderException(Exception):
pass
+def _mod_keyer(mod):
+ rrset = mod['ResourceRecordSet']
+
+ # Route53 requires that changes are ordered such that a target of an
+ # AliasTarget is created or upserted prior to the record that targets it.
+ # This is complicated by "UPSERT" appearing to be implemented as "DELETE"
+ # before all changes, followed by a "CREATE", internally in the AWS API.
+ # Because of this, we order changes as follows:
+ # - Delete any records that we wish to delete that are GEOS
+ # (because they are never targeted by anything)
+ # - Delete any records that we wish to delete that are SECONDARY
+ # (because they are no longer targeted by GEOS)
+ # - Delete any records that we wish to delete that are PRIMARY
+ # (because they are no longer targeted by SECONDARY)
+ # - Delete any records that we wish to delete that are VALUES
+ # (because they are no longer targeted by PRIMARY)
+ # - CREATE/UPSERT any records that are VALUES
+ # (because they don't depend on other records)
+ # - CREATE/UPSERT any records that are PRIMARY
+ # (because they always point to VALUES which now exist)
+ # - CREATE/UPSERT any records that are SECONDARY
+ # (because they now have PRIMARY records to target)
+ # - CREATE/UPSERT any records that are GEOS
+ # (because they now have all their PRIMARY pools to target)
+ # - :tada:
+ #
+ # In theory we could also do this based on actual target reference
+ # checking, but that's more complex. Since our rules have a known
+ # dependency order, we just rely on that.
+
+ # Get the unique ID from the name/id to get a consistent ordering.
+ if rrset.get('GeoLocation', False):
+ unique_id = rrset['SetIdentifier']
+ else:
+ if 'SetIdentifier' in rrset:
+ unique_id = '{}-{}'.format(rrset['Name'], rrset['SetIdentifier'])
+ else:
+ unique_id = rrset['Name']
+
+ # Prioritise within the action_priority, ensuring targets come first.
+ if rrset.get('GeoLocation', False):
+ # Geos reference pools, so they come last.
+ record_priority = 3
+ elif rrset.get('AliasTarget', False):
+ # We use an alias
+ if rrset.get('Failover', False) == 'SECONDARY':
+ # We're a secondary, which reference the primary (failover, P1).
+ record_priority = 2
+ else:
+ # We're a primary, we reference values (P0).
+ record_priority = 1
+ else:
+ # We're just a plain value, has no dependencies so first.
+ record_priority = 0
+
+ if mod['Action'] == 'DELETE':
+ # Delete things first, so we can never trounce our own additions
+ action_priority = 0
+ # Delete in the reverse order of priority, e.g. start with the deepest
+ # reference and work back to the values, rather than starting at the
+ # values (still ref'd).
+ record_priority = -record_priority
+ else:
+ # For CREATE and UPSERT, Route53 seems to treat them the same, so
+ # interleave these, keeping the reference order described above.
+ action_priority = 1
+
+ return (action_priority, record_priority, unique_id)
+
+
+def _parse_pool_name(n):
+ # Parse the pool name out of _octodns-<pool-name>-pool...
+ return n.split('.', 1)[0][9:-5]
+
+
class Route53Provider(BaseProvider):
'''
AWS Route53 Provider
@@ -236,8 +612,7 @@ class Route53Provider(BaseProvider):
In general the account used will need full permissions on Route53.
'''
SUPPORTS_GEO = True
- # TODO: dynamic
- SUPPORTS_DYNAMIC = False
+ SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'TXT'))
@@ -247,8 +622,9 @@ class Route53Provider(BaseProvider):
def __init__(self, id, access_key_id=None, secret_access_key=None,
max_changes=1000, client_max_attempts=None,
- session_token=None, *args, **kwargs):
+ session_token=None, delegation_set_id=None, *args, **kwargs):
self.max_changes = max_changes
+ self.delegation_set_id = delegation_set_id
_msg = 'access_key_id={}, secret_access_key=***, ' \
'session_token=***'.format(access_key_id)
use_fallback_auth = access_key_id is None and \
@@ -303,10 +679,16 @@ class Route53Provider(BaseProvider):
return id
if create:
ref = uuid4().hex
+ del_set = self.delegation_set_id
self.log.debug('_get_zone_id: no matching zone, creating, '
'ref=%s', ref)
- resp = self._conn.create_hosted_zone(Name=name,
- CallerReference=ref)
+ if del_set:
+ resp = self._conn.create_hosted_zone(Name=name,
+ CallerReference=ref,
+ DelegationSetId=del_set)
+ else:
+ resp = self._conn.create_hosted_zone(Name=name,
+ CallerReference=ref)
self.r53_zones[name] = id = resp['HostedZone']['Id']
return id
return None
@@ -325,7 +707,7 @@ class Route53Provider(BaseProvider):
if cc == '*':
# This is the default
return
- cn = cca_to_ctca2(cc)
+ cn = country_alpha2_to_continent_code(cc)
try:
return '{}-{}-{}'.format(cn, cc, loc['SubdivisionCode'])
except KeyError:
@@ -348,7 +730,7 @@ class Route53Provider(BaseProvider):
def _data_for_CAA(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
- flags, tag, value = rr['Value'].split(' ')
+ flags, tag, value = rr['Value'].split()
values.append({
'flags': flags,
'tag': tag,
@@ -386,7 +768,7 @@ class Route53Provider(BaseProvider):
def _data_for_MX(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
- preference, exchange = rr['Value'].split(' ')
+ preference, exchange = rr['Value'].split()
values.append({
'preference': preference,
'exchange': exchange,
@@ -401,7 +783,7 @@ class Route53Provider(BaseProvider):
values = []
for rr in rrset['ResourceRecords']:
order, preference, flags, service, regexp, replacement = \
- rr['Value'].split(' ')
+ rr['Value'].split()
flags = flags[1:-1]
service = service[1:-1]
regexp = regexp[1:-1]
@@ -429,7 +811,7 @@ class Route53Provider(BaseProvider):
def _data_for_SRV(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
- priority, weight, port, target = rr['Value'].split(' ')
+ priority, weight, port, target = rr['Value'].split()
values.append({
'priority': priority,
'weight': weight,
@@ -469,6 +851,79 @@ class Route53Provider(BaseProvider):
return self._r53_rrsets[zone_id]
+ def _data_for_dynamic(self, name, _type, rrsets):
+ # This converts a bunch of RRSets into their corresponding dynamic
+ # Record. It's used by populate.
+ pools = defaultdict(lambda: {'values': []})
+ # Data to build our rules will be collected here and "converted" into
+ # their final form below
+ rules = defaultdict(lambda: {'pool': None, 'geos': []})
+ # Base/empty data
+ data = {
+ 'dynamic': {
+ 'pools': pools,
+ 'rules': [],
+ }
+ }
+
+ # For all the rrsets that comprise this dynamic record
+ for rrset in rrsets:
+ name = rrset['Name']
+ if '-pool.' in name:
+ # This is a pool rrset
+ pool_name = _parse_pool_name(name)
+ if pool_name == 'default':
+ # default becomes the base for the record and its
+ # value(s) will fill the non-dynamic values
+ data_for = getattr(self, '_data_for_{}'.format(_type))
+ data.update(data_for(rrset))
+ elif rrset['Failover'] == 'SECONDARY':
+ # This is a failover record, we'll ignore PRIMARY, but
+ # SECONDARY will tell us what the pool's fallback is
+ fallback_name = \
+ _parse_pool_name(rrset['AliasTarget']['DNSName'])
+ # Don't care about default fallbacks, anything else
+ # we'll record
+ if fallback_name != 'default':
+ pools[pool_name]['fallback'] = fallback_name
+ elif 'GeoLocation' in rrset:
+ # These are rules
+ _id = rrset['SetIdentifier']
+ # We record rule index as the first part of set-id, the 2nd
+ # part just ensures uniqueness across geos and is ignored
+ i = int(_id.split('-', 1)[0])
+ target_pool = _parse_pool_name(rrset['AliasTarget']['DNSName'])
+ # Record the pool
+ rules[i]['pool'] = target_pool
+ # Record geo if we have one
+ geo = self._parse_geo(rrset)
+ if geo:
+ rules[i]['geos'].append(geo)
+ else:
+ # These are the pool value(s)
+ # Grab the pool name out of the SetIdentifier, format looks
+ # like ...-000 where 000 is a zero-padded index for the value
+ # it's ignored only used to make sure the value is unique
+ pool_name = rrset['SetIdentifier'][:-4]
+ value = rrset['ResourceRecords'][0]['Value']
+ pools[pool_name]['values'].append({
+ 'value': value,
+ 'weight': rrset['Weight'],
+ })
+
+ # Convert our map of rules into an ordered list now that we have all
+ # the data
+ for _, rule in sorted(rules.items()):
+ r = {
+ 'pool': rule['pool'],
+ }
+ geos = sorted(rule['geos'])
+ if geos:
+ r['geos'] = geos
+ data['dynamic']['rules'].append(r)
+
+ return data
+
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
@@ -480,21 +935,46 @@ class Route53Provider(BaseProvider):
if zone_id:
exists = True
records = defaultdict(lambda: defaultdict(list))
+ dynamic = defaultdict(lambda: defaultdict(list))
+
for rrset in self._load_records(zone_id):
record_name = zone.hostname_from_fqdn(rrset['Name'])
record_name = _octal_replace(record_name)
record_type = rrset['Type']
if record_type not in self.SUPPORTS:
+ # Skip stuff we don't support
continue
- if 'AliasTarget' in rrset:
- # Alias records are Route53 specific and are not
- # portable, so we need to skip them
- self.log.warning("%s is an Alias record. Skipping..."
- % rrset['Name'])
+ if record_name.startswith('_octodns-'):
+ # Part of a dynamic record
+ try:
+ record_name = record_name.split('.', 1)[1]
+ except IndexError:
+ record_name = ''
+ dynamic[record_name][record_type].append(rrset)
continue
+ elif 'AliasTarget' in rrset:
+ if rrset['AliasTarget']['DNSName'].startswith('_octodns-'):
+ # Part of a dynamic record
+ dynamic[record_name][record_type].append(rrset)
+ else:
+ # Alias records are Route53 specific and are not
+ # portable, so we need to skip them
+ self.log.warning("%s is an Alias record. Skipping..."
+ % rrset['Name'])
+ continue
+ # A basic record (potentially including geo)
data = getattr(self, '_data_for_{}'.format(record_type))(rrset)
records[record_name][record_type].append(data)
+ # Convert the dynamic rrsets to Records
+ for name, types in dynamic.items():
+ for _type, rrsets in types.items():
+ data = self._data_for_dynamic(name, _type, rrsets)
+ record = Record.new(zone, name, data, source=self,
+ lenient=lenient)
+ zone.add_record(record, lenient=lenient)
+
+ # Convert the basic (potentially with geo) rrsets to records
for name, types in records.items():
for _type, data in types.items():
if len(data) > 1:
@@ -518,11 +998,11 @@ class Route53Provider(BaseProvider):
len(zone.records) - before, exists)
return exists
- def _gen_mods(self, action, records):
+ def _gen_mods(self, action, records, existing_rrsets):
'''
Turns `_Route53*`s in to `change_resource_record_sets` `Changes`
'''
- return [r.mod(action) for r in records]
+ return [r.mod(action, existing_rrsets) for r in records]
@property
def health_checks(self):
@@ -541,6 +1021,7 @@ class Route53Provider(BaseProvider):
# ignore anything else
continue
checks[health_check['Id']] = health_check
+
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
@@ -565,29 +1046,50 @@ class Route53Provider(BaseProvider):
'route53.healthcheck.request_interval '
'parameter must be either 10 or 30.')
- def _health_check_equivilent(self, host, path, protocol, port,
+ def _health_check_equivalent(self, host, path, protocol, port,
measure_latency, request_interval,
- health_check, first_value=None):
+ health_check, value=None):
config = health_check['HealthCheckConfig']
- return host == config['FullyQualifiedDomainName'] and \
- path == config['ResourcePath'] and protocol == config['Type'] \
- and port == config['Port'] and \
+
+ # So interestingly Route53 normalizes IPAddress which will cause us to
+ # fail to see things as equivalent. To work around this we'll compare
+ # ip_address's returned objects for equivalence
+ # E.g 2001:4860:4860::8842 -> 2001:4860:4860:0:0:0:0:8842
+ if value:
+ value = ip_address(text_type(value))
+ config_ip_address = ip_address(text_type(config['IPAddress']))
+ else:
+ # No value so give this a None to match value's
+ config_ip_address = None
+
+ fully_qualified_domain_name = config.get('FullyQualifiedDomainName',
+ None)
+ resource_path = config.get('ResourcePath', None)
+ return host == fully_qualified_domain_name and \
+ path == resource_path and protocol == config['Type'] and \
+ port == config['Port'] and \
measure_latency == config['MeasureLatency'] and \
request_interval == config['RequestInterval'] and \
- (first_value is None or first_value == config['IPAddress'])
+ value == config_ip_address
- def get_health_check_id(self, record, ident, geo, create):
+ def get_health_check_id(self, record, value, create):
# fqdn & the first value are special, we use them to match up health
# checks to their records. Route53 health checks check a single ip and
# we're going to assume that ips are interchangeable to avoid
# health-checking each one independently
fqdn = record.fqdn
- first_value = geo.values[0]
- self.log.debug('get_health_check_id: fqdn=%s, type=%s, geo=%s, '
- 'first_value=%s', fqdn, record._type, ident,
- first_value)
+ self.log.debug('get_health_check_id: fqdn=%s, type=%s, value=%s',
+ fqdn, record._type, value)
+
+ try:
+ ip_address(text_type(value))
+ # We're working with an IP, host is the Host header
+ healthcheck_host = record.healthcheck_host
+ except (AddressValueError, ValueError):
+ # This isn't an IP, host is the value, value should be None
+ healthcheck_host = value
+ value = None
- healthcheck_host = record.healthcheck_host
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
@@ -602,14 +1104,14 @@ class Route53Provider(BaseProvider):
if not health_check['CallerReference'].startswith(expected_ref):
# not match, ignore
continue
- if self._health_check_equivilent(healthcheck_host,
+ if self._health_check_equivalent(healthcheck_host,
healthcheck_path,
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
healthcheck_interval,
health_check,
- first_value=first_value):
+ value=value):
# this is the health check we're looking for
self.log.debug('get_health_check_id: found match id=%s', id)
return id
@@ -623,30 +1125,47 @@ class Route53Provider(BaseProvider):
config = {
'EnableSNI': healthcheck_protocol == 'HTTPS',
'FailureThreshold': 6,
- 'FullyQualifiedDomainName': healthcheck_host,
- 'IPAddress': first_value,
'MeasureLatency': healthcheck_latency,
'Port': healthcheck_port,
'RequestInterval': healthcheck_interval,
- 'ResourcePath': healthcheck_path,
'Type': healthcheck_protocol,
}
+ if healthcheck_protocol != 'TCP':
+ config['FullyQualifiedDomainName'] = healthcheck_host
+ config['ResourcePath'] = healthcheck_path
+ if value:
+ config['IPAddress'] = value
+
ref = '{}:{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type,
record.fqdn, uuid4().hex[:12])
resp = self._conn.create_health_check(CallerReference=ref,
HealthCheckConfig=config)
health_check = resp['HealthCheck']
id = health_check['Id']
+
+ # Set a Name for the benefit of the UI
+ name = '{}:{} - {}'.format(record.fqdn, record._type,
+ value or healthcheck_host)
+ self._conn.change_tags_for_resource(ResourceType='healthcheck',
+ ResourceId=id,
+ AddTags=[{
+ 'Key': 'Name',
+ 'Value': name,
+ }])
+ # Manually add it to our cache
+ health_check['Tags'] = {
+ 'Name': name
+ }
+
# store the new health check so that we'll be able to find it in the
# future
self._health_checks[id] = health_check
self.log.info('get_health_check_id: created id=%s, host=%s, '
'path=%s, protocol=%s, port=%d, measure_latency=%r, '
- 'request_interval=%d, first_value=%s',
+ 'request_interval=%d, value=%s',
id, healthcheck_host, healthcheck_path,
healthcheck_protocol, healthcheck_port,
- healthcheck_latency, healthcheck_interval,
- first_value)
+ healthcheck_latency, healthcheck_interval, value)
return id
def _gc_health_checks(self, record, new):
@@ -683,25 +1202,26 @@ class Route53Provider(BaseProvider):
id)
self._conn.delete_health_check(HealthCheckId=id)
- def _gen_records(self, record, creating=False):
+ def _gen_records(self, record, zone_id, creating=False):
'''
Turns an octodns.Record into one or more `_Route53*`s
'''
- return _Route53Record.new(self, record, creating)
+ return _Route53Record.new(self, record, zone_id, creating)
- def _mod_Create(self, change):
+ def _mod_Create(self, change, zone_id, existing_rrsets):
# New is the stuff that needs to be created
- new_records = self._gen_records(change.new, creating=True)
+ new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
- return self._gen_mods('CREATE', new_records)
+ return self._gen_mods('CREATE', new_records, existing_rrsets)
- def _mod_Update(self, change):
+ def _mod_Update(self, change, zone_id, existing_rrsets):
# See comments in _Route53Record for how the set math is made to do our
# bidding here.
- existing_records = self._gen_records(change.existing, creating=False)
- new_records = self._gen_records(change.new, creating=True)
+ existing_records = self._gen_records(change.existing, zone_id,
+ creating=False)
+ new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
@@ -719,17 +1239,108 @@ class Route53Provider(BaseProvider):
if new_record in existing_records:
upserts.add(new_record)
- return self._gen_mods('DELETE', deletes) + \
- self._gen_mods('CREATE', creates) + \
- self._gen_mods('UPSERT', upserts)
+ return self._gen_mods('DELETE', deletes, existing_rrsets) + \
+ self._gen_mods('CREATE', creates, existing_rrsets) + \
+ self._gen_mods('UPSERT', upserts, existing_rrsets)
- def _mod_Delete(self, change):
+ def _mod_Delete(self, change, zone_id, existing_rrsets):
# Existing is the thing that needs to be deleted
- existing_records = self._gen_records(change.existing, creating=False)
+ existing_records = self._gen_records(change.existing, zone_id,
+ creating=False)
# Now is a good time to clear out all the health checks since we know
# we're done with them
self._gc_health_checks(change.existing, [])
- return self._gen_mods('DELETE', existing_records)
+ return self._gen_mods('DELETE', existing_records, existing_rrsets)
+
+ def _extra_changes_update_needed(self, record, rrset):
+ healthcheck_host = record.healthcheck_host
+ healthcheck_path = record.healthcheck_path
+ healthcheck_protocol = record.healthcheck_protocol
+ healthcheck_port = record.healthcheck_port
+ healthcheck_latency = self._healthcheck_measure_latency(record)
+ healthcheck_interval = self._healthcheck_request_interval(record)
+
+ try:
+ health_check_id = rrset['HealthCheckId']
+ health_check = self.health_checks[health_check_id]
+ caller_ref = health_check['CallerReference']
+ if caller_ref.startswith(self.HEALTH_CHECK_VERSION):
+ if self._health_check_equivalent(healthcheck_host,
+ healthcheck_path,
+ healthcheck_protocol,
+ healthcheck_port,
+ healthcheck_latency,
+ healthcheck_interval,
+ health_check):
+ # it has the right health check
+ return False
+ except (IndexError, KeyError):
+ # no health check id or one that isn't the right version
+ pass
+
+ # no good, doesn't have the right health check, needs an update
+ self.log.info('_extra_changes_update_needed: health-check caused '
+ 'update of %s:%s', record.fqdn, record._type)
+ return True
+
+ def _extra_changes_geo_needs_update(self, zone_id, record):
+        # OK this is a record we don't have a change for that does have geo
+ # information. We need to look and see if it needs to be updated b/c of
+ # a health check version bump or other mismatch
+ self.log.debug('_extra_changes_geo_needs_update: inspecting=%s, %s',
+ record.fqdn, record._type)
+
+ fqdn = record.fqdn
+
+ # loop through all the r53 rrsets
+ for rrset in self._load_records(zone_id):
+ if fqdn == rrset['Name'] and record._type == rrset['Type'] and \
+ rrset.get('GeoLocation', {}).get('CountryCode', False) != '*' \
+ and self._extra_changes_update_needed(record, rrset):
+ # no good, doesn't have the right health check, needs an update
+ self.log.info('_extra_changes_geo_needs_update: health-check '
+ 'caused update of %s:%s', record.fqdn,
+ record._type)
+ return True
+
+ return False
+
+ def _extra_changes_dynamic_needs_update(self, zone_id, record):
+        # OK this is a record we don't have a change for that does have dynamic
+ # information. We need to look and see if it needs to be updated b/c of
+ # a health check version bump or other mismatch
+ self.log.debug('_extra_changes_dynamic_needs_update: inspecting=%s, '
+ '%s', record.fqdn, record._type)
+
+ fqdn = record.fqdn
+ _type = record._type
+
+ # loop through all the r53 rrsets
+ for rrset in self._load_records(zone_id):
+ name = rrset['Name']
+ # Break off the first piece of the name, it'll let us figure out if
+ # this is an rrset we're interested in.
+ maybe_meta, rest = name.split('.', 1)
+
+ if not maybe_meta.startswith('_octodns-') or \
+ not maybe_meta.endswith('-value') or \
+ '-default-' in name:
+ # We're only interested in non-default dynamic value records,
+ # as that's where healthchecks live
+ continue
+
+ if rest != fqdn or _type != rrset['Type']:
+ # rrset isn't for the current record
+ continue
+
+ if self._extra_changes_update_needed(record, rrset):
+ # no good, doesn't have the right health check, needs an update
+ self.log.info('_extra_changes_dynamic_needs_update: '
+ 'health-check caused update of %s:%s',
+ record.fqdn, record._type)
+ return True
+
+ return False
def _extra_changes(self, desired, changes, **kwargs):
self.log.debug('_extra_changes: desired=%s', desired.name)
@@ -741,63 +1352,20 @@ class Route53Provider(BaseProvider):
changed = set([c.record for c in changes])
# ok, now it's time for the reason we're here, we need to go over all
# the desired records
- extra = []
+ extras = []
for record in desired.records:
if record in changed:
# already have a change for it, skipping
continue
- if not getattr(record, 'geo', False):
- # record doesn't support geo, we don't need to inspect it
- continue
- # OK this is a record we don't have change for that does have geo
- # information. We need to look and see if it needs to be updated
- # b/c of a health check version bump
- self.log.debug('_extra_changes: inspecting=%s, %s', record.fqdn,
- record._type)
- healthcheck_host = record.healthcheck_host
- healthcheck_path = record.healthcheck_path
- healthcheck_protocol = record.healthcheck_protocol
- healthcheck_port = record.healthcheck_port
- healthcheck_latency = self._healthcheck_measure_latency(record)
- healthcheck_interval = self._healthcheck_request_interval(record)
- fqdn = record.fqdn
+ if getattr(record, 'geo', False):
+ if self._extra_changes_geo_needs_update(zone_id, record):
+ extras.append(Update(record, record))
+ elif getattr(record, 'dynamic', False):
+ if self._extra_changes_dynamic_needs_update(zone_id, record):
+ extras.append(Update(record, record))
- # loop through all the r53 rrsets
- for rrset in self._load_records(zone_id):
- if fqdn != rrset['Name'] or record._type != rrset['Type']:
- # not a name and type match
- continue
- if rrset.get('GeoLocation', {}) \
- .get('CountryCode', False) == '*':
- # it's a default record
- continue
- # we expect a healthcheck now
- try:
- health_check_id = rrset['HealthCheckId']
- health_check = self.health_checks[health_check_id]
- caller_ref = health_check['CallerReference']
- if caller_ref.startswith(self.HEALTH_CHECK_VERSION):
- if self._health_check_equivilent(healthcheck_host,
- healthcheck_path,
- healthcheck_protocol,
- healthcheck_port,
- healthcheck_latency,
- healthcheck_interval,
- health_check):
- # it has the right health check
- continue
- except (IndexError, KeyError):
- # no health check id or one that isn't the right version
- pass
- # no good, doesn't have the right health check, needs an update
- self.log.info('_extra_changes: health-check caused '
- 'update of %s:%s', record.fqdn, record._type)
- extra.append(Update(record, record))
- # We don't need to process this record any longer
- break
-
- return extra
+ return extras
def _apply(self, plan):
desired = plan.desired
@@ -808,10 +1376,19 @@ class Route53Provider(BaseProvider):
batch = []
batch_rs_count = 0
zone_id = self._get_zone_id(desired.name, True)
+ existing_rrsets = self._load_records(zone_id)
for c in changes:
- mods = getattr(self, '_mod_{}'.format(c.__class__.__name__))(c)
+ # Generate the mods for this change
+ mod_type = getattr(self, '_mod_{}'.format(c.__class__.__name__))
+ mods = mod_type(c, zone_id, existing_rrsets)
+
+            # Order our mods to make sure targets exist before aliases point to
+ # them and we CRUD in the desired order
+ mods.sort(key=_mod_keyer)
+
mods_rs_count = sum(
- [len(m['ResourceRecordSet']['ResourceRecords']) for m in mods]
+ [len(m['ResourceRecordSet'].get('ResourceRecords', ''))
+ for m in mods]
)
if mods_rs_count > self.max_changes:
@@ -844,6 +1421,8 @@ class Route53Provider(BaseProvider):
self._really_apply(batch, zone_id)
def _really_apply(self, batch, zone_id):
+ # Ensure this batch is ordered (deletes before creates etc.)
+ batch.sort(key=_mod_keyer)
uuid = uuid4().hex
batch = {
'Comment': 'Change: {}'.format(uuid),
diff --git a/octodns/provider/selectel.py b/octodns/provider/selectel.py
new file mode 100644
index 0000000..072b8cf
--- /dev/null
+++ b/octodns/provider/selectel.py
@@ -0,0 +1,305 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from collections import defaultdict
+
+from logging import getLogger
+
+from requests import Session
+
+from ..record import Record, Update
+from .base import BaseProvider
+
+
+class SelectelAuthenticationRequired(Exception):
+ def __init__(self, msg):
+ message = 'Authorization failed. Invalid or empty token.'
+ super(SelectelAuthenticationRequired, self).__init__(message)
+
+
+class SelectelProvider(BaseProvider):
+ SUPPORTS_GEO = False
+
+ SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'TXT', 'SPF', 'SRV'))
+
+ MIN_TTL = 60
+
+ PAGINATION_LIMIT = 50
+
+ API_URL = 'https://api.selectel.ru/domains/v1'
+
+ def __init__(self, id, token, *args, **kwargs):
+ self.log = getLogger('SelectelProvider[{}]'.format(id))
+ self.log.debug('__init__: id=%s', id)
+ super(SelectelProvider, self).__init__(id, *args, **kwargs)
+
+ self._sess = Session()
+ self._sess.headers.update({
+ 'X-Token': token,
+ 'Content-Type': 'application/json',
+ })
+ self._zone_records = {}
+ self._domain_list = self.domain_list()
+ self._zones = None
+
+ def _request(self, method, path, params=None, data=None):
+ self.log.debug('_request: method=%s, path=%s', method, path)
+
+ url = '{}{}'.format(self.API_URL, path)
+ resp = self._sess.request(method, url, params=params, json=data)
+
+ self.log.debug('_request: status=%s', resp.status_code)
+ if resp.status_code == 401:
+ raise SelectelAuthenticationRequired(resp.text)
+ elif resp.status_code == 404:
+ return {}
+ resp.raise_for_status()
+ if method == 'DELETE':
+ return {}
+ return resp.json()
+
+ def _get_total_count(self, path):
+ url = '{}{}'.format(self.API_URL, path)
+ resp = self._sess.request('HEAD', url)
+ return int(resp.headers['X-Total-Count'])
+
+ def _request_with_pagination(self, path, total_count):
+ result = []
+ for offset in range(0, total_count, self.PAGINATION_LIMIT):
+ result += self._request('GET', path,
+ params={'limit': self.PAGINATION_LIMIT,
+ 'offset': offset})
+ return result
+
+ def _include_change(self, change):
+ if isinstance(change, Update):
+ existing = change.existing.data
+ new = change.new.data
+ new['ttl'] = max(self.MIN_TTL, new['ttl'])
+ if new == existing:
+ self.log.debug('_include_changes: new=%s, found existing=%s',
+ new, existing)
+ return False
+ return True
+
+ def _apply(self, plan):
+ desired = plan.desired
+ changes = plan.changes
+ self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
+ len(changes))
+
+ zone_name = desired.name[:-1]
+ for change in changes:
+ class_name = change.__class__.__name__
+ getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
+ change)
+
+ def _apply_create(self, zone_name, change):
+ new = change.new
+ params_for = getattr(self, '_params_for_{}'.format(new._type))
+ for params in params_for(new):
+ self.create_record(zone_name, params)
+
+ def _apply_update(self, zone_name, change):
+ self._apply_delete(zone_name, change)
+ self._apply_create(zone_name, change)
+
+ def _apply_delete(self, zone_name, change):
+ existing = change.existing
+ self.delete_record(zone_name, existing._type, existing.name)
+
+ def _params_for_multiple(self, record):
+ for value in record.values:
+ yield {
+ 'content': value,
+ 'name': record.fqdn,
+ 'ttl': max(self.MIN_TTL, record.ttl),
+ 'type': record._type,
+ }
+
+ def _params_for_single(self, record):
+ yield {
+ 'content': record.value,
+ 'name': record.fqdn,
+ 'ttl': max(self.MIN_TTL, record.ttl),
+ 'type': record._type
+ }
+
+ def _params_for_MX(self, record):
+ for value in record.values:
+ yield {
+ 'content': value.exchange,
+ 'name': record.fqdn,
+ 'ttl': max(self.MIN_TTL, record.ttl),
+ 'type': record._type,
+ 'priority': value.preference
+ }
+
+ def _params_for_SRV(self, record):
+ for value in record.values:
+ yield {
+ 'name': record.fqdn,
+ 'target': value.target,
+ 'ttl': max(self.MIN_TTL, record.ttl),
+ 'type': record._type,
+ 'port': value.port,
+ 'weight': value.weight,
+ 'priority': value.priority
+ }
+
+ _params_for_A = _params_for_multiple
+ _params_for_AAAA = _params_for_multiple
+ _params_for_NS = _params_for_multiple
+ _params_for_TXT = _params_for_multiple
+ _params_for_SPF = _params_for_multiple
+
+ _params_for_CNAME = _params_for_single
+
+ def _data_for_A(self, _type, records):
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': [r['content'] for r in records],
+ }
+
+ _data_for_AAAA = _data_for_A
+
+ def _data_for_NS(self, _type, records):
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': ['{}.'.format(r['content']) for r in records],
+ }
+
+ def _data_for_MX(self, _type, records):
+ values = []
+ for record in records:
+ values.append({
+ 'preference': record['priority'],
+ 'exchange': '{}.'.format(record['content']),
+ })
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': values,
+ }
+
+ def _data_for_CNAME(self, _type, records):
+ only = records[0]
+ return {
+ 'ttl': only['ttl'],
+ 'type': _type,
+ 'value': '{}.'.format(only['content'])
+ }
+
+ def _data_for_TXT(self, _type, records):
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': [r['content'] for r in records],
+ }
+
+ def _data_for_SRV(self, _type, records):
+ values = []
+ for record in records:
+ values.append({
+ 'priority': record['priority'],
+ 'weight': record['weight'],
+ 'port': record['port'],
+ 'target': '{}.'.format(record['target']),
+ })
+
+ return {
+ 'type': _type,
+ 'ttl': records[0]['ttl'],
+ 'values': values,
+ }
+
+ def populate(self, zone, target=False, lenient=False):
+ self.log.debug('populate: name=%s, target=%s, lenient=%s',
+ zone.name, target, lenient)
+ before = len(zone.records)
+ records = self.zone_records(zone)
+ if records:
+ values = defaultdict(lambda: defaultdict(list))
+ for record in records:
+ name = zone.hostname_from_fqdn(record['name'])
+ _type = record['type']
+ if _type in self.SUPPORTS:
+ values[name][record['type']].append(record)
+ for name, types in values.items():
+ for _type, records in types.items():
+ data_for = getattr(self, '_data_for_{}'.format(_type))
+ data = data_for(_type, records)
+ record = Record.new(zone, name, data, source=self,
+ lenient=lenient)
+ zone.add_record(record)
+ self.log.info('populate: found %s records',
+ len(zone.records) - before)
+
+ def domain_list(self):
+ path = '/'
+ domains = {}
+ domains_list = []
+
+ total_count = self._get_total_count(path)
+ domains_list = self._request_with_pagination(path, total_count)
+
+ for domain in domains_list:
+ domains[domain['name']] = domain
+ return domains
+
+ def zone_records(self, zone):
+ path = '/{}/records/'.format(zone.name[:-1])
+ zone_records = []
+
+ total_count = self._get_total_count(path)
+ zone_records = self._request_with_pagination(path, total_count)
+
+ self._zone_records[zone.name] = zone_records
+ return self._zone_records[zone.name]
+
+ def create_domain(self, name, zone=""):
+ path = '/'
+
+ data = {
+ 'name': name,
+ 'bind_zone': zone,
+ }
+
+ resp = self._request('POST', path, data=data)
+ self._domain_list[name] = resp
+ return resp
+
+ def create_record(self, zone_name, data):
+ self.log.debug('Create record. Zone: %s, data %s', zone_name, data)
+ if zone_name in self._domain_list.keys():
+ domain_id = self._domain_list[zone_name]['id']
+ else:
+ domain_id = self.create_domain(zone_name)['id']
+
+ path = '/{}/records/'.format(domain_id)
+ return self._request('POST', path, data=data)
+
+ def delete_record(self, domain, _type, zone):
+ self.log.debug('Delete record. Domain: %s, Type: %s', domain, _type)
+
+ domain_id = self._domain_list[domain]['id']
+ records = self._zone_records.get('{}.'.format(domain), False)
+ if not records:
+ path = '/{}/records/'.format(domain_id)
+ records = self._request('GET', path)
+
+ for record in records:
+ full_domain = domain
+ if zone:
+ full_domain = '{}{}'.format(zone, domain)
+ if record['type'] == _type and record['name'] == full_domain:
+ path = '/{}/records/{}'.format(domain_id, record['id'])
+ return self._request('DELETE', path)
+
+ self.log.debug('Delete record failed (Record not found)')
diff --git a/octodns/provider/transip.py b/octodns/provider/transip.py
new file mode 100644
index 0000000..7458e36
--- /dev/null
+++ b/octodns/provider/transip.py
@@ -0,0 +1,353 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from suds import WebFault
+
+from collections import defaultdict
+from .base import BaseProvider
+from logging import getLogger
+from ..record import Record
+from transip.service.domain import DomainService
+from transip.service.objects import DnsEntry
+
+
+class TransipException(Exception):
+ pass
+
+
+class TransipConfigException(TransipException):
+ pass
+
+
+class TransipNewZoneException(TransipException):
+ pass
+
+
+class TransipProvider(BaseProvider):
+ '''
+ Transip DNS provider
+
+ transip:
+ class: octodns.provider.transip.TransipProvider
+ # Your Transip account name (required)
+ account: yourname
+ # Path to a private key file (required if key is not used)
+ key_file: /path/to/file
+ # The api key as string (required if key_file is not used)
+ key: |
+ \'''
+ -----BEGIN PRIVATE KEY-----
+ ...
+ -----END PRIVATE KEY-----
+ \'''
+ # if both `key_file` and `key` are presented `key_file` is used
+
+ '''
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = False
+ SUPPORTS = set(
+ ('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'SPF', 'TXT', 'SSHFP', 'CAA'))
+ # unsupported by OctoDNS: 'TLSA'
+ MIN_TTL = 120
+ TIMEOUT = 15
+ ROOT_RECORD = '@'
+
+ def __init__(self, id, account, key=None, key_file=None, *args, **kwargs):
+ self.log = getLogger('TransipProvider[{}]'.format(id))
+ self.log.debug('__init__: id=%s, account=%s, token=***', id,
+ account)
+ super(TransipProvider, self).__init__(id, *args, **kwargs)
+
+ if key_file is not None:
+ self._client = DomainService(account, private_key_file=key_file)
+ elif key is not None:
+ self._client = DomainService(account, private_key=key)
+ else:
+ raise TransipConfigException(
+                'Missing `key` or `key_file` parameter in config'
+ )
+
+ self.account = account
+ self.key = key
+
+ self._currentZone = {}
+
+ def populate(self, zone, target=False, lenient=False):
+
+ exists = False
+ self._currentZone = zone
+ self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
+ target, lenient)
+
+ before = len(zone.records)
+ try:
+ zoneInfo = self._client.get_info(zone.name[:-1])
+ except WebFault as e:
+ if e.fault.faultcode == '102' and target is False:
+ # Zone not found in account, and not a target so just
+ # leave an empty zone.
+ return exists
+ elif e.fault.faultcode == '102' and target is True:
+ self.log.warning('populate: Transip can\'t create new zones')
+ raise TransipNewZoneException(
+ ('populate: ({}) Transip used ' +
+ 'as target for non-existing zone: {}').format(
+ e.fault.faultcode, zone.name))
+ else:
+ self.log.error('populate: (%s) %s ', e.fault.faultcode,
+ e.fault.faultstring)
+ raise e
+
+ self.log.debug('populate: found %s records for zone %s',
+ len(zoneInfo.dnsEntries), zone.name)
+ exists = True
+ if zoneInfo.dnsEntries:
+ values = defaultdict(lambda: defaultdict(list))
+ for record in zoneInfo.dnsEntries:
+ name = zone.hostname_from_fqdn(record['name'])
+ if name == self.ROOT_RECORD:
+ name = ''
+
+ if record['type'] in self.SUPPORTS:
+ values[name][record['type']].append(record)
+
+ for name, types in values.items():
+ for _type, records in types.items():
+ data_for = getattr(self, '_data_for_{}'.format(_type))
+ record = Record.new(zone, name, data_for(_type, records),
+ source=self, lenient=lenient)
+ zone.add_record(record, lenient=lenient)
+ self.log.info('populate: found %s records, exists = %s',
+ len(zone.records) - before, exists)
+
+ self._currentZone = {}
+ return exists
+
+ def _apply(self, plan):
+ desired = plan.desired
+ changes = plan.changes
+ self.log.debug('apply: zone=%s, changes=%d', desired.name,
+ len(changes))
+
+ self._currentZone = plan.desired
+ try:
+ self._client.get_info(plan.desired.name[:-1])
+ except WebFault as e:
+ self.log.exception('_apply: get_info failed')
+ raise e
+
+ _dns_entries = []
+ for record in plan.desired.records:
+ if record._type in self.SUPPORTS:
+ entries_for = getattr(self,
+ '_entries_for_{}'.format(record._type))
+
+ # Root records have '@' as name
+ name = record.name
+ if name == '':
+ name = self.ROOT_RECORD
+
+ _dns_entries.extend(entries_for(name, record))
+
+ try:
+ self._client.set_dns_entries(plan.desired.name[:-1], _dns_entries)
+ except WebFault as e:
+ self.log.warning(('_apply: Set DNS returned ' +
+ 'one or more errors: {}').format(
+ e.fault.faultstring))
+ raise TransipException(200, e.fault.faultstring)
+
+ self._currentZone = {}
+
+ def _entries_for_multiple(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ _entries.append(DnsEntry(name, record.ttl, record._type, value))
+
+ return _entries
+
+ def _entries_for_single(self, name, record):
+
+ return [DnsEntry(name, record.ttl, record._type, record.value)]
+
+ _entries_for_A = _entries_for_multiple
+ _entries_for_AAAA = _entries_for_multiple
+ _entries_for_NS = _entries_for_multiple
+ _entries_for_SPF = _entries_for_multiple
+ _entries_for_CNAME = _entries_for_single
+
+ def _entries_for_MX(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ content = "{} {}".format(value.preference, value.exchange)
+ _entries.append(DnsEntry(name, record.ttl, record._type, content))
+
+ return _entries
+
+ def _entries_for_SRV(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ content = "{} {} {} {}".format(value.priority, value.weight,
+ value.port, value.target)
+ _entries.append(DnsEntry(name, record.ttl, record._type, content))
+
+ return _entries
+
+ def _entries_for_SSHFP(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ content = "{} {} {}".format(value.algorithm,
+ value.fingerprint_type,
+ value.fingerprint)
+ _entries.append(DnsEntry(name, record.ttl, record._type, content))
+
+ return _entries
+
+ def _entries_for_CAA(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ content = "{} {} {}".format(value.flags, value.tag,
+ value.value)
+ _entries.append(DnsEntry(name, record.ttl, record._type, content))
+
+ return _entries
+
+ def _entries_for_TXT(self, name, record):
+ _entries = []
+
+ for value in record.values:
+ value = value.replace('\\;', ';')
+ _entries.append(DnsEntry(name, record.ttl, record._type, value))
+
+ return _entries
+
+ def _parse_to_fqdn(self, value):
+
+ # Enforce switch from suds.sax.text.Text to string
+ value = str(value)
+
+ # TransIP allows '@' as value to alias the root record.
+ # this provider won't set an '@' value, but can be an existing record
+ if value == self.ROOT_RECORD:
+ value = self._currentZone.name
+
+ if value[-1] != '.':
+ self.log.debug('parseToFQDN: changed %s to %s', value,
+ '{}.{}'.format(value, self._currentZone.name))
+ value = '{}.{}'.format(value, self._currentZone.name)
+
+ return value
+
+ def _get_lowest_ttl(self, records):
+ _ttl = 100000
+ for record in records:
+ _ttl = min(_ttl, record['expire'])
+ return _ttl
+
+ def _data_for_multiple(self, _type, records):
+
+ _values = []
+ for record in records:
+ # Enforce switch from suds.sax.text.Text to string
+ _values.append(str(record['content']))
+
+ return {
+ 'ttl': self._get_lowest_ttl(records),
+ 'type': _type,
+ 'values': _values
+ }
+
+ _data_for_A = _data_for_multiple
+ _data_for_AAAA = _data_for_multiple
+ _data_for_NS = _data_for_multiple
+ _data_for_SPF = _data_for_multiple
+
+ def _data_for_CNAME(self, _type, records):
+ return {
+ 'ttl': records[0]['expire'],
+ 'type': _type,
+ 'value': self._parse_to_fqdn(records[0]['content'])
+ }
+
+ def _data_for_MX(self, _type, records):
+ _values = []
+ for record in records:
+ preference, exchange = record['content'].split(" ", 1)
+ _values.append({
+ 'preference': preference,
+ 'exchange': self._parse_to_fqdn(exchange)
+ })
+ return {
+ 'ttl': self._get_lowest_ttl(records),
+ 'type': _type,
+ 'values': _values
+ }
+
+ def _data_for_SRV(self, _type, records):
+ _values = []
+ for record in records:
+ priority, weight, port, target = record['content'].split(' ', 3)
+ _values.append({
+ 'port': port,
+ 'priority': priority,
+ 'target': self._parse_to_fqdn(target),
+ 'weight': weight
+ })
+
+ return {
+ 'type': _type,
+ 'ttl': self._get_lowest_ttl(records),
+ 'values': _values
+ }
+
+ def _data_for_SSHFP(self, _type, records):
+ _values = []
+ for record in records:
+ algorithm, fp_type, fingerprint = record['content'].split(' ', 2)
+ _values.append({
+ 'algorithm': algorithm,
+ 'fingerprint': fingerprint.lower(),
+ 'fingerprint_type': fp_type
+ })
+
+ return {
+ 'type': _type,
+ 'ttl': self._get_lowest_ttl(records),
+ 'values': _values
+ }
+
+ def _data_for_CAA(self, _type, records):
+ _values = []
+ for record in records:
+ flags, tag, value = record['content'].split(' ', 2)
+ _values.append({
+ 'flags': flags,
+ 'tag': tag,
+ 'value': value
+ })
+
+ return {
+ 'type': _type,
+ 'ttl': self._get_lowest_ttl(records),
+ 'values': _values
+ }
+
+ def _data_for_TXT(self, _type, records):
+ _values = []
+ for record in records:
+ _values.append(record['content'].replace(';', '\\;'))
+
+ return {
+ 'type': _type,
+ 'ttl': self._get_lowest_ttl(records),
+ 'values': _values
+ }
diff --git a/octodns/provider/yaml.py b/octodns/provider/yaml.py
index a9631a0..10add5a 100644
--- a/octodns/provider/yaml.py
+++ b/octodns/provider/yaml.py
@@ -6,8 +6,8 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
-from os import makedirs
-from os.path import isdir, join
+from os import listdir, makedirs
+from os.path import isdir, isfile, join
import logging
from ..record import Record
@@ -28,7 +28,79 @@ class YamlProvider(BaseProvider):
default_ttl: 3600
# Whether or not to enforce sorting order on the yaml config
# (optional, default True)
- enforce_order: True
+ enforce_order: true
+ # Whether duplicate records should replace rather than error
+        # (optional, default False)
+ populate_should_replace: false
+
+ Overriding values can be accomplished using multiple yaml providers in the
+ `sources` list where subsequent providers have `populate_should_replace`
+ set to `true`. An example use of this would be a zone that you want to push
+ to external DNS providers and internally, but you want to modify some of
+ the records in the internal version.
+
+ config/octodns.com.yaml
+ ---
+ other:
+ type: A
+ values:
+ - 192.30.252.115
+ - 192.30.252.116
+ www:
+ type: A
+ values:
+ - 192.30.252.113
+ - 192.30.252.114
+
+
+ internal/octodns.com.yaml
+ ---
+ 'www':
+ type: A
+ values:
+ - 10.0.0.12
+ - 10.0.0.13
+
+ external.yaml
+ ---
+ providers:
+ config:
+ class: octodns.provider.yaml.YamlProvider
+ directory: ./config
+
+ zones:
+
+ octodns.com.:
+ sources:
+ - config
+ targets:
+ - route53
+
+ internal.yaml
+ ---
+ providers:
+ config:
+ class: octodns.provider.yaml.YamlProvider
+ directory: ./config
+
+ internal:
+ class: octodns.provider.yaml.YamlProvider
+ directory: ./internal
+ populate_should_replace: true
+
+ zones:
+
+ octodns.com.:
+ sources:
+ - config
+ - internal
+ targets:
+ - pdns
+
+    You can then sync your records externally with `--config-file=external.yaml`
+ and internally (with the custom overrides) with
+ `--config-file=internal.yaml`
+
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
@@ -36,15 +108,35 @@ class YamlProvider(BaseProvider):
'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT'))
def __init__(self, id, directory, default_ttl=3600, enforce_order=True,
- *args, **kwargs):
- self.log = logging.getLogger('YamlProvider[{}]'.format(id))
+ populate_should_replace=False, *args, **kwargs):
+ self.log = logging.getLogger('{}[{}]'.format(
+ self.__class__.__name__, id))
self.log.debug('__init__: id=%s, directory=%s, default_ttl=%d, '
- 'enforce_order=%d', id, directory, default_ttl,
- enforce_order)
+ 'enforce_order=%d, populate_should_replace=%d',
+ id, directory, default_ttl, enforce_order,
+ populate_should_replace)
super(YamlProvider, self).__init__(id, *args, **kwargs)
self.directory = directory
self.default_ttl = default_ttl
self.enforce_order = enforce_order
+ self.populate_should_replace = populate_should_replace
+
+ def _populate_from_file(self, filename, zone, lenient):
+ with open(filename, 'r') as fh:
+ yaml_data = safe_load(fh, enforce_order=self.enforce_order)
+ if yaml_data:
+ for name, data in yaml_data.items():
+ if not isinstance(data, list):
+ data = [data]
+ for d in data:
+ if 'ttl' not in d:
+ d['ttl'] = self.default_ttl
+ record = Record.new(zone, name, d, source=self,
+ lenient=lenient)
+ zone.add_record(record, lenient=lenient,
+ replace=self.populate_should_replace)
+ self.log.debug('_populate_from_file: successfully loaded "%s"',
+ filename)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
@@ -57,18 +149,7 @@ class YamlProvider(BaseProvider):
before = len(zone.records)
filename = join(self.directory, '{}yaml'.format(zone.name))
- with open(filename, 'r') as fh:
- yaml_data = safe_load(fh, enforce_order=self.enforce_order)
- if yaml_data:
- for name, data in yaml_data.items():
- if not isinstance(data, list):
- data = [data]
- for d in data:
- if 'ttl' not in d:
- d['ttl'] = self.default_ttl
- record = Record.new(zone, name, d, source=self,
- lenient=lenient)
- zone.add_record(record, lenient=lenient)
+ self._populate_from_file(filename, zone, lenient)
self.log.info('populate: found %s records, exists=False',
len(zone.records) - before)
@@ -102,7 +183,106 @@ class YamlProvider(BaseProvider):
if not isdir(self.directory):
makedirs(self.directory)
+ self._do_apply(desired, data)
+
+ def _do_apply(self, desired, data):
filename = join(self.directory, '{}yaml'.format(desired.name))
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(dict(data), fh)
+
+
+def _list_all_yaml_files(directory):
+ yaml_files = set()
+ for f in listdir(directory):
+ filename = join(directory, '{}'.format(f))
+ if f.endswith('.yaml') and isfile(filename):
+ yaml_files.add(filename)
+ return list(yaml_files)
+
+
+class SplitYamlProvider(YamlProvider):
+ '''
+ Core provider for records configured in multiple YAML files on disk.
+
+    Behaves mostly similarly to YamlProvider, but interacts with multiple YAML
+    files, instead of a single monolithic one. All files are stored in a
+ subdirectory matching the name of the zone (including the trailing .) of
+ the directory config. The files are named RECORD.yaml, except for any
+ record which cannot be represented easily as a file; these are stored in
+    the catchall file, which is a YAML file named for the zone, prepended with '$'.
+ For example, a zone, 'github.com.' would have a catch-all file named
+ '$github.com.yaml'.
+
+ A full directory structure for the zone github.com. managed under directory
+ "zones/" would be:
+
+ zones/
+ github.com./
+ $github.com.yaml
+ www.yaml
+ ...
+
+ config:
+ class: octodns.provider.yaml.SplitYamlProvider
+ # The location of yaml config files (required)
+ directory: ./config
+ # The ttl to use for records when not specified in the data
+ # (optional, default 3600)
+ default_ttl: 3600
+ # Whether or not to enforce sorting order on the yaml config
+ # (optional, default True)
+        enforce_order: true
+ '''
+
+ # Any record name added to this set will be included in the catch-all file,
+ # instead of a file matching the record name.
+ CATCHALL_RECORD_NAMES = ('*', '')
+
+ def __init__(self, id, directory, *args, **kwargs):
+ super(SplitYamlProvider, self).__init__(id, directory, *args, **kwargs)
+
+ def _zone_directory(self, zone):
+ return join(self.directory, zone.name)
+
+ def populate(self, zone, target=False, lenient=False):
+ self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
+ target, lenient)
+
+ if target:
+ # When acting as a target we ignore any existing records so that we
+ # create a completely new copy
+ return False
+
+ before = len(zone.records)
+ yaml_filenames = _list_all_yaml_files(self._zone_directory(zone))
+ self.log.info('populate: found %s YAML files', len(yaml_filenames))
+ for yaml_filename in yaml_filenames:
+ self._populate_from_file(yaml_filename, zone, lenient)
+
+ self.log.info('populate: found %s records, exists=False',
+ len(zone.records) - before)
+ return False
+
+ def _do_apply(self, desired, data):
+ zone_dir = self._zone_directory(desired)
+ if not isdir(zone_dir):
+ makedirs(zone_dir)
+
+ catchall = dict()
+ for record, config in data.items():
+ if record in self.CATCHALL_RECORD_NAMES:
+ catchall[record] = config
+ continue
+ filename = join(zone_dir, '{}.yaml'.format(record))
+ self.log.debug('_apply: writing filename=%s', filename)
+ with open(filename, 'w') as fh:
+ record_data = {record: config}
+ safe_dump(record_data, fh)
+ if catchall:
+ # Scrub the trailing . to make filenames more sane.
+ dname = desired.name[:-1]
+ filename = join(zone_dir, '${}.yaml'.format(dname))
+ self.log.debug('_apply: writing catchall filename=%s', filename)
+ with open(filename, 'w') as fh:
+ safe_dump(catchall, fh)
diff --git a/octodns/record/__init__.py b/octodns/record/__init__.py
index ba0ab98..04eb2da 100644
--- a/octodns/record/__init__.py
+++ b/octodns/record/__init__.py
@@ -9,6 +9,9 @@ from ipaddress import IPv4Address, IPv6Address
from logging import getLogger
import re
+from six import string_types, text_type
+
+from ..equality import EqualityTupleMixin
from .geo import GeoCodes
@@ -23,6 +26,12 @@ class Change(object):
'Returns new if we have one, existing otherwise'
return self.new or self.existing
+ def __lt__(self, other):
+ self_record = self.record
+ other_record = other.record
+ return ((self_record.name, self_record._type) <
+ (other_record.name, other_record._type))
+
class Create(Change):
@@ -68,11 +77,12 @@ class ValidationError(Exception):
self.reasons = reasons
-class Record(object):
+class Record(EqualityTupleMixin):
log = getLogger('Record')
@classmethod
def new(cls, zone, name, data, source=None, lenient=False):
+ name = text_type(name)
fqdn = '{}.{}'.format(name, zone.name) if name else zone.name
try:
_type = data['type']
@@ -96,7 +106,7 @@ class Record(object):
}[_type]
except KeyError:
raise Exception('Unknown record type: "{}"'.format(_type))
- reasons = _class.validate(name, data)
+ reasons = _class.validate(name, fqdn, data)
try:
lenient |= data['octodns']['lenient']
except KeyError:
@@ -109,8 +119,16 @@ class Record(object):
return _class(zone, name, data, source=source)
@classmethod
- def validate(cls, name, data):
+ def validate(cls, name, fqdn, data):
reasons = []
+ n = len(fqdn)
+ if n > 253:
+ reasons.append('invalid fqdn, "{}" is too long at {} chars, max '
+ 'is 253'.format(fqdn, n))
+ n = len(name)
+ if n > 63:
+ reasons.append('invalid name, "{}" is too long at {} chars, max '
+ 'is 63'.format(name, n))
try:
ttl = int(data['ttl'])
if ttl < 0:
@@ -119,7 +137,7 @@ class Record(object):
reasons.append('missing ttl')
try:
if data['octodns']['healthcheck']['protocol'] \
- not in ('HTTP', 'HTTPS'):
+ not in ('HTTP', 'HTTPS', 'TCP'):
reasons.append('invalid healthcheck protocol')
except KeyError:
pass
@@ -130,7 +148,7 @@ class Record(object):
self.__class__.__name__, name)
self.zone = zone
# force everything lower-case just to be safe
- self.name = unicode(name).lower() if name else name
+ self.name = text_type(name).lower() if name else name
self.source = source
self.ttl = int(data['ttl'])
@@ -163,15 +181,21 @@ class Record(object):
@property
def healthcheck_host(self):
+ healthcheck = self._octodns.get('healthcheck', {})
+ if healthcheck.get('protocol', None) == 'TCP':
+ return None
try:
- return self._octodns['healthcheck']['host']
+ return healthcheck['host']
except KeyError:
return self.fqdn[:-1]
@property
def healthcheck_path(self):
+ healthcheck = self._octodns.get('healthcheck', {})
+ if healthcheck.get('protocol', None) == 'TCP':
+ return None
try:
- return self._octodns['healthcheck']['path']
+ return healthcheck['path']
except KeyError:
return '/_dns'
@@ -194,24 +218,22 @@ class Record(object):
if self.ttl != other.ttl:
return Update(self, other)
- # NOTE: we're using __hash__ and __cmp__ methods that consider Records
+ # NOTE: we're using __hash__ and ordering methods that consider Records
# equivalent if they have the same name & _type. Values are ignored. This
# is useful when computing diffs/changes.
def __hash__(self):
return '{}:{}'.format(self.name, self._type).__hash__()
- def __cmp__(self, other):
- a = '{}:{}'.format(self.name, self._type)
- b = '{}:{}'.format(other.name, other._type)
- return cmp(a, b)
+ def _equality_tuple(self):
+ return (self.name, self._type)
def __repr__(self):
# Make sure this is always overridden
raise NotImplementedError('Abstract base class, __repr__ required')
-class GeoValue(object):
+class GeoValue(EqualityTupleMixin):
geo_re = re.compile(r'^(?P\w\w)(-(?P\w\w)'
r'(-(?P\w\w))?)?$')
@@ -238,11 +260,9 @@ class GeoValue(object):
yield '-'.join(bits)
bits.pop()
- def __cmp__(self, other):
- return 0 if (self.continent_code == other.continent_code and
- self.country_code == other.country_code and
- self.subdivision_code == other.subdivision_code and
- self.values == other.values) else 1
+ def _equality_tuple(self):
+ return (self.continent_code, self.country_code, self.subdivision_code,
+ self.values)
def __repr__(self):
return "'Geo {} {} {} {}'".format(self.continent_code,
@@ -253,8 +273,8 @@ class GeoValue(object):
class _ValuesMixin(object):
@classmethod
- def validate(cls, name, data):
- reasons = super(_ValuesMixin, cls).validate(name, data)
+ def validate(cls, name, fqdn, data):
+ reasons = super(_ValuesMixin, cls).validate(name, fqdn, data)
values = data.get('values', data.get('value', []))
@@ -268,7 +288,6 @@ class _ValuesMixin(object):
values = data['values']
except KeyError:
values = [data['value']]
- # TODO: should we natsort values?
self.values = sorted(self._value_type.process(values))
def changes(self, other, target):
@@ -292,7 +311,7 @@ class _ValuesMixin(object):
return ret
def __repr__(self):
- values = "['{}']".format("', '".join([unicode(v)
+ values = "['{}']".format("', '".join([text_type(v)
for v in self.values]))
return '<{} {} {}, {}, {}>'.format(self.__class__.__name__,
self._type, self.ttl,
@@ -307,8 +326,8 @@ class _GeoMixin(_ValuesMixin):
'''
@classmethod
- def validate(cls, name, data):
- reasons = super(_GeoMixin, cls).validate(name, data)
+ def validate(cls, name, fqdn, data):
+ reasons = super(_GeoMixin, cls).validate(name, fqdn, data)
try:
geo = dict(data['geo'])
for code, values in geo.items():
@@ -354,8 +373,8 @@ class _GeoMixin(_ValuesMixin):
class _ValueMixin(object):
@classmethod
- def validate(cls, name, data):
- reasons = super(_ValueMixin, cls).validate(name, data)
+ def validate(cls, name, fqdn, data):
+ reasons = super(_ValueMixin, cls).validate(name, fqdn, data)
reasons.extend(cls._value_type.validate(data.get('value', None),
cls._type))
return reasons
@@ -481,8 +500,8 @@ class _DynamicMixin(object):
r'(-(?P\w\w))?)?$')
@classmethod
- def validate(cls, name, data):
- reasons = super(_DynamicMixin, cls).validate(name, data)
+ def validate(cls, name, fqdn, data):
+ reasons = super(_DynamicMixin, cls).validate(name, fqdn, data)
if 'dynamic' not in data:
return reasons
@@ -494,6 +513,8 @@ class _DynamicMixin(object):
except KeyError:
pools = {}
+ pools_exist = set()
+ pools_seen = set()
if not isinstance(pools, dict):
reasons.append('pools must be a dict')
elif not pools:
@@ -509,12 +530,14 @@ class _DynamicMixin(object):
reasons.append('pool "{}" is missing values'.format(_id))
continue
+ pools_exist.add(_id)
+
for i, value in enumerate(values):
value_num = i + 1
try:
weight = value['weight']
weight = int(weight)
- if weight < 1 or weight > 255:
+ if weight < 1 or weight > 15:
reasons.append('invalid weight "{}" in pool "{}" '
'value {}'.format(weight, _id,
value_num))
@@ -565,7 +588,6 @@ class _DynamicMixin(object):
seen_default = False
# TODO: don't allow 'default' as a pool name, reserved
- # TODO: warn or error on unused pools?
for i, rule in enumerate(rules):
rule_num = i + 1
try:
@@ -574,17 +596,25 @@ class _DynamicMixin(object):
reasons.append('rule {} missing pool'.format(rule_num))
continue
- if not isinstance(pool, basestring):
- reasons.append('rule {} invalid pool "{}"'
- .format(rule_num, pool))
- elif pool not in pools:
- reasons.append('rule {} undefined pool "{}"'
- .format(rule_num, pool))
-
try:
geos = rule['geos']
except KeyError:
geos = []
+
+ if not isinstance(pool, string_types):
+ reasons.append('rule {} invalid pool "{}"'
+ .format(rule_num, pool))
+ else:
+ if pool not in pools:
+ reasons.append('rule {} undefined pool "{}"'
+ .format(rule_num, pool))
+ pools_seen.add(pool)
+ elif pool in pools_seen and geos:
+ reasons.append('rule {} invalid, target pool "{}" '
+ 'reused'.format(rule_num, pool))
+ pools_seen.add(pool)
+
+ if not geos:
if seen_default:
reasons.append('rule {} duplicate default'
.format(rule_num))
@@ -598,6 +628,11 @@ class _DynamicMixin(object):
reasons.extend(GeoCodes.validate(geo, 'rule {} '
.format(rule_num)))
+ unused = pools_exist - pools_seen
+ if unused:
+ unused = '", "'.join(sorted(unused))
+ reasons.append('unused pools: "{}"'.format(unused))
+
return reasons
def __init__(self, zone, name, data, *args, **kwargs):
@@ -671,13 +706,13 @@ class _IpList(object):
return ['missing value(s)']
reasons = []
for value in data:
- if value is '':
+ if value == '':
reasons.append('empty value')
elif value is None:
reasons.append('missing value(s)')
else:
try:
- cls._address_type(unicode(value))
+ cls._address_type(text_type(value))
except Exception:
reasons.append('invalid {} address "{}"'
.format(cls._address_name, value))
@@ -685,7 +720,8 @@ class _IpList(object):
@classmethod
def process(cls, values):
- return values
+ # Translate None values into '' so that the list is sortable under python3
+ return [v if v is not None else '' for v in values]
class Ipv4List(_IpList):
@@ -714,6 +750,8 @@ class _TargetValue(object):
@classmethod
def process(self, value):
+ if value:
+ return value.lower()
return value
@@ -740,7 +778,7 @@ class AliasRecord(_ValueMixin, Record):
_value_type = AliasValue
-class CaaValue(object):
+class CaaValue(EqualityTupleMixin):
# https://tools.ietf.org/html/rfc6844#page-5
@classmethod
@@ -779,12 +817,8 @@ class CaaValue(object):
'value': self.value,
}
- def __cmp__(self, other):
- if self.flags == other.flags:
- if self.tag == other.tag:
- return cmp(self.value, other.value)
- return cmp(self.tag, other.tag)
- return cmp(self.flags, other.flags)
+ def _equality_tuple(self):
+ return (self.flags, self.tag, self.value)
def __repr__(self):
return '{} {} "{}"'.format(self.flags, self.tag, self.value)
@@ -800,15 +834,15 @@ class CnameRecord(_DynamicMixin, _ValueMixin, Record):
_value_type = CnameValue
@classmethod
- def validate(cls, name, data):
+ def validate(cls, name, fqdn, data):
reasons = []
if name == '':
reasons.append('root CNAME not allowed')
- reasons.extend(super(CnameRecord, cls).validate(name, data))
+ reasons.extend(super(CnameRecord, cls).validate(name, fqdn, data))
return reasons
-class MxValue(object):
+class MxValue(EqualityTupleMixin):
@classmethod
def validate(cls, data, _type):
@@ -852,7 +886,7 @@ class MxValue(object):
exchange = value['exchange']
except KeyError:
exchange = value['value']
- self.exchange = exchange
+ self.exchange = exchange.lower()
@property
def data(self):
@@ -861,10 +895,11 @@ class MxValue(object):
'exchange': self.exchange,
}
- def __cmp__(self, other):
- if self.preference == other.preference:
- return cmp(self.exchange, other.exchange)
- return cmp(self.preference, other.preference)
+ def __hash__(self):
+ return hash((self.preference, self.exchange))
+
+ def _equality_tuple(self):
+ return (self.preference, self.exchange)
def __repr__(self):
return "'{} {}'".format(self.preference, self.exchange)
@@ -875,7 +910,7 @@ class MxRecord(_ValuesMixin, Record):
_value_type = MxValue
-class NaptrValue(object):
+class NaptrValue(EqualityTupleMixin):
VALID_FLAGS = ('S', 'A', 'U', 'P')
@classmethod
@@ -934,18 +969,12 @@ class NaptrValue(object):
'replacement': self.replacement,
}
- def __cmp__(self, other):
- if self.order != other.order:
- return cmp(self.order, other.order)
- elif self.preference != other.preference:
- return cmp(self.preference, other.preference)
- elif self.flags != other.flags:
- return cmp(self.flags, other.flags)
- elif self.service != other.service:
- return cmp(self.service, other.service)
- elif self.regexp != other.regexp:
- return cmp(self.regexp, other.regexp)
- return cmp(self.replacement, other.replacement)
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def _equality_tuple(self):
+ return (self.order, self.preference, self.flags, self.service,
+ self.regexp, self.replacement)
def __repr__(self):
flags = self.flags if self.flags is not None else ''
@@ -995,7 +1024,7 @@ class PtrRecord(_ValueMixin, Record):
_value_type = PtrValue
-class SshfpValue(object):
+class SshfpValue(EqualityTupleMixin):
VALID_ALGORITHMS = (1, 2, 3, 4)
VALID_FINGERPRINT_TYPES = (1, 2)
@@ -1046,12 +1075,11 @@ class SshfpValue(object):
'fingerprint': self.fingerprint,
}
- def __cmp__(self, other):
- if self.algorithm != other.algorithm:
- return cmp(self.algorithm, other.algorithm)
- elif self.fingerprint_type != other.fingerprint_type:
- return cmp(self.fingerprint_type, other.fingerprint_type)
- return cmp(self.fingerprint, other.fingerprint)
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def _equality_tuple(self):
+ return (self.algorithm, self.fingerprint_type, self.fingerprint)
def __repr__(self):
return "'{} {} {}'".format(self.algorithm, self.fingerprint_type,
@@ -1067,15 +1095,18 @@ class _ChunkedValuesMixin(_ValuesMixin):
CHUNK_SIZE = 255
_unescaped_semicolon_re = re.compile(r'\w;')
+ def chunked_value(self, value):
+ value = value.replace('"', '\\"')
+ vs = [value[i:i + self.CHUNK_SIZE]
+ for i in range(0, len(value), self.CHUNK_SIZE)]
+ vs = '" "'.join(vs)
+ return '"{}"'.format(vs)
+
@property
def chunked_values(self):
values = []
for v in self.values:
- v = v.replace('"', '\\"')
- vs = [v[i:i + self.CHUNK_SIZE]
- for i in range(0, len(v), self.CHUNK_SIZE)]
- vs = '" "'.join(vs)
- values.append('"{}"'.format(vs))
+ values.append(self.chunked_value(v))
return values
@@ -1109,7 +1140,7 @@ class SpfRecord(_ChunkedValuesMixin, Record):
_value_type = _ChunkedValue
-class SrvValue(object):
+class SrvValue(EqualityTupleMixin):
@classmethod
def validate(cls, data, _type):
@@ -1164,14 +1195,11 @@ class SrvValue(object):
'target': self.target,
}
- def __cmp__(self, other):
- if self.priority != other.priority:
- return cmp(self.priority, other.priority)
- elif self.weight != other.weight:
- return cmp(self.weight, other.weight)
- elif self.port != other.port:
- return cmp(self.port, other.port)
- return cmp(self.target, other.target)
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def _equality_tuple(self):
+ return (self.priority, self.weight, self.port, self.target)
def __repr__(self):
return "'{} {} {} {}'".format(self.priority, self.weight, self.port,
@@ -1184,11 +1212,11 @@ class SrvRecord(_ValuesMixin, Record):
_name_re = re.compile(r'^_[^\.]+\.[^\.]+')
@classmethod
- def validate(cls, name, data):
+ def validate(cls, name, fqdn, data):
reasons = []
if not cls._name_re.match(name):
- reasons.append('invalid name')
- reasons.extend(super(SrvRecord, cls).validate(name, data))
+ reasons.append('invalid name for SRV record')
+ reasons.extend(super(SrvRecord, cls).validate(name, fqdn, data))
return reasons
diff --git a/octodns/record/geo.py b/octodns/record/geo.py
index ed54194..0a2f1a3 100644
--- a/octodns/record/geo.py
+++ b/octodns/record/geo.py
@@ -63,9 +63,15 @@ class GeoCodes(object):
@classmethod
def province_to_code(cls, province):
- # We get to cheat on this one since we only support provinces in NA-US
- if province not in geo_data['NA']['US']['provinces']:
+ # We cheat on this one a little since we only support provinces in
+ # NA-US, NA-CA
+ if (province not in geo_data['NA']['US']['provinces'] and
+ province not in geo_data['NA']['CA']['provinces']):
cls.log.warn('country_to_code: unrecognized province "%s"',
province)
return
- return 'NA-US-{}'.format(province)
+ if province in geo_data['NA']['US']['provinces']:
+ country = 'US'
+ if province in geo_data['NA']['CA']['provinces']:
+ country = 'CA'
+ return 'NA-{}-{}'.format(country, province)
diff --git a/octodns/record/geo_data.py b/octodns/record/geo_data.py
index 5393db0..39fa5db 100644
--- a/octodns/record/geo_data.py
+++ b/octodns/record/geo_data.py
@@ -55,7 +55,7 @@ geo_data = \
'SO': {'name': 'Somalia'},
'SS': {'name': 'South Sudan'},
'ST': {'name': 'Sao Tome and Principe'},
- 'SZ': {'name': 'Swaziland'},
+ 'SZ': {'name': 'Eswatini'},
'TD': {'name': 'Chad'},
'TG': {'name': 'Togo'},
'TN': {'name': 'Tunisia'},
@@ -116,6 +116,7 @@ geo_data = \
'SY': {'name': 'Syrian Arab Republic'},
'TH': {'name': 'Thailand'},
'TJ': {'name': 'Tajikistan'},
+ 'TL': {'name': 'Timor-Leste'},
'TM': {'name': 'Turkmenistan'},
'TR': {'name': 'Turkey'},
'TW': {'name': 'Taiwan, Province of China'},
@@ -157,7 +158,7 @@ geo_data = \
'MC': {'name': 'Monaco'},
'MD': {'name': 'Moldova, Republic of'},
'ME': {'name': 'Montenegro'},
- 'MK': {'name': 'Macedonia, Republic of'},
+ 'MK': {'name': 'North Macedonia'},
'MT': {'name': 'Malta'},
'NL': {'name': 'Netherlands'},
'NO': {'name': 'Norway'},
@@ -173,7 +174,6 @@ geo_data = \
'SM': {'name': 'San Marino'},
'UA': {'name': 'Ukraine'},
'VA': {'name': 'Holy See (Vatican City State)'}},
- 'ID': {'TL': {'name': 'Timor-Leste'}},
'NA': {'AG': {'name': 'Antigua and Barbuda'},
'AI': {'name': 'Anguilla'},
'AW': {'name': 'Aruba'},
@@ -183,7 +183,20 @@ geo_data = \
'BQ': {'name': 'Bonaire, Sint Eustatius and Saba'},
'BS': {'name': 'Bahamas'},
'BZ': {'name': 'Belize'},
- 'CA': {'name': 'Canada'},
+ 'CA': {'name': 'Canada',
+ 'provinces': {'AB': {'name': 'Alberta'},
+ 'BC': {'name': 'British Columbia'},
+ 'MB': {'name': 'Manitoba'},
+ 'NB': {'name': 'New Brunswick'},
+ 'NL': {'name': 'Newfoundland and Labrador'},
+ 'NS': {'name': 'Nova Scotia'},
+ 'NT': {'name': 'Northwest Territories'},
+ 'NU': {'name': 'Nunavut'},
+ 'ON': {'name': 'Ontario'},
+ 'PE': {'name': 'Prince Edward Island'},
+ 'QC': {'name': 'Quebec'},
+ 'SK': {'name': 'Saskatchewan'},
+ 'YT': {'name': 'Yukon Territory'}}},
'CR': {'name': 'Costa Rica'},
'CU': {'name': 'Cuba'},
'CW': {'name': 'Curaçao'},
diff --git a/octodns/source/axfr.py b/octodns/source/axfr.py
index f35c4b3..70569d1 100644
--- a/octodns/source/axfr.py
+++ b/octodns/source/axfr.py
@@ -15,6 +15,7 @@ from dns.exception import DNSException
from collections import defaultdict
from os import listdir
from os.path import join
+from six import text_type
import logging
from ..record import Record
@@ -179,8 +180,7 @@ class ZoneFileSourceNotFound(ZoneFileSourceException):
class ZoneFileSourceLoadFailure(ZoneFileSourceException):
def __init__(self, error):
- super(ZoneFileSourceLoadFailure, self).__init__(
- error.message)
+ super(ZoneFileSourceLoadFailure, self).__init__(text_type(error))
class ZoneFileSource(AxfrBaseSource):
@@ -192,12 +192,17 @@ class ZoneFileSource(AxfrBaseSource):
# The directory holding the zone files
# Filenames should match zone name (eg. example.com.)
directory: ./zonefiles
+ # Whether sanity checks of the origin node should be performed
+ # (optional, default true)
+ check_origin: false
'''
- def __init__(self, id, directory):
+ def __init__(self, id, directory, check_origin=True):
self.log = logging.getLogger('ZoneFileSource[{}]'.format(id))
- self.log.debug('__init__: id=%s, directory=%s', id, directory)
+ self.log.debug('__init__: id=%s, directory=%s, check_origin=%s', id,
+ directory, check_origin)
super(ZoneFileSource, self).__init__(id)
self.directory = directory
+ self.check_origin = check_origin
self._zone_records = {}
@@ -206,7 +211,8 @@ class ZoneFileSource(AxfrBaseSource):
if zone_name in zonefiles:
try:
z = dns.zone.from_file(join(self.directory, zone_name),
- zone_name, relativize=False)
+ zone_name, relativize=False,
+ check_origin=self.check_origin)
except DNSException as error:
raise ZoneFileSourceLoadFailure(error)
else:
diff --git a/octodns/source/tinydns.py b/octodns/source/tinydns.py
old mode 100644
new mode 100755
index 679accb..9c44ed8
--- a/octodns/source/tinydns.py
+++ b/octodns/source/tinydns.py
@@ -11,6 +11,7 @@ from os import listdir
from os.path import join
import logging
import re
+import textwrap
from ..record import Record
from ..zone import DuplicateRecordException, SubzoneRecordException
@@ -20,7 +21,7 @@ from .base import BaseSource
class TinyDnsBaseSource(BaseSource):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
- SUPPORTS = set(('A', 'CNAME', 'MX', 'NS'))
+ SUPPORTS = set(('A', 'CNAME', 'MX', 'NS', 'TXT', 'AAAA'))
split_re = re.compile(r':+')
@@ -45,6 +46,41 @@ class TinyDnsBaseSource(BaseSource):
'values': values,
}
+ def _data_for_AAAA(self, _type, records):
+ values = []
+ for record in records:
+ # TinyDNS files have the ipv6 address written in full, but with the
+ # colons removed. This inserts a colon every 4th character to make
+ # the address correct.
+ values.append(u":".join(textwrap.wrap(record[0], 4)))
+ try:
+ ttl = records[0][1]
+ except IndexError:
+ ttl = self.default_ttl
+ return {
+ 'ttl': ttl,
+ 'type': _type,
+ 'values': values,
+ }
+
+ def _data_for_TXT(self, _type, records):
+ values = []
+
+ for record in records:
+ new_value = record[0].encode('latin1').decode('unicode-escape') \
+ .replace(";", "\\;")
+ values.append(new_value)
+
+ try:
+ ttl = records[0][1]
+ except IndexError:
+ ttl = self.default_ttl
+ return {
+ 'ttl': ttl,
+ 'type': _type,
+ 'values': values,
+ }
+
def _data_for_CNAME(self, _type, records):
first = records[0]
try:
@@ -104,6 +140,9 @@ class TinyDnsBaseSource(BaseSource):
'C': 'CNAME',
'+': 'A',
'@': 'MX',
+ '\'': 'TXT',
+ '3': 'AAAA',
+ '6': 'AAAA',
}
name_re = re.compile(r'((?P.+)\.)?{}$'.format(zone.name[:-1]))
@@ -214,7 +253,7 @@ class TinyDnsFileSource(TinyDnsBaseSource):
# Ignore hidden files
continue
with open(join(self.directory, filename), 'r') as fh:
- lines += filter(lambda l: l, fh.read().split('\n'))
+ lines += [l for l in fh.read().split('\n') if l]
self._cache = lines
diff --git a/octodns/yaml.py b/octodns/yaml.py
index 98bafdb..4187199 100644
--- a/octodns/yaml.py
+++ b/octodns/yaml.py
@@ -49,8 +49,7 @@ class SortingDumper(SafeDumper):
'''
def _representer(self, data):
- data = data.items()
- data.sort(key=lambda d: _natsort_key(d[0]))
+ data = sorted(data.items(), key=lambda d: _natsort_key(d[0]))
return self.represent_mapping(self.DEFAULT_MAPPING_TAG, data)
diff --git a/octodns/zone.py b/octodns/zone.py
index 916f81b..5f099ac 100644
--- a/octodns/zone.py
+++ b/octodns/zone.py
@@ -9,6 +9,8 @@ from collections import defaultdict
from logging import getLogger
import re
+from six import text_type
+
from .record import Create, Delete
@@ -38,7 +40,7 @@ class Zone(object):
raise Exception('Invalid zone name {}, missing ending dot'
.format(name))
# Force everything to lowercase just to be safe
- self.name = unicode(name).lower() if name else name
+ self.name = text_type(name).lower() if name else name
self.sub_zones = sub_zones
# We're grouping by node, it allows us to efficiently search for
# duplicates and detect when CNAMEs co-exist with other records
@@ -82,8 +84,8 @@ class Zone(object):
raise DuplicateRecordException('Duplicate record {}, type {}'
.format(record.fqdn,
record._type))
- elif not lenient and (((record._type == 'CNAME' and len(node) > 0) or
- ('CNAME' in map(lambda r: r._type, node)))):
+ elif not lenient and ((record._type == 'CNAME' and len(node) > 0) or
+ ('CNAME' in [r._type for r in node])):
# We're adding a CNAME to existing records or adding to an existing
# CNAME
raise InvalidNodeException('Invalid state, CNAME at {} cannot '
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 1afee06..485a33f 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,9 +1,8 @@
coverage
mock
nose
-pycodestyle==2.4.0
-pycountry>=18.12.8
-pycountry_convert>=0.7.2
-pyflakes==1.6.0
+pycodestyle==2.6.0
+pyflakes==2.2.0
+readme_renderer[md]==26.0
requests_mock
-twine==1.11.0
+twine==1.15.0
diff --git a/requirements.txt b/requirements.txt
index c56a6d7..e8dcb7d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,26 @@
-PyYaml==4.2b1
-azure-common==1.1.9
-azure-mgmt-dns==1.2.0
-boto3==1.7.5
-botocore==1.10.5
-dnspython==1.15.0
-docutils==0.14
+PyYaml==5.3.1
+azure-common==1.1.25
+azure-mgmt-dns==3.0.0
+boto3==1.13.19
+botocore==1.17.14
+dnspython==1.16.0
+docutils==0.16
dyn==1.8.1
-futures==3.2.0
-google-cloud-core==0.28.1
-google-cloud-dns==0.29.0
-incf.countryutils==1.0
-ipaddress==1.0.22
-jmespath==0.9.3
-msrestazure==0.4.27
-natsort==5.5.0
-nsone==0.9.100
-ovh==0.4.8
-python-dateutil==2.6.1
-requests==2.20.0
-s3transfer==0.1.13
-six==1.11.0
-setuptools==38.5.2
+edgegrid-python==1.1.1
+futures==3.2.0; python_version < '3.0'
+google-cloud-core==1.3.0
+google-cloud-dns==0.32.0
+ipaddress==1.0.23
+jmespath==0.10.0
+msrestazure==0.6.4
+natsort==6.2.1
+ns1-python==0.16.0
+ovh==0.5.0
+pycountry-convert==0.7.2
+pycountry==19.8.18
+python-dateutil==2.8.1
+requests==2.24.0
+s3transfer==0.3.3
+setuptools==44.1.1
+six==1.15.0
+transip==2.1.2
diff --git a/script/bootstrap b/script/bootstrap
index 7a82923..b135122 100755
--- a/script/bootstrap
+++ b/script/bootstrap
@@ -4,7 +4,7 @@
set -e
-cd "$(dirname $0)"/..
+cd "$(dirname "$0")"/..
ROOT=$(pwd)
if [ -z "$VENV_NAME" ]; then
@@ -13,9 +13,9 @@ fi
if [ ! -d "$VENV_NAME" ]; then
if [ -z "$VENV_PYTHON" ]; then
- VENV_PYTHON=`which python`
+ VENV_PYTHON=$(command -v python3)
fi
- virtualenv --python=$VENV_PYTHON $VENV_NAME
+ virtualenv --python="$VENV_PYTHON" "$VENV_NAME"
fi
. "$VENV_NAME/bin/activate"
diff --git a/script/cibuild b/script/cibuild
index d048e8e..a2dc527 100755
--- a/script/cibuild
+++ b/script/cibuild
@@ -27,4 +27,6 @@ echo "## lint ##################################################################
script/lint
echo "## tests/coverage ##############################################################"
script/coverage
+echo "## validate setup.py build #####################################################"
+python setup.py build
echo "## complete ####################################################################"
diff --git a/script/coverage b/script/coverage
index d38a41a..32bdaea 100755
--- a/script/coverage
+++ b/script/coverage
@@ -26,11 +26,17 @@ export DYN_PASSWORD=
export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS=
-coverage run --branch --source=octodns --omit=octodns/cmds/* `which nosetests` --with-xunit "$@"
+# Don't allow disabling coverage
+grep -r -I --line-number "# pragma: nocover" octodns && {
+ echo "Code coverage should not be disabled"
+ exit 1
+}
+
+coverage run --branch --source=octodns --omit=octodns/cmds/* "$(command -v nosetests)" --with-xunit "$@"
coverage html
coverage xml
-coverage report
-coverage report | grep ^TOTAL| grep -qv 100% && {
- echo "Incomplete code coverage"
+coverage report --show-missing
+coverage report | grep ^TOTAL | grep -qv 100% && {
+ echo "Incomplete code coverage" >&2
exit 1
} || echo "Code coverage 100%"
diff --git a/script/generate-geo-data b/script/generate-geo-data
index 87a57b1..8767e49 100755
--- a/script/generate-geo-data
+++ b/script/generate-geo-data
@@ -8,8 +8,8 @@ from pycountry_convert import country_alpha2_to_continent_code
subs = defaultdict(dict)
for subdivision in subdivisions:
- # Route53 only supports US states, Dyn supports US states and CA provinces, but for now we'll just do US
- if subdivision.country_code not in ('US'):
+ # Route53 only supports US states, Dyn (and others) support US states and CA provinces
+ if subdivision.country_code not in ('US', 'CA'):
continue
subs[subdivision.country_code][subdivision.code[3:]] = {
'name': subdivision.name
@@ -24,7 +24,7 @@ continent_backups = {
'PN': 'OC',
'SX': 'NA',
'TF': 'AN',
- 'TL': 'ID',
+ 'TL': 'AS',
'UM': 'OC',
'VA': 'EU',
}
diff --git a/script/lint b/script/lint
index 5fd9a7d..627c5be 100755
--- a/script/lint
+++ b/script/lint
@@ -17,5 +17,5 @@ fi
SOURCES="*.py octodns/*.py octodns/*/*.py tests/*.py"
-pycodestyle --ignore=E221,E241,E251,E722,W504 $SOURCES
+pycodestyle --ignore=E221,E241,E251,E722,E741,W504 $SOURCES
pyflakes $SOURCES
diff --git a/script/release b/script/release
index 3b64911..f2c90bf 100755
--- a/script/release
+++ b/script/release
@@ -2,7 +2,7 @@
set -e
-cd "$(dirname $0)"/..
+cd "$(dirname "$0")"/..
ROOT=$(pwd)
if [ -z "$VENV_NAME" ]; then
@@ -16,11 +16,12 @@ if [ ! -f "$ACTIVATE" ]; then
fi
. "$ACTIVATE"
-VERSION=$(grep __VERSION__ $ROOT/octodns/__init__.py | sed -e "s/.* = '//" -e "s/'$//")
+VERSION="$(grep __VERSION__ "$ROOT/octodns/__init__.py" | sed -e "s/.* = '//" -e "s/'$//")"
-git tag -s v$VERSION -m "Release $VERSION"
-git push origin v$VERSION
+git tag -s "v$VERSION" -m "Release $VERSION"
+git push origin "v$VERSION"
echo "Tagged and pushed v$VERSION"
python setup.py sdist
+twine check dist/*$VERSION.tar.gz
twine upload dist/*$VERSION.tar.gz
echo "Uploaded $VERSION"
diff --git a/script/sdist b/script/sdist
index f244363..1ab0949 100755
--- a/script/sdist
+++ b/script/sdist
@@ -3,13 +3,13 @@
set -e
if ! git diff-index --quiet HEAD --; then
- echo "Changes in local directory, commit or clear"
+ echo "Changes in local directory, commit or clear" >&2
exit 1
fi
SHA=$(git rev-parse HEAD)
python setup.py sdist
-TARBALL=dist/octodns-$SHA.tar.gz
-mv dist/octodns-0.*.tar.gz $TARBALL
+TARBALL="dist/octodns-$SHA.tar.gz"
+mv dist/octodns-0.*.tar.gz "$TARBALL"
echo "Created $TARBALL"
diff --git a/setup.py b/setup.py
index 7a9348e..c56aa82 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,9 @@
#!/usr/bin/env python
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
from os.path import dirname, join
import octodns
@@ -21,6 +25,39 @@ console_scripts = {
for name in cmds
}
+
+def long_description():
+ buf = StringIO()
+ yaml_block = False
+ supported_providers = False
+ with open('README.md') as fh:
+ for line in fh:
+ if line == '```yaml\n':
+ yaml_block = True
+ continue
+ elif yaml_block and line == '---\n':
+ # skip the line
+ continue
+ elif yaml_block and line == '```\n':
+ yaml_block = False
+ continue
+ elif supported_providers:
+ if line.startswith('## '):
+ supported_providers = False
+ # write this line out, no continue
+ else:
+ # We're ignoring this one
+ continue
+ elif line == '## Supported providers\n':
+ supported_providers = True
+ continue
+ buf.write(line)
+ buf = buf.getvalue()
+ with open('/tmp/mod', 'w') as fh:
+ fh.write(buf)
+ return buf
+
+
setup(
author='Ross McFarland',
author_email='rwmcfa1@gmail.com',
@@ -31,16 +68,17 @@ setup(
install_requires=[
'PyYaml>=4.2b1',
'dnspython>=1.15.0',
- 'futures>=3.2.0',
- 'incf.countryutils>=1.0',
+ 'futures>=3.2.0; python_version<"3.2"',
'ipaddress>=1.0.22',
'natsort>=5.5.0',
- # botocore doesn't like >=2.7.0 for some reason
- 'python-dateutil>=2.6.0,<2.7.0',
+ 'pycountry>=19.8.18',
+ 'pycountry-convert>=0.7.2',
+ 'python-dateutil>=2.8.1',
'requests>=2.20.0'
],
license='MIT',
- long_description=open('README.md').read(),
+ long_description=long_description(),
+ long_description_content_type='text/markdown',
name='octodns',
packages=find_packages(),
url='https://github.com/github/octodns',
diff --git a/tests/config/dynamic.tests.yaml b/tests/config/dynamic.tests.yaml
index fb33aec..4bd97a7 100644
--- a/tests/config/dynamic.tests.yaml
+++ b/tests/config/dynamic.tests.yaml
@@ -19,16 +19,17 @@ a:
- value: 6.6.6.6
weight: 10
- value: 5.5.5.5
- weight: 25
+ weight: 15
rules:
- geos:
- EU-GB
- pool: iad
+ pool: lax
- geos:
- EU
pool: ams
- geos:
- NA-US-CA
+ - NA-US-NC
- NA-US-OR
- NA-US-WA
pool: sea
@@ -59,12 +60,13 @@ aaaa:
rules:
- geos:
- EU-GB
- pool: iad
+ pool: lax
- geos:
- EU
pool: ams
- geos:
- NA-US-CA
+ - NA-US-NC
- NA-US-OR
- NA-US-WA
pool: sea
@@ -88,18 +90,19 @@ cname:
sea:
values:
- value: target-sea-1.unit.tests.
- weight: 100
+ weight: 10
- value: target-sea-2.unit.tests.
- weight: 175
+ weight: 14
rules:
- geos:
- EU-GB
- pool: iad
+ pool: lax
- geos:
- EU
pool: ams
- geos:
- NA-US-CA
+ - NA-US-NC
- NA-US-OR
- NA-US-WA
pool: sea
@@ -159,6 +162,7 @@ real-ish-a:
- geos:
# TODO: require sorted
- NA-US-CA
+ - NA-US-NC
- NA-US-OR
- NA-US-WA
pool: us-west-2
diff --git a/tests/config/override/dynamic.tests.yaml b/tests/config/override/dynamic.tests.yaml
new file mode 100644
index 0000000..d79e092
--- /dev/null
+++ b/tests/config/override/dynamic.tests.yaml
@@ -0,0 +1,13 @@
+---
+# Replace 'a' with a generic record
+a:
+ type: A
+ values:
+ - 4.4.4.4
+ - 5.5.5.5
+# Add another record
+added:
+ type: A
+ values:
+ - 6.6.6.6
+ - 7.7.7.7
diff --git a/tests/config/provider-problems.yaml b/tests/config/provider-problems.yaml
new file mode 100644
index 0000000..9071046
--- /dev/null
+++ b/tests/config/provider-problems.yaml
@@ -0,0 +1,28 @@
+providers:
+ yaml:
+ class: octodns.provider.yaml.YamlProvider
+ directory: ./config
+ simple_source:
+ class: helpers.SimpleSource
+zones:
+ missing.sources.:
+ targets:
+ - yaml
+ missing.targets.:
+ sources:
+ - yaml
+ unknown.source.:
+ sources:
+ - not-there
+ targets:
+ - yaml
+ unknown.target.:
+ sources:
+ - yaml
+ targets:
+ - not-there-either
+ not.targetable.:
+ sources:
+ - yaml
+ targets:
+ - simple_source
diff --git a/tests/config/simple-split.yaml b/tests/config/simple-split.yaml
new file mode 100644
index 0000000..d106506
--- /dev/null
+++ b/tests/config/simple-split.yaml
@@ -0,0 +1,37 @@
+manager:
+ max_workers: 2
+providers:
+ in:
+ class: octodns.provider.yaml.SplitYamlProvider
+ directory: tests/config/split
+ dump:
+ class: octodns.provider.yaml.SplitYamlProvider
+ directory: env/YAML_TMP_DIR
+ # This is sort of ugly, but it shouldn't hurt anything. It'll just write out
+ # the target file twice where it and dump are both used
+ dump2:
+ class: octodns.provider.yaml.SplitYamlProvider
+ directory: env/YAML_TMP_DIR
+ simple:
+ class: helpers.SimpleProvider
+ geo:
+ class: helpers.GeoProvider
+ nosshfp:
+ class: helpers.NoSshFpProvider
+zones:
+ unit.tests.:
+ sources:
+ - in
+ targets:
+ - dump
+ subzone.unit.tests.:
+ sources:
+ - in
+ targets:
+ - dump
+ - dump2
+ empty.:
+ sources:
+ - in
+ targets:
+ - dump
diff --git a/tests/config/split/dynamic.tests./a.yaml b/tests/config/split/dynamic.tests./a.yaml
new file mode 100644
index 0000000..3027686
--- /dev/null
+++ b/tests/config/split/dynamic.tests./a.yaml
@@ -0,0 +1,46 @@
+---
+a:
+ dynamic:
+ pools:
+ ams:
+ fallback: null
+ values:
+ - value: 1.1.1.1
+ weight: 1
+ iad:
+ fallback: null
+ values:
+ - value: 2.2.2.2
+ weight: 1
+ - value: 3.3.3.3
+ weight: 1
+ lax:
+ fallback: null
+ values:
+ - value: 4.4.4.4
+ weight: 1
+ sea:
+ fallback: null
+ values:
+ - value: 5.5.5.5
+ weight: 15
+ - value: 6.6.6.6
+ weight: 10
+ rules:
+ - geos:
+ - EU-GB
+ pool: lax
+ - geos:
+ - EU
+ pool: ams
+ - geos:
+ - NA-US-CA
+ - NA-US-NC
+ - NA-US-OR
+ - NA-US-WA
+ pool: sea
+ - pool: iad
+ type: A
+ values:
+ - 2.2.2.2
+ - 3.3.3.3
diff --git a/tests/config/split/dynamic.tests./aaaa.yaml b/tests/config/split/dynamic.tests./aaaa.yaml
new file mode 100644
index 0000000..a2d8779
--- /dev/null
+++ b/tests/config/split/dynamic.tests./aaaa.yaml
@@ -0,0 +1,46 @@
+---
+aaaa:
+ dynamic:
+ pools:
+ ams:
+ fallback: null
+ values:
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9471
+ weight: 1
+ iad:
+ fallback: null
+ values:
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9472
+ weight: 1
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9473
+ weight: 1
+ lax:
+ fallback: null
+ values:
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9474
+ weight: 1
+ sea:
+ fallback: null
+ values:
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9475
+ weight: 1
+ - value: 2601:642:500:e210:62f8:1dff:feb8:9476
+ weight: 2
+ rules:
+ - geos:
+ - EU-GB
+ pool: lax
+ - geos:
+ - EU
+ pool: ams
+ - geos:
+ - NA-US-CA
+ - NA-US-NC
+ - NA-US-OR
+ - NA-US-WA
+ pool: sea
+ - pool: iad
+ type: AAAA
+ values:
+ - 2601:642:500:e210:62f8:1dff:feb8:947a
+ - 2601:644:500:e210:62f8:1dff:feb8:947a
diff --git a/tests/config/split/dynamic.tests./cname.yaml b/tests/config/split/dynamic.tests./cname.yaml
new file mode 100644
index 0000000..b716ad9
--- /dev/null
+++ b/tests/config/split/dynamic.tests./cname.yaml
@@ -0,0 +1,42 @@
+---
+cname:
+ dynamic:
+ pools:
+ ams:
+ fallback: null
+ values:
+ - value: target-ams.unit.tests.
+ weight: 1
+ iad:
+ fallback: null
+ values:
+ - value: target-iad.unit.tests.
+ weight: 1
+ lax:
+ fallback: null
+ values:
+ - value: target-lax.unit.tests.
+ weight: 1
+ sea:
+ fallback: null
+ values:
+ - value: target-sea-1.unit.tests.
+ weight: 10
+ - value: target-sea-2.unit.tests.
+ weight: 14
+ rules:
+ - geos:
+ - EU-GB
+ pool: lax
+ - geos:
+ - EU
+ pool: ams
+ - geos:
+ - NA-US-CA
+ - NA-US-NC
+ - NA-US-OR
+ - NA-US-WA
+ pool: sea
+ - pool: iad
+ type: CNAME
+ value: target.unit.tests.
diff --git a/tests/config/split/dynamic.tests./real-ish-a.yaml b/tests/config/split/dynamic.tests./real-ish-a.yaml
new file mode 100644
index 0000000..0338b9d
--- /dev/null
+++ b/tests/config/split/dynamic.tests./real-ish-a.yaml
@@ -0,0 +1,87 @@
+---
+real-ish-a:
+ dynamic:
+ pools:
+ ap-southeast-1:
+ fallback: null
+ values:
+ - value: 1.4.1.1
+ weight: 2
+ - value: 1.4.1.2
+ weight: 2
+ - value: 1.4.2.1
+ weight: 1
+ - value: 1.4.2.2
+ weight: 1
+ - value: 1.4.3.1
+ weight: 1
+ - value: 1.4.3.2
+ weight: 1
+ eu-central-1:
+ fallback: null
+ values:
+ - value: 1.3.1.1
+ weight: 1
+ - value: 1.3.1.2
+ weight: 1
+ - value: 1.3.2.1
+ weight: 1
+ - value: 1.3.2.2
+ weight: 1
+ - value: 1.3.3.1
+ weight: 1
+ - value: 1.3.3.2
+ weight: 1
+ us-east-1:
+ fallback: null
+ values:
+ - value: 1.1.1.1
+ weight: 1
+ - value: 1.1.1.2
+ weight: 1
+ - value: 1.1.2.1
+ weight: 1
+ - value: 1.1.2.2
+ weight: 1
+ - value: 1.1.3.1
+ weight: 1
+ - value: 1.1.3.2
+ weight: 1
+ us-west-2:
+ fallback: null
+ values:
+ - value: 1.2.1.1
+ weight: 1
+ - value: 1.2.1.2
+ weight: 1
+ - value: 1.2.2.1
+ weight: 1
+ - value: 1.2.2.2
+ weight: 1
+ - value: 1.2.3.1
+ weight: 1
+ - value: 1.2.3.2
+ weight: 1
+ rules:
+ - geos:
+ - NA-US-CA
+ - NA-US-NC
+ - NA-US-OR
+ - NA-US-WA
+ pool: us-west-2
+ - geos:
+ - AS-CN
+ pool: ap-southeast-1
+ - geos:
+ - AF
+ - EU
+ pool: eu-central-1
+ - pool: us-east-1
+ type: A
+ values:
+ - 1.1.1.1
+ - 1.1.1.2
+ - 1.1.2.1
+ - 1.1.2.2
+ - 1.1.3.1
+ - 1.1.3.2
diff --git a/tests/config/split/dynamic.tests./simple-weighted.yaml b/tests/config/split/dynamic.tests./simple-weighted.yaml
new file mode 100644
index 0000000..1c722dd
--- /dev/null
+++ b/tests/config/split/dynamic.tests./simple-weighted.yaml
@@ -0,0 +1,15 @@
+---
+simple-weighted:
+ dynamic:
+ pools:
+ default:
+ fallback: null
+ values:
+ - value: one.unit.tests.
+ weight: 3
+ - value: two.unit.tests.
+ weight: 2
+ rules:
+ - pool: default
+ type: CNAME
+ value: default.unit.tests.
diff --git a/tests/config/split/empty./.gitkeep b/tests/config/split/empty./.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/tests/config/split/subzone.unit.tests./12.yaml b/tests/config/split/subzone.unit.tests./12.yaml
new file mode 100644
index 0000000..e5d4dff
--- /dev/null
+++ b/tests/config/split/subzone.unit.tests./12.yaml
@@ -0,0 +1,4 @@
+---
+'12':
+ type: A
+ value: 12.4.4.4
diff --git a/tests/config/split/subzone.unit.tests./2.yaml b/tests/config/split/subzone.unit.tests./2.yaml
new file mode 100644
index 0000000..812cb49
--- /dev/null
+++ b/tests/config/split/subzone.unit.tests./2.yaml
@@ -0,0 +1,4 @@
+---
+'2':
+ type: A
+ value: 2.4.4.4
diff --git a/tests/config/split/subzone.unit.tests./test.yaml b/tests/config/split/subzone.unit.tests./test.yaml
new file mode 100644
index 0000000..bc28512
--- /dev/null
+++ b/tests/config/split/subzone.unit.tests./test.yaml
@@ -0,0 +1,4 @@
+---
+test:
+ type: A
+ value: 4.4.4.4
diff --git a/tests/config/split/unit.tests./$unit.tests.yaml b/tests/config/split/unit.tests./$unit.tests.yaml
new file mode 100644
index 0000000..cf85a87
--- /dev/null
+++ b/tests/config/split/unit.tests./$unit.tests.yaml
@@ -0,0 +1,37 @@
+---
+? ''
+: - geo:
+ AF:
+ - 2.2.3.4
+ - 2.2.3.5
+ AS-JP:
+ - 3.2.3.4
+ - 3.2.3.5
+ NA-US:
+ - 4.2.3.4
+ - 4.2.3.5
+ NA-US-CA:
+ - 5.2.3.4
+ - 5.2.3.5
+ ttl: 300
+ type: A
+ values:
+ - 1.2.3.4
+ - 1.2.3.5
+ - type: CAA
+ value:
+ flags: 0
+ tag: issue
+ value: ca.unit.tests
+ - type: NS
+ values:
+ - 6.2.3.4.
+ - 7.2.3.4.
+ - type: SSHFP
+ values:
+ - algorithm: 1
+ fingerprint: 7491973e5f8b39d5327cd4e08bc81b05f7710b49
+ fingerprint_type: 1
+ - algorithm: 1
+ fingerprint: bf6b6825d2977c511a475bbefb88aad54a92ac73
+ fingerprint_type: 1
diff --git a/tests/config/split/unit.tests./_srv._tcp.yaml b/tests/config/split/unit.tests./_srv._tcp.yaml
new file mode 100644
index 0000000..220731e
--- /dev/null
+++ b/tests/config/split/unit.tests./_srv._tcp.yaml
@@ -0,0 +1,13 @@
+---
+_srv._tcp:
+ ttl: 600
+ type: SRV
+ values:
+ - port: 30
+ priority: 10
+ target: foo-1.unit.tests.
+ weight: 20
+ - port: 30
+ priority: 12
+ target: foo-2.unit.tests.
+ weight: 20
diff --git a/tests/config/split/unit.tests./aaaa.yaml b/tests/config/split/unit.tests./aaaa.yaml
new file mode 100644
index 0000000..845ab70
--- /dev/null
+++ b/tests/config/split/unit.tests./aaaa.yaml
@@ -0,0 +1,5 @@
+---
+aaaa:
+ ttl: 600
+ type: AAAA
+ value: 2601:644:500:e210:62f8:1dff:feb8:947a
diff --git a/tests/config/split/unit.tests./cname.yaml b/tests/config/split/unit.tests./cname.yaml
new file mode 100644
index 0000000..8664bd2
--- /dev/null
+++ b/tests/config/split/unit.tests./cname.yaml
@@ -0,0 +1,5 @@
+---
+cname:
+ ttl: 300
+ type: CNAME
+ value: unit.tests.
diff --git a/tests/config/split/unit.tests./excluded.yaml b/tests/config/split/unit.tests./excluded.yaml
new file mode 100644
index 0000000..7d9cb56
--- /dev/null
+++ b/tests/config/split/unit.tests./excluded.yaml
@@ -0,0 +1,7 @@
+---
+excluded:
+ octodns:
+ excluded:
+ - test
+ type: CNAME
+ value: unit.tests.
diff --git a/tests/config/split/unit.tests./ignored.yaml b/tests/config/split/unit.tests./ignored.yaml
new file mode 100644
index 0000000..4d55eb2
--- /dev/null
+++ b/tests/config/split/unit.tests./ignored.yaml
@@ -0,0 +1,6 @@
+---
+ignored:
+ octodns:
+ ignored: true
+ type: A
+ value: 9.9.9.9
diff --git a/tests/config/split/unit.tests./included.yaml b/tests/config/split/unit.tests./included.yaml
new file mode 100644
index 0000000..21d9e50
--- /dev/null
+++ b/tests/config/split/unit.tests./included.yaml
@@ -0,0 +1,7 @@
+---
+included:
+ octodns:
+ included:
+ - test
+ type: CNAME
+ value: unit.tests.
diff --git a/tests/config/split/unit.tests./mx.yaml b/tests/config/split/unit.tests./mx.yaml
new file mode 100644
index 0000000..87ca909
--- /dev/null
+++ b/tests/config/split/unit.tests./mx.yaml
@@ -0,0 +1,13 @@
+---
+mx:
+ ttl: 300
+ type: MX
+ values:
+ - exchange: smtp-4.unit.tests.
+ preference: 10
+ - exchange: smtp-2.unit.tests.
+ preference: 20
+ - exchange: smtp-3.unit.tests.
+ preference: 30
+ - exchange: smtp-1.unit.tests.
+ preference: 40
diff --git a/tests/config/split/unit.tests./naptr.yaml b/tests/config/split/unit.tests./naptr.yaml
new file mode 100644
index 0000000..f010d2f
--- /dev/null
+++ b/tests/config/split/unit.tests./naptr.yaml
@@ -0,0 +1,17 @@
+---
+naptr:
+ ttl: 600
+ type: NAPTR
+ values:
+ - flags: S
+ order: 10
+ preference: 100
+ regexp: '!^.*$!sip:info@bar.example.com!'
+ replacement: .
+ service: SIP+D2U
+ - flags: U
+ order: 100
+ preference: 100
+ regexp: '!^.*$!sip:info@bar.example.com!'
+ replacement: .
+ service: SIP+D2U
diff --git a/tests/config/split/unit.tests./ptr.yaml b/tests/config/split/unit.tests./ptr.yaml
new file mode 100644
index 0000000..0098b57
--- /dev/null
+++ b/tests/config/split/unit.tests./ptr.yaml
@@ -0,0 +1,5 @@
+---
+ptr:
+ ttl: 300
+ type: PTR
+ value: foo.bar.com.
diff --git a/tests/config/split/unit.tests./spf.yaml b/tests/config/split/unit.tests./spf.yaml
new file mode 100644
index 0000000..9321108
--- /dev/null
+++ b/tests/config/split/unit.tests./spf.yaml
@@ -0,0 +1,5 @@
+---
+spf:
+ ttl: 600
+ type: SPF
+ value: v=spf1 ip4:192.168.0.1/16-all
diff --git a/tests/config/split/unit.tests./sub.yaml b/tests/config/split/unit.tests./sub.yaml
new file mode 100644
index 0000000..ebd3d47
--- /dev/null
+++ b/tests/config/split/unit.tests./sub.yaml
@@ -0,0 +1,6 @@
+---
+sub:
+ type: NS
+ values:
+ - 6.2.3.4.
+ - 7.2.3.4.
diff --git a/tests/config/split/unit.tests./txt.yaml b/tests/config/split/unit.tests./txt.yaml
new file mode 100644
index 0000000..73eaba7
--- /dev/null
+++ b/tests/config/split/unit.tests./txt.yaml
@@ -0,0 +1,8 @@
+---
+txt:
+ ttl: 600
+ type: TXT
+ values:
+ - Bah bah black sheep
+ - have you any wool.
+ - v=DKIM1\;k=rsa\;s=email\;h=sha256\;p=A/kinda+of/long/string+with+numb3rs
diff --git a/tests/config/split/unit.tests./www.sub.yaml b/tests/config/split/unit.tests./www.sub.yaml
new file mode 100644
index 0000000..8cfd33f
--- /dev/null
+++ b/tests/config/split/unit.tests./www.sub.yaml
@@ -0,0 +1,5 @@
+---
+www.sub:
+ ttl: 300
+ type: A
+ value: 2.2.3.6
diff --git a/tests/config/split/unit.tests./www.yaml b/tests/config/split/unit.tests./www.yaml
new file mode 100644
index 0000000..d6d4ab0
--- /dev/null
+++ b/tests/config/split/unit.tests./www.yaml
@@ -0,0 +1,5 @@
+---
+www:
+ ttl: 300
+ type: A
+ value: 2.2.3.6
diff --git a/tests/config/split/unordered./abc.yaml b/tests/config/split/unordered./abc.yaml
new file mode 100644
index 0000000..e0ccccc
--- /dev/null
+++ b/tests/config/split/unordered./abc.yaml
@@ -0,0 +1,4 @@
+---
+abc:
+ type: A
+ value: 9.9.9.9
diff --git a/tests/config/split/unordered./xyz.yaml b/tests/config/split/unordered./xyz.yaml
new file mode 100644
index 0000000..14db338
--- /dev/null
+++ b/tests/config/split/unordered./xyz.yaml
@@ -0,0 +1,5 @@
+---
+xyz:
+ # t comes before v
+ value: 9.9.9.9
+ type: A
diff --git a/tests/config/unknown-provider.yaml b/tests/config/unknown-provider.yaml
index 9071046..a0e9f55 100644
--- a/tests/config/unknown-provider.yaml
+++ b/tests/config/unknown-provider.yaml
@@ -5,24 +5,8 @@ providers:
simple_source:
class: helpers.SimpleSource
zones:
- missing.sources.:
- targets:
- - yaml
- missing.targets.:
- sources:
- - yaml
unknown.source.:
sources:
- not-there
targets:
- yaml
- unknown.target.:
- sources:
- - yaml
- targets:
- - not-there-either
- not.targetable.:
- sources:
- - yaml
- targets:
- - simple_source
diff --git a/tests/fixtures/cloudflare-dns_records-page-1.json b/tests/fixtures/cloudflare-dns_records-page-1.json
index 3c423e2..efe0654 100644
--- a/tests/fixtures/cloudflare-dns_records-page-1.json
+++ b/tests/fixtures/cloudflare-dns_records-page-1.json
@@ -180,7 +180,7 @@
"per_page": 10,
"total_pages": 2,
"count": 10,
- "total_count": 19
+ "total_count": 20
},
"success": true,
"errors": [],
diff --git a/tests/fixtures/cloudflare-dns_records-page-2.json b/tests/fixtures/cloudflare-dns_records-page-2.json
index 558aa2c..b0bbaef 100644
--- a/tests/fixtures/cloudflare-dns_records-page-2.json
+++ b/tests/fixtures/cloudflare-dns_records-page-2.json
@@ -157,6 +157,23 @@
"auto_added": false
}
},
+ {
+ "id": "fc12ab34cd5611334422ab3322997677",
+ "type": "PTR",
+ "name": "ptr.unit.tests",
+ "content": "foo.bar.com",
+ "proxiable": true,
+ "proxied": false,
+ "ttl": 300,
+ "locked": false,
+ "zone_id": "ff12ab34cd5611334422ab3322997650",
+ "zone_name": "unit.tests",
+ "modified_on": "2017-03-11T18:01:43.940682Z",
+ "created_on": "2017-03-11T18:01:43.940682Z",
+ "meta": {
+ "auto_added": false
+ }
+ },
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
@@ -212,8 +229,8 @@
"page": 2,
"per_page": 11,
"total_pages": 2,
- "count": 9,
- "total_count": 21
+ "count": 10,
+ "total_count": 20
},
"success": true,
"errors": [],
diff --git a/tests/fixtures/constellix-domains.json b/tests/fixtures/constellix-domains.json
new file mode 100644
index 0000000..4b6392d
--- /dev/null
+++ b/tests/fixtures/constellix-domains.json
@@ -0,0 +1,28 @@
+[{
+ "id": 123123,
+ "name": "unit.tests",
+ "soa": {
+ "primaryNameserver": "ns11.constellix.com.",
+ "email": "dns.constellix.com.",
+ "ttl": 86400,
+ "serial": 2015010102,
+ "refresh": 43200,
+ "retry": 3600,
+ "expire": 1209600,
+ "negCache": 180
+ },
+ "createdTs": "2019-08-07T03:36:02Z",
+ "modifiedTs": "2019-08-07T03:36:02Z",
+ "typeId": 1,
+ "domainTags": [],
+ "folder": null,
+ "hasGtdRegions": false,
+ "hasGeoIP": false,
+ "nameserverGroup": 1,
+ "nameservers": ["ns11.constellix.com.", "ns21.constellix.com.", "ns31.constellix.com.", "ns41.constellix.net.", "ns51.constellix.net.", "ns61.constellix.net."],
+ "note": "",
+ "version": 0,
+ "status": "ACTIVE",
+ "tags": [],
+ "contactIds": []
+}]
diff --git a/tests/fixtures/constellix-records.json b/tests/fixtures/constellix-records.json
new file mode 100644
index 0000000..c1f1fb4
--- /dev/null
+++ b/tests/fixtures/constellix-records.json
@@ -0,0 +1,598 @@
+[{
+ "id": 1808529,
+ "type": "CAA",
+ "recordType": "caa",
+ "name": "",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 3600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149569216,
+ "value": [{
+ "flag": 0,
+ "tag": "issue",
+ "data": "ca.unit.tests",
+ "caaProviderId": 1,
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "flag": 0,
+ "tag": "issue",
+ "data": "ca.unit.tests",
+ "caaProviderId": 1,
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808516,
+ "type": "A",
+ "recordType": "a",
+ "name": "",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149623640,
+ "value": ["1.2.3.4", "1.2.3.5"],
+ "roundRobin": [{
+ "value": "1.2.3.4",
+ "disableFlag": false
+ }, {
+ "value": "1.2.3.5",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "roundRobinFailover": [],
+ "pools": [],
+ "poolsDetail": []
+}, {
+ "id": 1808527,
+ "type": "SRV",
+ "recordType": "srv",
+ "name": "_srv._tcp",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149714387,
+ "value": [{
+ "value": "foo-1.unit.tests.",
+ "priority": 10,
+ "weight": 20,
+ "port": 30,
+ "disableFlag": false
+ }, {
+ "value": "foo-2.unit.tests.",
+ "priority": 12,
+ "weight": 20,
+ "port": 30,
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "foo-1.unit.tests.",
+ "priority": 10,
+ "weight": 20,
+ "port": 30,
+ "disableFlag": false
+ }, {
+ "value": "foo-2.unit.tests.",
+ "priority": 12,
+ "weight": 20,
+ "port": 30,
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808515,
+ "type": "AAAA",
+ "recordType": "aaaa",
+ "name": "aaaa",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149739464,
+ "value": ["2601:644:500:e210:62f8:1dff:feb8:947a"],
+ "roundRobin": [{
+ "value": "2601:644:500:e210:62f8:1dff:feb8:947a",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "pools": [],
+ "poolsDetail": [],
+ "roundRobinFailover": []
+}, {
+ "id": 1808530,
+ "type": "ANAME",
+ "recordType": "aname",
+ "name": "",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 1800,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150251379,
+ "value": [{
+ "value": "aname.unit.tests.",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "aname.unit.tests.",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "pools": [],
+ "poolsDetail": []
+}, {
+ "id": 1808521,
+ "type": "CNAME",
+ "recordType": "cname",
+ "name": "cname",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565152113825,
+ "value": "",
+ "roundRobin": [{
+ "value": "",
+ "disableFlag": false
+ }],
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": [{
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 1,
+ "markedActive": false
+ }, {
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 2,
+ "markedActive": false
+ }]
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": [{
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 1,
+ "markedActive": false
+ }, {
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 2,
+ "markedActive": false
+ }]
+ },
+ "pools": [],
+ "poolsDetail": [],
+ "geolocation": null,
+ "host": ""
+}, {
+ "id": 1808522,
+ "type": "CNAME",
+ "recordType": "cname",
+ "name": "included",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 3600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565152119137,
+ "value": "",
+ "roundRobin": [{
+ "value": "",
+ "disableFlag": false
+ }],
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": [{
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 1,
+ "markedActive": false
+ }, {
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 2,
+ "markedActive": false
+ }]
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": [{
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 1,
+ "markedActive": false
+ }, {
+ "id": null,
+ "value": "",
+ "disableFlag": false,
+ "failedFlag": false,
+ "status": "N/A",
+ "sortOrder": 2,
+ "markedActive": false
+ }]
+ },
+ "pools": [],
+ "poolsDetail": [],
+ "geolocation": null,
+ "host": ""
+}, {
+ "id": 1808523,
+ "type": "MX",
+ "recordType": "mx",
+ "name": "mx",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149879856,
+ "value": [{
+ "value": "smtp-3.unit.tests.",
+ "level": 30,
+ "disableFlag": false
+ }, {
+ "value": "smtp-2.unit.tests.",
+ "level": 20,
+ "disableFlag": false
+ }, {
+ "value": "smtp-4.unit.tests.",
+ "level": 10,
+ "disableFlag": false
+ }, {
+ "value": "smtp-1.unit.tests.",
+ "level": 40,
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "smtp-3.unit.tests.",
+ "level": 30,
+ "disableFlag": false
+ }, {
+ "value": "smtp-2.unit.tests.",
+ "level": 20,
+ "disableFlag": false
+ }, {
+ "value": "smtp-4.unit.tests.",
+ "level": 10,
+ "disableFlag": false
+ }, {
+ "value": "smtp-1.unit.tests.",
+ "level": 40,
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808525,
+ "type": "PTR",
+ "recordType": "ptr",
+ "name": "ptr",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150115139,
+ "value": [{
+ "value": "foo.bar.com.",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "foo.bar.com.",
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808526,
+ "type": "SPF",
+ "recordType": "spf",
+ "name": "spf",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149916132,
+ "value": [{
+ "value": "\"v=spf1 ip4:192.168.0.1/16-all\"",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "\"v=spf1 ip4:192.168.0.1/16-all\"",
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808528,
+ "type": "TXT",
+ "recordType": "txt",
+ "name": "txt",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149966915,
+ "value": [{
+ "value": "\"Bah bah black sheep\"",
+ "disableFlag": false
+ }, {
+ "value": "\"have you any wool.\"",
+ "disableFlag": false
+ }, {
+ "value": "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\"",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "\"Bah bah black sheep\"",
+ "disableFlag": false
+ }, {
+ "value": "\"have you any wool.\"",
+ "disableFlag": false
+ }, {
+ "value": "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\"",
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808524,
+ "type": "NS",
+ "recordType": "ns",
+ "name": "under",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 3600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150062850,
+ "value": [{
+ "value": "ns1.unit.tests.",
+ "disableFlag": false
+ }, {
+ "value": "ns2",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "ns1.unit.tests.",
+ "disableFlag": false
+ }, {
+ "value": "ns2",
+ "disableFlag": false
+ }]
+}, {
+ "id": 1808531,
+ "type": "HTTPRedirection",
+ "recordType": "httpredirection",
+ "name": "unsupported",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150348154,
+ "value": "https://redirect.unit.tests",
+ "roundRobin": [{
+ "value": "https://redirect.unit.tests"
+ }],
+ "title": "Unsupported Record",
+ "keywords": "unsupported",
+ "description": "unsupported record",
+ "hardlinkFlag": false,
+ "redirectTypeId": 1,
+ "url": "https://redirect.unit.tests"
+}, {
+ "id": 1808519,
+ "type": "A",
+ "recordType": "a",
+ "name": "www",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150079027,
+ "value": ["2.2.3.6"],
+ "roundRobin": [{
+ "value": "2.2.3.6",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "roundRobinFailover": [],
+ "pools": [],
+ "poolsDetail": []
+}, {
+ "id": 1808603,
+ "type": "ANAME",
+ "recordType": "aname",
+ "name": "sub",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 1800,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565153387855,
+ "value": [{
+ "value": "aname.unit.tests.",
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": "aname.unit.tests.",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "pools": [],
+ "poolsDetail": []
+}, {
+ "id": 1808520,
+ "type": "A",
+ "recordType": "a",
+ "name": "www.sub",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 300,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565150090588,
+ "value": ["2.2.3.6"],
+ "roundRobin": [{
+ "value": "2.2.3.6",
+ "disableFlag": false
+ }],
+ "geolocation": null,
+ "recordFailover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "failover": {
+ "disabled": false,
+ "failoverType": 1,
+ "failoverTypeStr": "Normal (always lowest level)",
+ "values": []
+ },
+ "roundRobinFailover": [],
+ "pools": [],
+ "poolsDetail": []
+}]
diff --git a/tests/fixtures/digitalocean-page-1.json b/tests/fixtures/digitalocean-page-1.json
index db231ba..c931411 100644
--- a/tests/fixtures/digitalocean-page-1.json
+++ b/tests/fixtures/digitalocean-page-1.json
@@ -1,5 +1,16 @@
{
"domain_records": [{
+ "id": null,
+ "type": "SOA",
+ "name": "@",
+ "data": null,
+ "priority": null,
+ "port": null,
+ "ttl": null,
+ "weight": null,
+ "flags": null,
+ "tag": null
+ }, {
"id": 11189874,
"type": "NS",
"name": "@",
diff --git a/tests/fixtures/dnsmadeeasy-records.json b/tests/fixtures/dnsmadeeasy-records.json
index e03e0b5..4d3ba64 100644
--- a/tests/fixtures/dnsmadeeasy-records.json
+++ b/tests/fixtures/dnsmadeeasy-records.json
@@ -335,6 +335,24 @@
"value": "aname",
"id": 11189896,
"type": "ANAME"
- }],
+ }, {
+ "failover": false,
+ "monitor": false,
+ "sourceId": 123123,
+ "dynamicDns": false,
+ "failed": false,
+ "gtdLocation": "DEFAULT",
+ "hardLink": true,
+ "ttl": 1800,
+ "source": 1,
+ "name": "unsupported",
+ "value": "https://redirect.unit.tests",
+ "id": 11189897,
+ "title": "Unsupported Record",
+ "keywords": "unsupported",
+ "redirectType": "Standard - 302",
+ "description": "unsupported record",
+ "type": "HTTPRED"
+ }],
"page": 0
}
diff --git a/tests/fixtures/edgedns-invalid-content.json b/tests/fixtures/edgedns-invalid-content.json
new file mode 100644
index 0000000..8932f66
--- /dev/null
+++ b/tests/fixtures/edgedns-invalid-content.json
@@ -0,0 +1,35 @@
+{
+ "recordsets": [
+ {
+ "rdata": [
+ "",
+ "12 20 foo-2.unit.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "",
+ "1 1"
+ ],
+ "type": "SSHFP",
+ "name": "unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "",
+ "100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" ."
+ ],
+ "type": "NAPTR",
+ "name": "naptr.unit.tests",
+ "ttl": 600
+ }
+ ],
+ "metadata": {
+ "totalElements": 3,
+ "showAll": true
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/edgedns-records-prev-other.json b/tests/fixtures/edgedns-records-prev-other.json
new file mode 100644
index 0000000..acae3ec
--- /dev/null
+++ b/tests/fixtures/edgedns-records-prev-other.json
@@ -0,0 +1,166 @@
+{
+ "recordsets": [
+ {
+ "rdata": [
+ "10 20 30 foo-1.other.tests.",
+ "12 20 30 foo-2.other.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "10 20 30 foo-1.other.tests.",
+ "12 20 30 foo-2.other.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2601:644:500:e210:62f8:1dff:feb8:9471"
+ ],
+ "type": "AAAA",
+ "name": "aaaa.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.akam.net.",
+ "ns2.akam.net.",
+ "ns3.akam.net.",
+ "ns4.akam.net."
+ ],
+ "type": "NS",
+ "name": "old.other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1.2.3.4",
+ "1.2.3.5"
+ ],
+ "type": "A",
+ "name": "old.other.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "ns1.akam.net hostmaster.akamai.com 1489074932 86400 7200 604800 300"
+ ],
+ "type": "SOA",
+ "name": "other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
+ "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73"
+ ],
+ "type": "SSHFP",
+ "name": "old.other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "other.tests."
+ ],
+ "type": "CNAME",
+ "name": "old.cname.other.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "other.tests."
+ ],
+ "type": "CNAME",
+ "name": "excluded.old.other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "other.tests."
+ ],
+ "type": "CNAME",
+ "name": "included.old.other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "10 smtp-4.other.tests.",
+ "20 smtp-2.other.tests.",
+ "30 smtp-3.other.tests.",
+ "40 smtp-1.other.tests."
+ ],
+ "type": "MX",
+ "name": "mx.old.other.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
+ "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" ."
+ ],
+ "type": "NAPTR",
+ "name": "naptr.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "foo.bar.com."
+ ],
+ "type": "PTR",
+ "name": "ptr.old.other.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "\"v=spf1 ip4:192.168.0.1/16-all\""
+ ],
+ "type": "SPF",
+ "name": "spf.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.other.tests.",
+ "ns2.other.tests."
+ ],
+ "type": "NS",
+ "name": "under.old.other.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "\"Bah bah black sheep\"",
+ "\"have you any wool.\"",
+ "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
+ ],
+ "type": "TXT",
+ "name": "txt.old.other.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2.2.3.7"
+ ],
+ "type": "A",
+ "name": "www.other.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "2.2.3.6"
+ ],
+ "type": "A",
+ "name": "www.sub.old.other.tests",
+ "ttl": 300
+ }
+ ],
+ "metadata": {
+ "totalElements": 16,
+ "showAll": true
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/edgedns-records-prev.json b/tests/fixtures/edgedns-records-prev.json
new file mode 100644
index 0000000..b07c63f
--- /dev/null
+++ b/tests/fixtures/edgedns-records-prev.json
@@ -0,0 +1,166 @@
+{
+ "recordsets": [
+ {
+ "rdata": [
+ "10 20 30 foo-1.unit.tests.",
+ "12 20 30 foo-2.unit.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "10 20 30 foo-1.unit.tests.",
+ "12 20 30 foo-2.unit.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2601:644:500:e210:62f8:1dff:feb8:9471"
+ ],
+ "type": "AAAA",
+ "name": "aaaa.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.akam.net.",
+ "ns2.akam.net.",
+ "ns3.akam.net.",
+ "ns4.akam.net."
+ ],
+ "type": "NS",
+ "name": "old.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1.2.3.4",
+ "1.2.3.5"
+ ],
+ "type": "A",
+ "name": "old.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "ns1.akam.net hostmaster.akamai.com 1489074932 86400 7200 604800 300"
+ ],
+ "type": "SOA",
+ "name": "unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
+ "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73"
+ ],
+ "type": "SSHFP",
+ "name": "old.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "unit.tests"
+ ],
+ "type": "CNAME",
+ "name": "old.cname.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "unit.tests."
+ ],
+ "type": "CNAME",
+ "name": "excluded.old.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "unit.tests."
+ ],
+ "type": "CNAME",
+ "name": "included.old.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "10 smtp-4.unit.tests.",
+ "20 smtp-2.unit.tests.",
+ "30 smtp-3.unit.tests.",
+ "40 smtp-1.unit.tests."
+ ],
+ "type": "MX",
+ "name": "mx.old.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
+ "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" ."
+ ],
+ "type": "NAPTR",
+ "name": "naptr.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "foo.bar.com."
+ ],
+ "type": "PTR",
+ "name": "ptr.old.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "\"v=spf1 ip4:192.168.0.1/16-all\""
+ ],
+ "type": "SPF",
+ "name": "spf.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.unit.tests.",
+ "ns2.unit.tests."
+ ],
+ "type": "NS",
+ "name": "under.old.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "\"Bah bah black sheep\"",
+ "\"have you any wool.\"",
+ "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
+ ],
+ "type": "TXT",
+ "name": "txt.old.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2.2.3.7"
+ ],
+ "type": "A",
+ "name": "www.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "2.2.3.6"
+ ],
+ "type": "A",
+ "name": "www.sub.old.unit.tests",
+ "ttl": 300
+ }
+ ],
+ "metadata": {
+ "totalElements": 16,
+ "showAll": true
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/edgedns-records.json b/tests/fixtures/edgedns-records.json
new file mode 100644
index 0000000..4693eb1
--- /dev/null
+++ b/tests/fixtures/edgedns-records.json
@@ -0,0 +1,157 @@
+{
+ "recordsets": [
+ {
+ "rdata": [
+ "10 20 30 foo-1.unit.tests.",
+ "12 20 30 foo-2.unit.tests."
+ ],
+ "type": "SRV",
+ "name": "_srv._tcp.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2601:644:500:e210:62f8:1dff:feb8:947a"
+ ],
+ "type": "AAAA",
+ "name": "aaaa.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.akam.net.",
+ "ns2.akam.net.",
+ "ns3.akam.net.",
+ "ns4.akam.net."
+ ],
+ "type": "NS",
+ "name": "unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1.2.3.4",
+ "1.2.3.5"
+ ],
+ "type": "A",
+ "name": "unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "ns1.akam.net hostmaster.akamai.com 1489074932 86400 7200 604800 300"
+ ],
+ "type": "SOA",
+ "name": "unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
+ "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73"
+ ],
+ "type": "SSHFP",
+ "name": "unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "unit.tests."
+ ],
+ "type": "CNAME",
+ "name": "cname.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "unit.tests."
+ ],
+ "type": "CNAME",
+ "name": "excluded.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "unit.tests."
+ ],
+ "type": "CNAME",
+ "name": "included.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "10 smtp-4.unit.tests.",
+ "20 smtp-2.unit.tests.",
+ "30 smtp-3.unit.tests.",
+ "40 smtp-1.unit.tests."
+ ],
+ "type": "MX",
+ "name": "mx.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
+ "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" ."
+ ],
+ "type": "NAPTR",
+ "name": "naptr.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "foo.bar.com."
+ ],
+ "type": "PTR",
+ "name": "ptr.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "\"v=spf1 ip4:192.168.0.1/16-all\""
+ ],
+ "type": "SPF",
+ "name": "spf.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "ns1.unit.tests.",
+ "ns2.unit.tests."
+ ],
+ "type": "NS",
+ "name": "under.unit.tests",
+ "ttl": 3600
+ },
+ {
+ "rdata": [
+ "\"Bah bah black sheep\"",
+ "\"have you any wool.\"",
+ "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
+ ],
+ "type": "TXT",
+ "name": "txt.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "2.2.3.6"
+ ],
+ "type": "A",
+ "name": "www.unit.tests",
+ "ttl": 300
+ },
+ {
+ "rdata": [
+ "2.2.3.6"
+ ],
+ "type": "A",
+ "name": "www.sub.unit.tests",
+ "ttl": 300
+ }
+ ],
+ "metadata": {
+ "totalElements": 16,
+ "showAll": true
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/mythicbeasts-list.txt b/tests/fixtures/mythicbeasts-list.txt
new file mode 100644
index 0000000..ed4ea4c
--- /dev/null
+++ b/tests/fixtures/mythicbeasts-list.txt
@@ -0,0 +1,25 @@
+@ 3600 NS 6.2.3.4.
+@ 3600 NS 7.2.3.4.
+@ 300 A 1.2.3.4
+@ 300 A 1.2.3.5
+@ 3600 SSHFP 1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73
+@ 3600 SSHFP 1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49
+@ 3600 CAA 0 issue ca.unit.tests
+_srv._tcp 600 SRV 10 20 30 foo-1.unit.tests.
+_srv._tcp 600 SRV 12 20 30 foo-2.unit.tests.
+aaaa 600 AAAA 2601:644:500:e210:62f8:1dff:feb8:947a
+cname 300 CNAME unit.tests.
+excluded 300 CNAME unit.tests.
+ignored 300 A 9.9.9.9
+included 3600 CNAME unit.tests.
+mx 300 MX 10 smtp-4.unit.tests.
+mx 300 MX 20 smtp-2.unit.tests.
+mx 300 MX 30 smtp-3.unit.tests.
+mx 300 MX 40 smtp-1.unit.tests.
+sub 3600 NS 6.2.3.4.
+sub 3600 NS 7.2.3.4.
+txt 600 TXT "Bah bah black sheep"
+txt 600 TXT "have you any wool."
+txt 600 TXT "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs"
+www 300 A 2.2.3.6
+www.sub 300 A 2.2.3.6
diff --git a/tests/test_octodns_equality.py b/tests/test_octodns_equality.py
new file mode 100644
index 0000000..dcdc460
--- /dev/null
+++ b/tests/test_octodns_equality.py
@@ -0,0 +1,68 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from unittest import TestCase
+
+from octodns.equality import EqualityTupleMixin
+
+
+class TestEqualityTupleMixin(TestCase):
+
+ def test_basics(self):
+
+ class Simple(EqualityTupleMixin):
+
+ def __init__(self, a, b, c):
+ self.a = a
+ self.b = b
+ self.c = c
+
+ def _equality_tuple(self):
+ return (self.a, self.b)
+
+ one = Simple(1, 2, 3)
+ same = Simple(1, 2, 3)
+ matches = Simple(1, 2, 'ignored')
+ doesnt = Simple(2, 3, 4)
+
+ # equality
+ self.assertEquals(one, one)
+ self.assertEquals(one, same)
+ self.assertEquals(same, one)
+        # only a & b are considered
+ self.assertEquals(one, matches)
+ self.assertEquals(matches, one)
+ self.assertNotEquals(one, doesnt)
+ self.assertNotEquals(doesnt, one)
+
+ # lt
+ self.assertTrue(one < doesnt)
+ self.assertFalse(doesnt < one)
+ self.assertFalse(one < same)
+
+ # le
+ self.assertTrue(one <= doesnt)
+ self.assertFalse(doesnt <= one)
+ self.assertTrue(one <= same)
+
+ # gt
+ self.assertFalse(one > doesnt)
+ self.assertTrue(doesnt > one)
+ self.assertFalse(one > same)
+
+ # ge
+ self.assertFalse(one >= doesnt)
+ self.assertTrue(doesnt >= one)
+ self.assertTrue(one >= same)
+
+ def test_not_implemented(self):
+
+ class MissingMethod(EqualityTupleMixin):
+ pass
+
+ with self.assertRaises(NotImplementedError):
+ MissingMethod() == MissingMethod()
diff --git a/tests/test_octodns_manager.py b/tests/test_octodns_manager.py
index 0e14bab..581689a 100644
--- a/tests/test_octodns_manager.py
+++ b/tests/test_octodns_manager.py
@@ -7,10 +7,12 @@ from __future__ import absolute_import, division, print_function, \
from os import environ
from os.path import dirname, join
+from six import text_type
from unittest import TestCase
from octodns.record import Record
-from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager
+from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager, \
+ ManagerException
from octodns.yaml import safe_load
from octodns.zone import Zone
@@ -27,80 +29,81 @@ def get_config_filename(which):
class TestManager(TestCase):
def test_missing_provider_class(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('missing-provider-class.yaml')).sync()
- self.assertTrue('missing class' in ctx.exception.message)
+ self.assertTrue('missing class' in text_type(ctx.exception))
def test_bad_provider_class(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('bad-provider-class.yaml')).sync()
- self.assertTrue('Unknown provider class' in ctx.exception.message)
+ self.assertTrue('Unknown provider class' in text_type(ctx.exception))
def test_bad_provider_class_module(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('bad-provider-class-module.yaml')) \
.sync()
- self.assertTrue('Unknown provider class' in ctx.exception.message)
+ self.assertTrue('Unknown provider class' in text_type(ctx.exception))
def test_bad_provider_class_no_module(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('bad-provider-class-no-module.yaml')) \
.sync()
- self.assertTrue('Unknown provider class' in ctx.exception.message)
+ self.assertTrue('Unknown provider class' in text_type(ctx.exception))
def test_missing_provider_config(self):
# Missing provider config
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('missing-provider-config.yaml')).sync()
- self.assertTrue('provider config' in ctx.exception.message)
+ self.assertTrue('provider config' in text_type(ctx.exception))
def test_missing_env_config(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('missing-provider-env.yaml')).sync()
- self.assertTrue('missing env var' in ctx.exception.message)
+ self.assertTrue('missing env var' in text_type(ctx.exception))
def test_missing_source(self):
- with self.assertRaises(Exception) as ctx:
- Manager(get_config_filename('unknown-provider.yaml')) \
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('provider-problems.yaml')) \
.sync(['missing.sources.'])
- self.assertTrue('missing sources' in ctx.exception.message)
+ self.assertTrue('missing sources' in text_type(ctx.exception))
def test_missing_targets(self):
- with self.assertRaises(Exception) as ctx:
- Manager(get_config_filename('unknown-provider.yaml')) \
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('provider-problems.yaml')) \
.sync(['missing.targets.'])
- self.assertTrue('missing targets' in ctx.exception.message)
+ self.assertTrue('missing targets' in text_type(ctx.exception))
def test_unknown_source(self):
- with self.assertRaises(Exception) as ctx:
- Manager(get_config_filename('unknown-provider.yaml')) \
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('provider-problems.yaml')) \
.sync(['unknown.source.'])
- self.assertTrue('unknown source' in ctx.exception.message)
+ self.assertTrue('unknown source' in text_type(ctx.exception))
def test_unknown_target(self):
- with self.assertRaises(Exception) as ctx:
- Manager(get_config_filename('unknown-provider.yaml')) \
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('provider-problems.yaml')) \
.sync(['unknown.target.'])
- self.assertTrue('unknown target' in ctx.exception.message)
+ self.assertTrue('unknown target' in text_type(ctx.exception))
def test_bad_plan_output_class(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
name = 'bad-plan-output-missing-class.yaml'
Manager(get_config_filename(name)).sync()
self.assertEquals('plan_output bad is missing class',
- ctx.exception.message)
+ text_type(ctx.exception))
def test_bad_plan_output_config(self):
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('bad-plan-output-config.yaml')).sync()
self.assertEqual('Incorrect plan_output config for bad',
- ctx.exception.message)
+ text_type(ctx.exception))
def test_source_only_as_a_target(self):
- with self.assertRaises(Exception) as ctx:
- Manager(get_config_filename('unknown-provider.yaml')) \
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('provider-problems.yaml')) \
.sync(['not.targetable.'])
- self.assertTrue('does not support targeting' in ctx.exception.message)
+ self.assertTrue('does not support targeting' in
+ text_type(ctx.exception))
def test_always_dry_run(self):
with TemporaryDirectory() as tmpdir:
@@ -180,9 +183,9 @@ class TestManager(TestCase):
'unit.tests.')
self.assertEquals(14, len(changes))
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
manager.compare(['nope'], ['dump'], 'unit.tests.')
- self.assertEquals('Unknown source: nope', ctx.exception.message)
+ self.assertEquals('Unknown source: nope', text_type(ctx.exception))
def test_aggregate_target(self):
simple = SimpleProvider()
@@ -220,40 +223,83 @@ class TestManager(TestCase):
environ['YAML_TMP_DIR'] = tmpdir.dirname
manager = Manager(get_config_filename('simple.yaml'))
- with self.assertRaises(Exception) as ctx:
- manager.dump('unit.tests.', tmpdir.dirname, False, 'nope')
- self.assertEquals('Unknown source: nope', ctx.exception.message)
+ with self.assertRaises(ManagerException) as ctx:
+ manager.dump('unit.tests.', tmpdir.dirname, False, False,
+ 'nope')
+ self.assertEquals('Unknown source: nope', text_type(ctx.exception))
- manager.dump('unit.tests.', tmpdir.dirname, False, 'in')
+ manager.dump('unit.tests.', tmpdir.dirname, False, False, 'in')
# make sure this fails with an IOError and not a KeyError when
# tyring to find sub zones
with self.assertRaises(IOError):
- manager.dump('unknown.zone.', tmpdir.dirname, False, 'in')
+ manager.dump('unknown.zone.', tmpdir.dirname, False, False,
+ 'in')
def test_dump_empty(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
manager = Manager(get_config_filename('simple.yaml'))
- manager.dump('empty.', tmpdir.dirname, False, 'in')
+ manager.dump('empty.', tmpdir.dirname, False, False, 'in')
with open(join(tmpdir.dirname, 'empty.yaml')) as fh:
data = safe_load(fh, False)
self.assertFalse(data)
+ def test_dump_split(self):
+ with TemporaryDirectory() as tmpdir:
+ environ['YAML_TMP_DIR'] = tmpdir.dirname
+ manager = Manager(get_config_filename('simple-split.yaml'))
+
+ with self.assertRaises(ManagerException) as ctx:
+ manager.dump('unit.tests.', tmpdir.dirname, False, True,
+ 'nope')
+ self.assertEquals('Unknown source: nope', text_type(ctx.exception))
+
+ manager.dump('unit.tests.', tmpdir.dirname, False, True, 'in')
+
+ # make sure this fails with an OSError and not a KeyError when
+            # trying to find sub zones
+ with self.assertRaises(OSError):
+ manager.dump('unknown.zone.', tmpdir.dirname, False, True,
+ 'in')
+
def test_validate_configs(self):
Manager(get_config_filename('simple-validate.yaml')).validate_configs()
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('missing-sources.yaml')) \
.validate_configs()
- self.assertTrue('missing sources' in ctx.exception.message)
+ self.assertTrue('missing sources' in text_type(ctx.exception))
- with self.assertRaises(Exception) as ctx:
+ with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.validate_configs()
- self.assertTrue('unknown source' in ctx.exception.message)
+ self.assertTrue('unknown source' in text_type(ctx.exception))
+
+ def test_populate_lenient_fallback(self):
+ with TemporaryDirectory() as tmpdir:
+ environ['YAML_TMP_DIR'] = tmpdir.dirname
+ # Only allow a target that doesn't exist
+ manager = Manager(get_config_filename('simple.yaml'))
+
+ class NoLenient(SimpleProvider):
+
+ def populate(self, zone, source=False):
+ pass
+
+ # This should be ok, we'll fall back to not passing it
+ manager._populate_and_plan('unit.tests.', [NoLenient()], [])
+
+ class NoZone(SimpleProvider):
+
+ def populate(self, lenient=False):
+ pass
+
+ # This will blow up, we don't fallback for source
+ with self.assertRaises(TypeError):
+ manager._populate_and_plan('unit.tests.', [NoZone()], [])
class TestMainThreadExecutor(TestCase):
diff --git a/tests/test_octodns_plan.py b/tests/test_octodns_plan.py
index 7d849be..9cf812d 100644
--- a/tests/test_octodns_plan.py
+++ b/tests/test_octodns_plan.py
@@ -5,8 +5,8 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from StringIO import StringIO
from logging import getLogger
+from six import StringIO, text_type
from unittest import TestCase
from octodns.provider.plan import Plan, PlanHtml, PlanLogger, PlanMarkdown
@@ -59,7 +59,7 @@ class TestPlanLogger(TestCase):
with self.assertRaises(Exception) as ctx:
PlanLogger('invalid', 'not-a-level')
self.assertEquals('Unsupported level: not-a-level',
- ctx.exception.message)
+ text_type(ctx.exception))
def test_create(self):
diff --git a/tests/test_octodns_provider_azuredns.py b/tests/test_octodns_provider_azuredns.py
index bcf7a29..1769cef 100644
--- a/tests/test_octodns_provider_azuredns.py
+++ b/tests/test_octodns_provider_azuredns.py
@@ -11,9 +11,9 @@ from octodns.provider.azuredns import _AzureRecord, AzureProvider, \
from octodns.zone import Zone
from octodns.provider.base import Plan
-from azure.mgmt.dns.models import ARecord, AaaaRecord, CnameRecord, MxRecord, \
- SrvRecord, NsRecord, PtrRecord, TxtRecord, RecordSet, SoaRecord, \
- Zone as AzureZone
+from azure.mgmt.dns.models import ARecord, AaaaRecord, CaaRecord, \
+ CnameRecord, MxRecord, SrvRecord, NsRecord, PtrRecord, TxtRecord, \
+ RecordSet, SoaRecord, Zone as AzureZone
from msrestazure.azure_exceptions import CloudError
from unittest import TestCase
@@ -38,6 +38,37 @@ octo_records.append(Record.new(zone, 'aaa', {
'ttl': 2,
'type': 'A',
'values': ['1.1.1.3']}))
+octo_records.append(Record.new(zone, 'aaaa1', {
+ 'ttl': 300,
+ 'type': 'AAAA',
+ 'values': ['2601:644:500:e210:62f8:1dff:feb8:947a',
+ '2601:642:500:e210:62f8:1dff:feb8:947a'],
+}))
+octo_records.append(Record.new(zone, 'aaaa2', {
+ 'ttl': 300,
+ 'type': 'AAAA',
+ 'value': '2601:644:500:e210:62f8:1dff:feb8:947a'
+}))
+octo_records.append(Record.new(zone, 'caa1', {
+ 'ttl': 9,
+ 'type': 'CAA',
+ 'value': {
+ 'flags': 0,
+ 'tag': 'issue',
+ 'value': 'ca.unit.tests',
+ }}))
+octo_records.append(Record.new(zone, 'caa2', {
+ 'ttl': 9,
+ 'type': 'CAA',
+ 'values': [{
+ 'flags': 0,
+ 'tag': 'issue',
+ 'value': 'ca1.unit.tests',
+ }, {
+ 'flags': 0,
+ 'tag': 'issue',
+ 'value': 'ca2.unit.tests',
+ }]}))
octo_records.append(Record.new(zone, 'cname', {
'ttl': 3,
'type': 'CNAME',
@@ -67,6 +98,10 @@ octo_records.append(Record.new(zone, 'foo', {
'ttl': 5,
'type': 'NS',
'value': 'ns1.unit.tests.'}))
+octo_records.append(Record.new(zone, 'ptr1', {
+ 'ttl': 5,
+ 'type': 'PTR',
+ 'value': 'ptr1.unit.tests.'}))
octo_records.append(Record.new(zone, '_srv._tcp', {
'ttl': 6,
'type': 'SRV',
@@ -105,7 +140,8 @@ _base0.zone_name = 'unit.tests'
_base0.relative_record_set_name = '@'
_base0.record_type = 'A'
_base0.params['ttl'] = 0
-_base0.params['arecords'] = [ARecord('1.2.3.4'), ARecord('10.10.10.10')]
+_base0.params['arecords'] = [ARecord(ipv4_address='1.2.3.4'),
+ ARecord(ipv4_address='10.10.10.10')]
azure_records.append(_base0)
_base1 = _AzureRecord('TestAzure', octo_records[1])
@@ -113,7 +149,8 @@ _base1.zone_name = 'unit.tests'
_base1.relative_record_set_name = 'a'
_base1.record_type = 'A'
_base1.params['ttl'] = 1
-_base1.params['arecords'] = [ARecord('1.2.3.4'), ARecord('1.1.1.1')]
+_base1.params['arecords'] = [ARecord(ipv4_address='1.2.3.4'),
+ ARecord(ipv4_address='1.1.1.1')]
azure_records.append(_base1)
_base2 = _AzureRecord('TestAzure', octo_records[2])
@@ -121,7 +158,7 @@ _base2.zone_name = 'unit.tests'
_base2.relative_record_set_name = 'aa'
_base2.record_type = 'A'
_base2.params['ttl'] = 9001
-_base2.params['arecords'] = ARecord('1.2.4.3')
+_base2.params['arecords'] = ARecord(ipv4_address='1.2.4.3')
azure_records.append(_base2)
_base3 = _AzureRecord('TestAzure', octo_records[3])
@@ -129,85 +166,146 @@ _base3.zone_name = 'unit.tests'
_base3.relative_record_set_name = 'aaa'
_base3.record_type = 'A'
_base3.params['ttl'] = 2
-_base3.params['arecords'] = ARecord('1.1.1.3')
+_base3.params['arecords'] = ARecord(ipv4_address='1.1.1.3')
azure_records.append(_base3)
_base4 = _AzureRecord('TestAzure', octo_records[4])
_base4.zone_name = 'unit.tests'
-_base4.relative_record_set_name = 'cname'
-_base4.record_type = 'CNAME'
-_base4.params['ttl'] = 3
-_base4.params['cname_record'] = CnameRecord('a.unit.tests.')
+_base4.relative_record_set_name = 'aaaa1'
+_base4.record_type = 'AAAA'
+_base4.params['ttl'] = 300
+aaaa1 = AaaaRecord(ipv6_address='2601:644:500:e210:62f8:1dff:feb8:947a')
+aaaa2 = AaaaRecord(ipv6_address='2601:642:500:e210:62f8:1dff:feb8:947a')
+_base4.params['aaaa_records'] = [aaaa1, aaaa2]
azure_records.append(_base4)
_base5 = _AzureRecord('TestAzure', octo_records[5])
_base5.zone_name = 'unit.tests'
-_base5.relative_record_set_name = 'mx1'
-_base5.record_type = 'MX'
-_base5.params['ttl'] = 3
-_base5.params['mx_records'] = [MxRecord(10, 'mx1.unit.tests.'),
- MxRecord(20, 'mx2.unit.tests.')]
+_base5.relative_record_set_name = 'aaaa2'
+_base5.record_type = 'AAAA'
+_base5.params['ttl'] = 300
+_base5.params['aaaa_records'] = [aaaa1]
azure_records.append(_base5)
_base6 = _AzureRecord('TestAzure', octo_records[6])
_base6.zone_name = 'unit.tests'
-_base6.relative_record_set_name = 'mx2'
-_base6.record_type = 'MX'
-_base6.params['ttl'] = 3
-_base6.params['mx_records'] = [MxRecord(10, 'mx1.unit.tests.')]
+_base6.relative_record_set_name = 'caa1'
+_base6.record_type = 'CAA'
+_base6.params['ttl'] = 9
+_base6.params['caa_records'] = [CaaRecord(flags=0,
+ tag='issue',
+ value='ca.unit.tests')]
azure_records.append(_base6)
_base7 = _AzureRecord('TestAzure', octo_records[7])
_base7.zone_name = 'unit.tests'
-_base7.relative_record_set_name = '@'
-_base7.record_type = 'NS'
-_base7.params['ttl'] = 4
-_base7.params['ns_records'] = [NsRecord('ns1.unit.tests.'),
- NsRecord('ns2.unit.tests.')]
+_base7.relative_record_set_name = 'caa2'
+_base7.record_type = 'CAA'
+_base7.params['ttl'] = 9
+_base7.params['caa_records'] = [CaaRecord(flags=0,
+ tag='issue',
+ value='ca1.unit.tests'),
+ CaaRecord(flags=0,
+ tag='issue',
+ value='ca2.unit.tests')]
azure_records.append(_base7)
_base8 = _AzureRecord('TestAzure', octo_records[8])
_base8.zone_name = 'unit.tests'
-_base8.relative_record_set_name = 'foo'
-_base8.record_type = 'NS'
-_base8.params['ttl'] = 5
-_base8.params['ns_records'] = [NsRecord('ns1.unit.tests.')]
+_base8.relative_record_set_name = 'cname'
+_base8.record_type = 'CNAME'
+_base8.params['ttl'] = 3
+_base8.params['cname_record'] = CnameRecord(cname='a.unit.tests.')
azure_records.append(_base8)
_base9 = _AzureRecord('TestAzure', octo_records[9])
_base9.zone_name = 'unit.tests'
-_base9.relative_record_set_name = '_srv._tcp'
-_base9.record_type = 'SRV'
-_base9.params['ttl'] = 6
-_base9.params['srv_records'] = [SrvRecord(10, 20, 30, 'foo-1.unit.tests.'),
- SrvRecord(12, 30, 30, 'foo-2.unit.tests.')]
+_base9.relative_record_set_name = 'mx1'
+_base9.record_type = 'MX'
+_base9.params['ttl'] = 3
+_base9.params['mx_records'] = [MxRecord(preference=10,
+ exchange='mx1.unit.tests.'),
+ MxRecord(preference=20,
+ exchange='mx2.unit.tests.')]
azure_records.append(_base9)
_base10 = _AzureRecord('TestAzure', octo_records[10])
_base10.zone_name = 'unit.tests'
-_base10.relative_record_set_name = '_srv2._tcp'
-_base10.record_type = 'SRV'
-_base10.params['ttl'] = 7
-_base10.params['srv_records'] = [SrvRecord(12, 17, 1, 'srvfoo.unit.tests.')]
+_base10.relative_record_set_name = 'mx2'
+_base10.record_type = 'MX'
+_base10.params['ttl'] = 3
+_base10.params['mx_records'] = [MxRecord(preference=10,
+ exchange='mx1.unit.tests.')]
azure_records.append(_base10)
_base11 = _AzureRecord('TestAzure', octo_records[11])
_base11.zone_name = 'unit.tests'
-_base11.relative_record_set_name = 'txt1'
-_base11.record_type = 'TXT'
-_base11.params['ttl'] = 8
-_base11.params['txt_records'] = [TxtRecord(['txt singleton test'])]
+_base11.relative_record_set_name = '@'
+_base11.record_type = 'NS'
+_base11.params['ttl'] = 4
+_base11.params['ns_records'] = [NsRecord(nsdname='ns1.unit.tests.'),
+ NsRecord(nsdname='ns2.unit.tests.')]
azure_records.append(_base11)
_base12 = _AzureRecord('TestAzure', octo_records[12])
_base12.zone_name = 'unit.tests'
-_base12.relative_record_set_name = 'txt2'
-_base12.record_type = 'TXT'
-_base12.params['ttl'] = 9
-_base12.params['txt_records'] = [TxtRecord(['txt multiple test']),
- TxtRecord(['txt multiple test 2'])]
+_base12.relative_record_set_name = 'foo'
+_base12.record_type = 'NS'
+_base12.params['ttl'] = 5
+_base12.params['ns_records'] = [NsRecord(nsdname='ns1.unit.tests.')]
azure_records.append(_base12)
+_base13 = _AzureRecord('TestAzure', octo_records[13])
+_base13.zone_name = 'unit.tests'
+_base13.relative_record_set_name = 'ptr1'
+_base13.record_type = 'PTR'
+_base13.params['ttl'] = 5
+_base13.params['ptr_records'] = [PtrRecord(ptrdname='ptr1.unit.tests.')]
+azure_records.append(_base13)
+
+_base14 = _AzureRecord('TestAzure', octo_records[14])
+_base14.zone_name = 'unit.tests'
+_base14.relative_record_set_name = '_srv._tcp'
+_base14.record_type = 'SRV'
+_base14.params['ttl'] = 6
+_base14.params['srv_records'] = [SrvRecord(priority=10,
+ weight=20,
+ port=30,
+ target='foo-1.unit.tests.'),
+ SrvRecord(priority=12,
+ weight=30,
+ port=30,
+ target='foo-2.unit.tests.')]
+azure_records.append(_base14)
+
+_base15 = _AzureRecord('TestAzure', octo_records[15])
+_base15.zone_name = 'unit.tests'
+_base15.relative_record_set_name = '_srv2._tcp'
+_base15.record_type = 'SRV'
+_base15.params['ttl'] = 7
+_base15.params['srv_records'] = [SrvRecord(priority=12,
+ weight=17,
+ port=1,
+ target='srvfoo.unit.tests.')]
+azure_records.append(_base15)
+
+_base16 = _AzureRecord('TestAzure', octo_records[16])
+_base16.zone_name = 'unit.tests'
+_base16.relative_record_set_name = 'txt1'
+_base16.record_type = 'TXT'
+_base16.params['ttl'] = 8
+_base16.params['txt_records'] = [TxtRecord(value=['txt singleton test'])]
+azure_records.append(_base16)
+
+_base17 = _AzureRecord('TestAzure', octo_records[17])
+_base17.zone_name = 'unit.tests'
+_base17.relative_record_set_name = 'txt2'
+_base17.record_type = 'TXT'
+_base17.params['ttl'] = 9
+_base17.params['txt_records'] = [TxtRecord(value=['txt multiple test']),
+ TxtRecord(value=['txt multiple test 2'])]
+azure_records.append(_base17)
+
class Test_AzureRecord(TestCase):
def test_azure_record(self):
@@ -223,7 +321,7 @@ class Test_ParseAzureType(TestCase):
['AAAA', 'Microsoft.Network/dnszones/AAAA'],
['NS', 'Microsoft.Network/dnszones/NS'],
['MX', 'Microsoft.Network/dnszones/MX']]:
- self.assertEquals(expected, _parse_azure_type(test))
+ self.assertEquals(expected, _parse_azure_type(test))
class Test_CheckEndswithDot(TestCase):
@@ -258,62 +356,91 @@ class TestAzureDnsProvider(TestCase):
provider = self._get_provider()
rs = []
- recordSet = RecordSet(arecords=[ARecord('1.1.1.1')])
+ recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1')])
recordSet.name, recordSet.ttl, recordSet.type = 'a1', 0, 'A'
rs.append(recordSet)
- recordSet = RecordSet(arecords=[ARecord('1.1.1.1'),
- ARecord('2.2.2.2')])
+ recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1'),
+ ARecord(ipv4_address='2.2.2.2')])
recordSet.name, recordSet.ttl, recordSet.type = 'a2', 1, 'A'
rs.append(recordSet)
- recordSet = RecordSet(aaaa_records=[AaaaRecord('1:1ec:1::1')])
+ aaaa1 = AaaaRecord(ipv6_address='1:1ec:1::1')
+ recordSet = RecordSet(aaaa_records=[aaaa1])
recordSet.name, recordSet.ttl, recordSet.type = 'aaaa1', 2, 'AAAA'
rs.append(recordSet)
- recordSet = RecordSet(aaaa_records=[AaaaRecord('1:1ec:1::1'),
- AaaaRecord('1:1ec:1::2')])
+ aaaa2 = AaaaRecord(ipv6_address='1:1ec:1::2')
+ recordSet = RecordSet(aaaa_records=[aaaa1,
+ aaaa2])
recordSet.name, recordSet.ttl, recordSet.type = 'aaaa2', 3, 'AAAA'
rs.append(recordSet)
- recordSet = RecordSet(cname_record=CnameRecord('cname.unit.test.'))
- recordSet.name, recordSet.ttl, recordSet.type = 'cname1', 4, 'CNAME'
+ recordSet = RecordSet(caa_records=[CaaRecord(flags=0,
+ tag='issue',
+ value='caa1.unit.tests')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'caa1', 4, 'CAA'
+ rs.append(recordSet)
+ recordSet = RecordSet(caa_records=[CaaRecord(flags=0,
+ tag='issue',
+ value='caa1.unit.tests'),
+ CaaRecord(flags=0,
+ tag='issue',
+ value='caa2.unit.tests')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'caa2', 4, 'CAA'
+ rs.append(recordSet)
+ cname1 = CnameRecord(cname='cname.unit.test.')
+ recordSet = RecordSet(cname_record=cname1)
+ recordSet.name, recordSet.ttl, recordSet.type = 'cname1', 5, 'CNAME'
rs.append(recordSet)
recordSet = RecordSet(cname_record=None)
- recordSet.name, recordSet.ttl, recordSet.type = 'cname2', 5, 'CNAME'
+ recordSet.name, recordSet.ttl, recordSet.type = 'cname2', 6, 'CNAME'
rs.append(recordSet)
- recordSet = RecordSet(mx_records=[MxRecord(10, 'mx1.unit.test.')])
- recordSet.name, recordSet.ttl, recordSet.type = 'mx1', 6, 'MX'
+ recordSet = RecordSet(mx_records=[MxRecord(preference=10,
+ exchange='mx1.unit.test.')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'mx1', 7, 'MX'
rs.append(recordSet)
- recordSet = RecordSet(mx_records=[MxRecord(10, 'mx1.unit.test.'),
- MxRecord(11, 'mx2.unit.test.')])
- recordSet.name, recordSet.ttl, recordSet.type = 'mx2', 7, 'MX'
+ recordSet = RecordSet(mx_records=[MxRecord(preference=10,
+ exchange='mx1.unit.test.'),
+ MxRecord(preference=11,
+ exchange='mx2.unit.test.')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'mx2', 8, 'MX'
rs.append(recordSet)
- recordSet = RecordSet(ns_records=[NsRecord('ns1.unit.test.')])
- recordSet.name, recordSet.ttl, recordSet.type = 'ns1', 8, 'NS'
+ recordSet = RecordSet(ns_records=[NsRecord(nsdname='ns1.unit.test.')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'ns1', 9, 'NS'
rs.append(recordSet)
- recordSet = RecordSet(ns_records=[NsRecord('ns1.unit.test.'),
- NsRecord('ns2.unit.test.')])
- recordSet.name, recordSet.ttl, recordSet.type = 'ns2', 9, 'NS'
+ recordSet = RecordSet(ns_records=[NsRecord(nsdname='ns1.unit.test.'),
+ NsRecord(nsdname='ns2.unit.test.')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'ns2', 10, 'NS'
rs.append(recordSet)
- recordSet = RecordSet(ptr_records=[PtrRecord('ptr1.unit.test.')])
- recordSet.name, recordSet.ttl, recordSet.type = 'ptr1', 10, 'PTR'
+ ptr1 = PtrRecord(ptrdname='ptr1.unit.test.')
+ recordSet = RecordSet(ptr_records=[ptr1])
+ recordSet.name, recordSet.ttl, recordSet.type = 'ptr1', 11, 'PTR'
rs.append(recordSet)
- recordSet = RecordSet(ptr_records=[PtrRecord(None)])
- recordSet.name, recordSet.ttl, recordSet.type = 'ptr2', 11, 'PTR'
+ recordSet = RecordSet(ptr_records=[PtrRecord(ptrdname=None)])
+ recordSet.name, recordSet.ttl, recordSet.type = 'ptr2', 12, 'PTR'
rs.append(recordSet)
- recordSet = RecordSet(srv_records=[SrvRecord(1, 2, 3, '1unit.tests.')])
- recordSet.name, recordSet.ttl, recordSet.type = '_srv1._tcp', 12, 'SRV'
+ recordSet = RecordSet(srv_records=[SrvRecord(priority=1,
+ weight=2,
+ port=3,
+ target='1unit.tests.')])
+ recordSet.name, recordSet.ttl, recordSet.type = '_srv1._tcp', 13, 'SRV'
rs.append(recordSet)
- recordSet = RecordSet(srv_records=[SrvRecord(1, 2, 3, '1unit.tests.'),
- SrvRecord(4, 5, 6, '2unit.tests.')])
- recordSet.name, recordSet.ttl, recordSet.type = '_srv2._tcp', 13, 'SRV'
+ recordSet = RecordSet(srv_records=[SrvRecord(priority=1,
+ weight=2,
+ port=3,
+ target='1unit.tests.'),
+ SrvRecord(priority=4,
+ weight=5,
+ port=6,
+ target='2unit.tests.')])
+ recordSet.name, recordSet.ttl, recordSet.type = '_srv2._tcp', 14, 'SRV'
rs.append(recordSet)
- recordSet = RecordSet(txt_records=[TxtRecord('sample text1')])
- recordSet.name, recordSet.ttl, recordSet.type = 'txt1', 14, 'TXT'
+ recordSet = RecordSet(txt_records=[TxtRecord(value='sample text1')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'txt1', 15, 'TXT'
rs.append(recordSet)
- recordSet = RecordSet(txt_records=[TxtRecord('sample text1'),
- TxtRecord('sample text2')])
- recordSet.name, recordSet.ttl, recordSet.type = 'txt2', 15, 'TXT'
+ recordSet = RecordSet(txt_records=[TxtRecord(value='sample text1'),
+ TxtRecord(value='sample text2')])
+ recordSet.name, recordSet.ttl, recordSet.type = 'txt2', 16, 'TXT'
rs.append(recordSet)
recordSet = RecordSet(soa_record=[SoaRecord()])
- recordSet.name, recordSet.ttl, recordSet.type = '', 16, 'SOA'
+ recordSet.name, recordSet.ttl, recordSet.type = '', 17, 'SOA'
rs.append(recordSet)
record_list = provider._dns_client.record_sets.list_by_dns_zone
@@ -322,7 +449,7 @@ class TestAzureDnsProvider(TestCase):
exists = provider.populate(zone)
self.assertTrue(exists)
- self.assertEquals(len(zone.records), 16)
+ self.assertEquals(len(zone.records), 18)
def test_populate_zone(self):
provider = self._get_provider()
@@ -356,9 +483,9 @@ class TestAzureDnsProvider(TestCase):
changes.append(Create(i))
deletes.append(Delete(i))
- self.assertEquals(13, provider.apply(Plan(None, zone,
+ self.assertEquals(18, provider.apply(Plan(None, zone,
changes, True)))
- self.assertEquals(13, provider.apply(Plan(zone, zone,
+ self.assertEquals(18, provider.apply(Plan(zone, zone,
deletes, True)))
def test_create_zone(self):
@@ -374,18 +501,18 @@ class TestAzureDnsProvider(TestCase):
_get = provider._dns_client.zones.get
_get.side_effect = CloudError(Mock(status=404), err_msg)
- self.assertEquals(13, provider.apply(Plan(None, desired, changes,
+ self.assertEquals(18, provider.apply(Plan(None, desired, changes,
True)))
def test_check_zone_no_create(self):
provider = self._get_provider()
rs = []
- recordSet = RecordSet(arecords=[ARecord('1.1.1.1')])
+ recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1')])
recordSet.name, recordSet.ttl, recordSet.type = 'a1', 0, 'A'
rs.append(recordSet)
- recordSet = RecordSet(arecords=[ARecord('1.1.1.1'),
- ARecord('2.2.2.2')])
+ recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1'),
+ ARecord(ipv4_address='2.2.2.2')])
recordSet.name, recordSet.ttl, recordSet.type = 'a2', 1, 'A'
rs.append(recordSet)
diff --git a/tests/test_octodns_provider_base.py b/tests/test_octodns_provider_base.py
index e28850a..f33db0f 100644
--- a/tests/test_octodns_provider_base.py
+++ b/tests/test_octodns_provider_base.py
@@ -6,6 +6,7 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
+from six import text_type
from unittest import TestCase
from octodns.record import Create, Delete, Record, Update
@@ -48,7 +49,7 @@ class TestBaseProvider(TestCase):
with self.assertRaises(NotImplementedError) as ctx:
BaseProvider('base')
self.assertEquals('Abstract base class, log property missing',
- ctx.exception.message)
+ text_type(ctx.exception))
class HasLog(BaseProvider):
log = getLogger('HasLog')
@@ -56,7 +57,7 @@ class TestBaseProvider(TestCase):
with self.assertRaises(NotImplementedError) as ctx:
HasLog('haslog')
self.assertEquals('Abstract base class, SUPPORTS_GEO property missing',
- ctx.exception.message)
+ text_type(ctx.exception))
class HasSupportsGeo(HasLog):
SUPPORTS_GEO = False
@@ -65,14 +66,14 @@ class TestBaseProvider(TestCase):
with self.assertRaises(NotImplementedError) as ctx:
HasSupportsGeo('hassupportsgeo').populate(zone)
self.assertEquals('Abstract base class, SUPPORTS property missing',
- ctx.exception.message)
+ text_type(ctx.exception))
class HasSupports(HasSupportsGeo):
SUPPORTS = set(('A',))
with self.assertRaises(NotImplementedError) as ctx:
HasSupports('hassupports').populate(zone)
self.assertEquals('Abstract base class, populate method missing',
- ctx.exception.message)
+ text_type(ctx.exception))
# SUPPORTS_DYNAMIC has a default/fallback
self.assertFalse(HasSupports('hassupports').SUPPORTS_DYNAMIC)
@@ -118,7 +119,7 @@ class TestBaseProvider(TestCase):
with self.assertRaises(NotImplementedError) as ctx:
HasPopulate('haspopulate').apply(plan)
self.assertEquals('Abstract base class, _apply method missing',
- ctx.exception.message)
+ text_type(ctx.exception))
def test_plan(self):
ignored = Zone('unit.tests.', [])
@@ -193,7 +194,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -225,7 +226,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -238,7 +239,7 @@ class TestBaseProvider(TestCase):
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True).raise_if_unsafe()
- self.assertTrue('Too many updates' in ctx.exception.message)
+ self.assertTrue('Too many updates' in text_type(ctx.exception))
def test_safe_updates_min_existing_pcent(self):
# MAX_SAFE_UPDATE_PCENT is safe when more
@@ -251,7 +252,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -273,7 +274,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -286,7 +287,7 @@ class TestBaseProvider(TestCase):
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True).raise_if_unsafe()
- self.assertTrue('Too many deletes' in ctx.exception.message)
+ self.assertTrue('Too many deletes' in text_type(ctx.exception))
def test_safe_deletes_min_existing_pcent(self):
# MAX_SAFE_DELETE_PCENT is safe when more
@@ -299,7 +300,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -322,7 +323,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -336,7 +337,7 @@ class TestBaseProvider(TestCase):
Plan(zone, zone, changes, True,
update_pcent_threshold=safe_pcent).raise_if_unsafe()
- self.assertTrue('Too many updates' in ctx.exception.message)
+ self.assertTrue('Too many updates' in text_type(ctx.exception))
def test_safe_deletes_min_existing_override(self):
safe_pcent = .4
@@ -350,7 +351,7 @@ class TestBaseProvider(TestCase):
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
- zone.add_record(Record.new(zone, unicode(i), {
+ zone.add_record(Record.new(zone, text_type(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
@@ -364,4 +365,4 @@ class TestBaseProvider(TestCase):
Plan(zone, zone, changes, True,
delete_pcent_threshold=safe_pcent).raise_if_unsafe()
- self.assertTrue('Too many deletes' in ctx.exception.message)
+ self.assertTrue('Too many deletes' in text_type(ctx.exception))
diff --git a/tests/test_octodns_provider_cloudflare.py b/tests/test_octodns_provider_cloudflare.py
index f186309..08608ea 100644
--- a/tests/test_octodns_provider_cloudflare.py
+++ b/tests/test_octodns_provider_cloudflare.py
@@ -9,11 +9,13 @@ from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
+from six import text_type
from unittest import TestCase
from octodns.record import Record, Update
from octodns.provider.base import Plan
-from octodns.provider.cloudflare import CloudflareProvider
+from octodns.provider.cloudflare import CloudflareProvider, \
+ CloudflareRateLimitError
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
@@ -51,7 +53,7 @@ class TestCloudflareProvider(TestCase):
empty = {'result': [], 'result_info': {'count': 0, 'per_page': 0}}
def test_populate(self):
- provider = CloudflareProvider('test', 'email', 'token')
+ provider = CloudflareProvider('test', 'email', 'token', retry_period=0)
# Bad requests
with requests_mock() as mock:
@@ -65,7 +67,7 @@ class TestCloudflareProvider(TestCase):
provider.populate(zone)
self.assertEquals('CloudflareError', type(ctx.exception).__name__)
- self.assertEquals('request was invalid', ctx.exception.message)
+ self.assertEquals('request was invalid', text_type(ctx.exception))
# Bad auth
with requests_mock() as mock:
@@ -80,7 +82,7 @@ class TestCloudflareProvider(TestCase):
self.assertEquals('CloudflareAuthenticationError',
type(ctx.exception).__name__)
self.assertEquals('Unknown X-Auth-Key or X-Auth-Email',
- ctx.exception.message)
+ text_type(ctx.exception))
# Bad auth, unknown resp
with requests_mock() as mock:
@@ -91,7 +93,7 @@ class TestCloudflareProvider(TestCase):
provider.populate(zone)
self.assertEquals('CloudflareAuthenticationError',
type(ctx.exception).__name__)
- self.assertEquals('Cloudflare error', ctx.exception.message)
+ self.assertEquals('Cloudflare error', text_type(ctx.exception))
# General error
with requests_mock() as mock:
@@ -102,7 +104,37 @@ class TestCloudflareProvider(TestCase):
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
- # Non-existant zone doesn't populate anything
+ # Rate Limit error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=429,
+ text='{"success":false,"errors":[{"code":10100,'
+ '"message":"More than 1200 requests per 300 seconds '
+ 'reached. Please wait and consider throttling your '
+ 'request speed"}],"messages":[],"result":null}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+
+ self.assertEquals('CloudflareRateLimitError',
+ type(ctx.exception).__name__)
+ self.assertEquals('More than 1200 requests per 300 seconds '
+ 'reached. Please wait and consider throttling '
+ 'your request speed', text_type(ctx.exception))
+
+ # Rate Limit error, unknown resp
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=429, text='{}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+
+ self.assertEquals('CloudflareRateLimitError',
+ type(ctx.exception).__name__)
+ self.assertEquals('Cloudflare error', text_type(ctx.exception))
+
+ # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=200, json=self.empty)
@@ -110,7 +142,7 @@ class TestCloudflareProvider(TestCase):
provider.populate(zone)
self.assertEquals(set(), zone.records)
- # re-populating the same non-existant zone uses cache and makes no
+ # re-populating the same non-existent zone uses cache and makes no
# calls
again = Zone('unit.tests.', [])
provider.populate(again)
@@ -148,7 +180,7 @@ class TestCloudflareProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(12, len(zone.records))
+ self.assertEquals(13, len(zone.records))
changes = self.expected.changes(zone, provider)
@@ -157,10 +189,10 @@ class TestCloudflareProvider(TestCase):
# re-populating the same zone/records comes out of cache, no calls
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(12, len(again.records))
+ self.assertEquals(13, len(again.records))
def test_apply(self):
- provider = CloudflareProvider('test', 'email', 'token')
+ provider = CloudflareProvider('test', 'email', 'token', retry_period=0)
provider._request = Mock()
@@ -171,12 +203,12 @@ class TestCloudflareProvider(TestCase):
'id': 42,
}
}, # zone create
- ] + [None] * 20 # individual record creates
+ ] + [None] * 22 # individual record creates
- # non-existant zone, create everything
+ # non-existent zone, create everything
plan = provider.plan(self.expected)
- self.assertEquals(12, len(plan.changes))
- self.assertEquals(12, provider.apply(plan))
+ self.assertEquals(13, len(plan.changes))
+ self.assertEquals(13, provider.apply(plan))
self.assertFalse(plan.exists)
provider._request.assert_has_calls([
@@ -202,7 +234,7 @@ class TestCloudflareProvider(TestCase):
}),
], True)
# expected number of total calls
- self.assertEquals(22, provider._request.call_count)
+ self.assertEquals(23, provider._request.call_count)
provider._request.reset_mock()
@@ -279,7 +311,11 @@ class TestCloudflareProvider(TestCase):
# we don't care about the POST/create return values
provider._request.return_value = {}
- provider._request.side_effect = None
+
+ # Test out the create rate-limit handling, then 3 successes
+ provider._request.side_effect = [
+ CloudflareRateLimitError('{}'),
+ ] + ([None] * 3)
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'nc', {
@@ -315,7 +351,7 @@ class TestCloudflareProvider(TestCase):
])
def test_update_add_swap(self):
- provider = CloudflareProvider('test', 'email', 'token')
+ provider = CloudflareProvider('test', 'email', 'token', retry_period=0)
provider.zone_records = Mock(return_value=[
{
@@ -356,6 +392,7 @@ class TestCloudflareProvider(TestCase):
provider._request = Mock()
provider._request.side_effect = [
+ CloudflareRateLimitError('{}'),
self.empty, # no zones
{
'result': {
@@ -422,7 +459,7 @@ class TestCloudflareProvider(TestCase):
def test_update_delete(self):
# We need another run so that we can delete, we can't both add and
# delete in one go b/c of swaps
- provider = CloudflareProvider('test', 'email', 'token')
+ provider = CloudflareProvider('test', 'email', 'token', retry_period=0)
provider.zone_records = Mock(return_value=[
{
@@ -463,6 +500,7 @@ class TestCloudflareProvider(TestCase):
provider._request = Mock()
provider._request.side_effect = [
+ CloudflareRateLimitError('{}'),
self.empty, # no zones
{
'result': {
@@ -509,6 +547,83 @@ class TestCloudflareProvider(TestCase):
'fc12ab34cd5611334422ab3322997653')
])
+ def test_ptr(self):
+ provider = CloudflareProvider('test', 'email', 'token')
+
+ zone = Zone('unit.tests.', [])
+ # PTR record
+ ptr_record = Record.new(zone, 'ptr', {
+ 'ttl': 300,
+ 'type': 'PTR',
+ 'value': 'foo.bar.com.'
+ })
+
+ ptr_record_contents = provider._gen_data(ptr_record)
+ self.assertEquals({
+ 'name': 'ptr.unit.tests',
+ 'ttl': 300,
+ 'type': 'PTR',
+ 'content': 'foo.bar.com.'
+ }, list(ptr_record_contents)[0])
+
+ def test_srv(self):
+ provider = CloudflareProvider('test', 'email', 'token')
+
+ zone = Zone('unit.tests.', [])
+ # SRV record not under a sub-domain
+ srv_record = Record.new(zone, '_example._tcp', {
+ 'ttl': 300,
+ 'type': 'SRV',
+ 'value': {
+ 'port': 1234,
+ 'priority': 0,
+ 'target': 'nc.unit.tests.',
+ 'weight': 5
+ }
+ })
+ # SRV record under a sub-domain
+ srv_record_with_sub = Record.new(zone, '_example._tcp.sub', {
+ 'ttl': 300,
+ 'type': 'SRV',
+ 'value': {
+ 'port': 1234,
+ 'priority': 0,
+ 'target': 'nc.unit.tests.',
+ 'weight': 5
+ }
+ })
+
+ srv_record_contents = provider._gen_data(srv_record)
+ srv_record_with_sub_contents = provider._gen_data(srv_record_with_sub)
+ self.assertEquals({
+ 'name': '_example._tcp.unit.tests',
+ 'ttl': 300,
+ 'type': 'SRV',
+ 'data': {
+ 'service': '_example',
+ 'proto': '_tcp',
+ 'name': 'unit.tests.',
+ 'priority': 0,
+ 'weight': 5,
+ 'port': 1234,
+ 'target': 'nc.unit.tests'
+ }
+ }, list(srv_record_contents)[0])
+ self.assertEquals({
+ 'name': '_example._tcp.sub.unit.tests',
+ 'ttl': 300,
+ 'type': 'SRV',
+ 'data': {
+ 'service': '_example',
+ 'proto': '_tcp',
+ 'name': 'sub',
+ 'priority': 0,
+ 'weight': 5,
+ 'port': 1234,
+ 'target': 'nc.unit.tests'
+ }
+ }, list(srv_record_with_sub_contents)[0])
+
def test_alias(self):
provider = CloudflareProvider('test', 'email', 'token')
@@ -684,23 +799,25 @@ class TestCloudflareProvider(TestCase):
# the CDN.
self.assertEquals(3, len(zone.records))
- record = list(zone.records)[0]
- self.assertEquals('multi', record.name)
- self.assertEquals('multi.unit.tests.', record.fqdn)
- self.assertEquals('CNAME', record._type)
- self.assertEquals('multi.unit.tests.cdn.cloudflare.net.', record.value)
+ ordered = sorted(zone.records, key=lambda r: r.name)
- record = list(zone.records)[1]
+ record = ordered[0]
+ self.assertEquals('a', record.name)
+ self.assertEquals('a.unit.tests.', record.fqdn)
+ self.assertEquals('CNAME', record._type)
+ self.assertEquals('a.unit.tests.cdn.cloudflare.net.', record.value)
+
+ record = ordered[1]
self.assertEquals('cname', record.name)
self.assertEquals('cname.unit.tests.', record.fqdn)
self.assertEquals('CNAME', record._type)
self.assertEquals('cname.unit.tests.cdn.cloudflare.net.', record.value)
- record = list(zone.records)[2]
- self.assertEquals('a', record.name)
- self.assertEquals('a.unit.tests.', record.fqdn)
+ record = ordered[2]
+ self.assertEquals('multi', record.name)
+ self.assertEquals('multi.unit.tests.', record.fqdn)
self.assertEquals('CNAME', record._type)
- self.assertEquals('a.unit.tests.cdn.cloudflare.net.', record.value)
+ self.assertEquals('multi.unit.tests.cdn.cloudflare.net.', record.value)
# CDN enabled records can't be updated, we don't know the real values
# never point a Cloudflare record to itself.
@@ -892,7 +1009,7 @@ class TestCloudflareProvider(TestCase):
'value': 'ns1.unit.tests.'
})
- data = provider._gen_data(record).next()
+ data = next(provider._gen_data(record))
self.assertFalse('proxied' in data)
@@ -907,7 +1024,7 @@ class TestCloudflareProvider(TestCase):
}), False
)
- data = provider._gen_data(record).next()
+ data = next(provider._gen_data(record))
self.assertFalse(data['proxied'])
@@ -922,7 +1039,7 @@ class TestCloudflareProvider(TestCase):
}), True
)
- data = provider._gen_data(record).next()
+ data = next(provider._gen_data(record))
self.assertTrue(data['proxied'])
@@ -1151,3 +1268,75 @@ class TestCloudflareProvider(TestCase):
self.assertFalse(
extra_changes[0].new._octodns['cloudflare']['proxied']
)
+
+ def test_emailless_auth(self):
+ provider = CloudflareProvider('test', token='token 123',
+ email='email 234')
+ headers = provider._sess.headers
+ self.assertEquals('email 234', headers['X-Auth-Email'])
+ self.assertEquals('token 123', headers['X-Auth-Key'])
+
+ provider = CloudflareProvider('test', token='token 123')
+ headers = provider._sess.headers
+ self.assertEquals('Bearer token 123', headers['Authorization'])
+
+ def test_retry_behavior(self):
+ provider = CloudflareProvider('test', token='token 123',
+ email='email 234', retry_period=0)
+ result = {
+ "success": True,
+ "errors": [],
+ "messages": [],
+ "result": [],
+ "result_info": {
+ "count": 1,
+ "per_page": 50
+ }
+ }
+ zone = Zone('unit.tests.', [])
+ provider._request = Mock()
+
+ # No retry required, just calls and is returned
+ provider._zones = None
+ provider._request.reset_mock()
+ provider._request.side_effect = [result]
+ self.assertEquals([], provider.zone_records(zone))
+ provider._request.assert_has_calls([call('GET', '/zones',
+ params={'page': 1})])
+
+ # One retry required
+ provider._zones = None
+ provider._request.reset_mock()
+ provider._request.side_effect = [
+ CloudflareRateLimitError('{}'),
+ result
+ ]
+ self.assertEquals([], provider.zone_records(zone))
+ provider._request.assert_has_calls([call('GET', '/zones',
+ params={'page': 1})])
+
+ # Two retries required
+ provider._zones = None
+ provider._request.reset_mock()
+ provider._request.side_effect = [
+ CloudflareRateLimitError('{}'),
+ CloudflareRateLimitError('{}'),
+ result
+ ]
+ self.assertEquals([], provider.zone_records(zone))
+ provider._request.assert_has_calls([call('GET', '/zones',
+ params={'page': 1})])
+
+ # Exhaust our retries
+ provider._zones = None
+ provider._request.reset_mock()
+ provider._request.side_effect = [
+ CloudflareRateLimitError({"errors": [{"message": "first"}]}),
+ CloudflareRateLimitError({"errors": [{"message": "boo"}]}),
+ CloudflareRateLimitError({"errors": [{"message": "boo"}]}),
+ CloudflareRateLimitError({"errors": [{"message": "boo"}]}),
+ CloudflareRateLimitError({"errors": [{"message": "last"}]}),
+ ]
+ with self.assertRaises(CloudflareRateLimitError) as ctx:
+ provider.zone_records(zone)
+ self.assertEquals('last', text_type(ctx.exception))
diff --git a/tests/test_octodns_provider_constellix.py b/tests/test_octodns_provider_constellix.py
new file mode 100644
index 0000000..151d0d4
--- /dev/null
+++ b/tests/test_octodns_provider_constellix.py
@@ -0,0 +1,231 @@
+#
+#
+#
+
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from mock import Mock, call
+from os.path import dirname, join
+from requests import HTTPError
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.record import Record
+from octodns.provider.constellix import \
+ ConstellixProvider
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+
+
+class TestConstellixProvider(TestCase):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test', join(dirname(__file__), 'config'))
+ source.populate(expected)
+
+ # Our test suite differs a bit, add our NS and remove the simple one
+ expected.add_record(Record.new(expected, 'under', {
+ 'ttl': 3600,
+ 'type': 'NS',
+ 'values': [
+ 'ns1.unit.tests.',
+ 'ns2.unit.tests.',
+ ]
+ }))
+
+ # Add some ALIAS records
+ expected.add_record(Record.new(expected, '', {
+ 'ttl': 1800,
+ 'type': 'ALIAS',
+ 'value': 'aname.unit.tests.'
+ }))
+
+ expected.add_record(Record.new(expected, 'sub', {
+ 'ttl': 1800,
+ 'type': 'ALIAS',
+ 'value': 'aname.unit.tests.'
+ }))
+
+ for record in list(expected.records):
+ if record.name == 'sub' and record._type == 'NS':
+ expected._remove_record(record)
+ break
+
+ def test_populate(self):
+ provider = ConstellixProvider('test', 'api', 'secret')
+
+ # Bad auth
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=401,
+ text='{"errors": ["Unable to authenticate token"]}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals('Unauthorized', text_type(ctx.exception))
+
+ # Bad request
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=400,
+ text='{"errors": ["\\"unittests\\" is not '
+ 'a valid domain name"]}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals('\n - "unittests" is not a valid domain name',
+ text_type(ctx.exception))
+
+ # General error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=502, text='Things caught fire')
+
+ with self.assertRaises(HTTPError) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(502, ctx.exception.response.status_code)
+
+ # Non-existent zone doesn't populate anything
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='')
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(set(), zone.records)
+
+ # No diffs == no changes
+ with requests_mock() as mock:
+ base = 'https://api.dns.constellix.com/v1/domains'
+ with open('tests/fixtures/constellix-domains.json') as fh:
+ mock.get('{}{}'.format(base, ''), text=fh.read())
+ with open('tests/fixtures/constellix-records.json') as fh:
+ mock.get('{}{}'.format(base, '/123123/records'),
+ text=fh.read())
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(15, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(0, len(changes))
+
+ # 2nd populate makes no network calls/all from cache
+ again = Zone('unit.tests.', [])
+ provider.populate(again)
+ self.assertEquals(15, len(again.records))
+
+ # bust the cache
+ del provider._zone_records[zone.name]
+
+ def test_apply(self):
+ provider = ConstellixProvider('test', 'api', 'secret')
+
+ resp = Mock()
+ resp.json = Mock()
+ provider._client._request = Mock(return_value=resp)
+
+ # non-existent domain, create everything
+ resp.json.side_effect = [
+ [], # no domains returned during populate
+ [{
+ 'id': 123123,
+ 'name': 'unit.tests'
+ }], # domain created in apply
+ ]
+
+ plan = provider.plan(self.expected)
+
+ # No root NS, no ignored, no excluded, no unsupported
+ n = len(self.expected.records) - 5
+ self.assertEquals(n, len(plan.changes))
+ self.assertEquals(n, provider.apply(plan))
+
+ provider._client._request.assert_has_calls([
+ # get all domains to build the cache
+ call('GET', ''),
+ # created the domain
+ call('POST', '/', data={'names': ['unit.tests']})
+ ])
+ # These two checks are broken up so that ordering doesn't break things.
+ # Python3 doesn't make the calls in a consistent order so different
+ # things follow the GET / on different runs
+ provider._client._request.assert_has_calls([
+ call('POST', '/123123/records/SRV', data={
+ 'roundRobin': [{
+ 'priority': 10,
+ 'weight': 20,
+ 'value': 'foo-1.unit.tests.',
+ 'port': 30
+ }, {
+ 'priority': 12,
+ 'weight': 20,
+ 'value': 'foo-2.unit.tests.',
+ 'port': 30
+ }],
+ 'name': '_srv._tcp',
+ 'ttl': 600,
+ }),
+ ])
+
+ self.assertEquals(18, provider._client._request.call_count)
+
+ provider._client._request.reset_mock()
+
+ provider._client.records = Mock(return_value=[
+ {
+ 'id': 11189897,
+ 'type': 'A',
+ 'name': 'www',
+ 'ttl': 300,
+ 'value': [
+ '1.2.3.4',
+ '2.2.3.4',
+ ]
+ }, {
+ 'id': 11189898,
+ 'type': 'A',
+ 'name': 'ttl',
+ 'ttl': 600,
+ 'value': [
+ '3.2.3.4'
+ ]
+ }, {
+ 'id': 11189899,
+ 'type': 'ALIAS',
+ 'name': 'alias',
+ 'ttl': 600,
+ 'value': [{
+ 'value': 'aname.unit.tests.'
+ }]
+ }
+ ])
+
+ # Domain exists, we don't care about return
+ resp.json.side_effect = ['{}']
+
+ wanted = Zone('unit.tests.', [])
+ wanted.add_record(Record.new(wanted, 'ttl', {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '3.2.3.4'
+ }))
+
+ plan = provider.plan(wanted)
+ self.assertEquals(3, len(plan.changes))
+ self.assertEquals(3, provider.apply(plan))
+
+ # recreate for update, and deletes for the 2 parts of the other
+ provider._client._request.assert_has_calls([
+ call('POST', '/123123/records/A', data={
+ 'roundRobin': [{
+ 'value': '3.2.3.4'
+ }],
+ 'name': 'ttl',
+ 'ttl': 300
+ }),
+ call('DELETE', '/123123/records/A/11189897'),
+ call('DELETE', '/123123/records/A/11189898'),
+ call('DELETE', '/123123/records/ANAME/11189899')
+ ], any_order=True)
diff --git a/tests/test_octodns_provider_digitalocean.py b/tests/test_octodns_provider_digitalocean.py
index ddc6bc2..ebb5319 100644
--- a/tests/test_octodns_provider_digitalocean.py
+++ b/tests/test_octodns_provider_digitalocean.py
@@ -10,6 +10,7 @@ from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
+from six import text_type
from unittest import TestCase
from octodns.record import Record
@@ -50,7 +51,7 @@ class TestDigitalOceanProvider(TestCase):
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals('Unauthorized', ctx.exception.message)
+ self.assertEquals('Unauthorized', text_type(ctx.exception))
# General error
with requests_mock() as mock:
@@ -61,7 +62,7 @@ class TestDigitalOceanProvider(TestCase):
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
- # Non-existant zone doesn't populate anything
+ # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"id":"not_found","message":"The resource you '
@@ -153,7 +154,7 @@ class TestDigitalOceanProvider(TestCase):
}
}
- # non-existant domain, create everything
+ # non-existent domain, create everything
resp.json.side_effect = [
DigitalOceanClientNotFound, # no zone in populate
DigitalOceanClientNotFound, # no domain during apply
@@ -175,7 +176,20 @@ class TestDigitalOceanProvider(TestCase):
call('GET', '/domains/unit.tests/records', {'page': 1}),
# delete the initial A record
call('DELETE', '/domains/unit.tests/records/11189877'),
- # created at least one of the record with expected data
+ # created at least some of the records with expected data
+ call('POST', '/domains/unit.tests/records', data={
+ 'data': '1.2.3.4',
+ 'name': '@',
+ 'ttl': 300, 'type': 'A'}),
+ call('POST', '/domains/unit.tests/records', data={
+ 'data': '1.2.3.5',
+ 'name': '@',
+ 'ttl': 300, 'type': 'A'}),
+ call('POST', '/domains/unit.tests/records', data={
+ 'data': 'ca.unit.tests.',
+ 'flags': 0, 'name': '@',
+ 'tag': 'issue',
+ 'ttl': 3600, 'type': 'CAA'}),
call('POST', '/domains/unit.tests/records', data={
'name': '_srv._tcp',
'weight': 20,
diff --git a/tests/test_octodns_provider_dnsimple.py b/tests/test_octodns_provider_dnsimple.py
index 896425e..b918962 100644
--- a/tests/test_octodns_provider_dnsimple.py
+++ b/tests/test_octodns_provider_dnsimple.py
@@ -9,6 +9,7 @@ from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
+from six import text_type
from unittest import TestCase
from octodns.record import Record
@@ -37,7 +38,13 @@ class TestDnsimpleProvider(TestCase):
break
def test_populate(self):
+
+ # Sandbox
+ provider = DnsimpleProvider('test', 'token', 42, 'true')
+ self.assertTrue('sandbox' in provider._client.base)
+
provider = DnsimpleProvider('test', 'token', 42)
+ self.assertFalse('sandbox' in provider._client.base)
# Bad auth
with requests_mock() as mock:
@@ -47,7 +54,7 @@ class TestDnsimpleProvider(TestCase):
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals('Unauthorized', ctx.exception.message)
+ self.assertEquals('Unauthorized', text_type(ctx.exception))
# General error
with requests_mock() as mock:
@@ -58,7 +65,7 @@ class TestDnsimpleProvider(TestCase):
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
- # Non-existant zone doesn't populate anything
+ # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"message": "Domain `foo.bar` not found"}')
@@ -122,7 +129,7 @@ class TestDnsimpleProvider(TestCase):
resp.json = Mock()
provider._client._request = Mock(return_value=resp)
- # non-existant domain, create everything
+ # non-existent domain, create everything
resp.json.side_effect = [
DnsimpleClientNotFound, # no zone in populate
DnsimpleClientNotFound, # no domain during apply
@@ -138,7 +145,32 @@ class TestDnsimpleProvider(TestCase):
provider._client._request.assert_has_calls([
# created the domain
call('POST', '/domains', data={'name': 'unit.tests'}),
- # created at least one of the record with expected data
+ # created at least some of the records with expected data
+ call('POST', '/zones/unit.tests/records', data={
+ 'content': '1.2.3.4',
+ 'type': 'A',
+ 'name': '',
+ 'ttl': 300}),
+ call('POST', '/zones/unit.tests/records', data={
+ 'content': '1.2.3.5',
+ 'type': 'A',
+ 'name': '',
+ 'ttl': 300}),
+ call('POST', '/zones/unit.tests/records', data={
+ 'content': '0 issue "ca.unit.tests"',
+ 'type': 'CAA',
+ 'name': '',
+ 'ttl': 3600}),
+ call('POST', '/zones/unit.tests/records', data={
+ 'content': '1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49',
+ 'type': 'SSHFP',
+ 'name': '',
+ 'ttl': 3600}),
+ call('POST', '/zones/unit.tests/records', data={
+ 'content': '1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73',
+ 'type': 'SSHFP',
+ 'name': '',
+ 'ttl': 3600}),
call('POST', '/zones/unit.tests/records', data={
'content': '20 30 foo-1.unit.tests.',
'priority': 10,
diff --git a/tests/test_octodns_provider_dnsmadeeasy.py b/tests/test_octodns_provider_dnsmadeeasy.py
index 04cf0ee..ba61b94 100644
--- a/tests/test_octodns_provider_dnsmadeeasy.py
+++ b/tests/test_octodns_provider_dnsmadeeasy.py
@@ -10,6 +10,7 @@ from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
+from six import text_type
from unittest import TestCase
from octodns.record import Record
@@ -65,7 +66,7 @@ class TestDnsMadeEasyProvider(TestCase):
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals('Unauthorized', ctx.exception.message)
+ self.assertEquals('Unauthorized', text_type(ctx.exception))
# Bad request
with requests_mock() as mock:
@@ -76,7 +77,7 @@ class TestDnsMadeEasyProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('\n - Rate limit exceeded',
- ctx.exception.message)
+ text_type(ctx.exception))
# General error
with requests_mock() as mock:
@@ -87,7 +88,7 @@ class TestDnsMadeEasyProvider(TestCase):
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
- # Non-existant zone doesn't populate anything
+ # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='')
@@ -130,7 +131,7 @@ class TestDnsMadeEasyProvider(TestCase):
with open('tests/fixtures/dnsmadeeasy-domains.json') as fh:
domains = json.load(fh)
- # non-existant domain, create everything
+ # non-existent domain, create everything
resp.json.side_effect = [
DnsMadeEasyClientNotFound, # no zone in populate
DnsMadeEasyClientNotFound, # no domain during apply
@@ -148,7 +149,27 @@ class TestDnsMadeEasyProvider(TestCase):
call('POST', '/', data={'name': 'unit.tests'}),
# get all domains to build the cache
call('GET', '/'),
- # created at least one of the record with expected data
+ # created at least some of the records with expected data
+ call('POST', '/123123/records', data={
+ 'type': 'A',
+ 'name': '',
+ 'value': '1.2.3.4',
+ 'ttl': 300}),
+ call('POST', '/123123/records', data={
+ 'type': 'A',
+ 'name': '',
+ 'value': '1.2.3.5',
+ 'ttl': 300}),
+ call('POST', '/123123/records', data={
+ 'type': 'ANAME',
+ 'name': '',
+ 'value': 'aname.unit.tests.',
+ 'ttl': 1800}),
+ call('POST', '/123123/records', data={
+ 'name': '',
+ 'value': 'ca.unit.tests',
+ 'issuerCritical': 0, 'caaType': 'issue',
+ 'ttl': 3600, 'type': 'CAA'}),
call('POST', '/123123/records', data={
'name': '_srv._tcp',
'weight': 20,
diff --git a/tests/test_octodns_provider_dyn.py b/tests/test_octodns_provider_dyn.py
index 79d764d..7c023fd 100644
--- a/tests/test_octodns_provider_dyn.py
+++ b/tests/test_octodns_provider_dyn.py
@@ -670,8 +670,8 @@ class TestDynProviderGeo(TestCase):
tds = provider.traffic_directors
self.assertEquals(set(['unit.tests.', 'geo.unit.tests.']),
set(tds.keys()))
- self.assertEquals(['A'], tds['unit.tests.'].keys())
- self.assertEquals(['A'], tds['geo.unit.tests.'].keys())
+ self.assertEquals(['A'], list(tds['unit.tests.'].keys()))
+ self.assertEquals(['A'], list(tds['geo.unit.tests.'].keys()))
provider.log.warn.assert_called_with("Unsupported TrafficDirector "
"'%s'", 'something else')
@@ -980,26 +980,34 @@ class TestDynProviderGeo(TestCase):
provider = DynProvider('test', 'cust', 'user', 'pass',
traffic_directors_enabled=True)
+ got = Zone('unit.tests.', [])
+ zone_name = got.name[:-1]
# only traffic director
mock.side_effect = [
# get traffic directors
self.traffic_directors_response,
- # get traffic director
+ # get the first td's nodes
+ {'data': [{'fqdn': zone_name, 'zone': zone_name}]},
+ # get traffic director, b/c ^ matches
self.traffic_director_response,
+ # get the next td's nodes, not a match
+ {'data': [{'fqdn': 'other', 'zone': 'other'}]},
# get zone
{'data': {}},
# get records
{'data': {}},
]
- got = Zone('unit.tests.', [])
provider.populate(got)
self.assertEquals(1, len(got.records))
self.assertFalse(self.expected_geo.changes(got, provider))
mock.assert_has_calls([
+ call('/DSF/', 'GET', {'detail': 'Y'}),
+ call('/DSFNode/2ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/DSF/2ERWXQNsb_IKG2YZgYqkPvk0PBM/', 'GET',
{'pending_changes': 'Y'}),
+ call('/DSFNode/3ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/Zone/unit.tests/', 'GET', {}),
- call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'}),
+ call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'})
])
@patch('dyn.core.SessionEngine.execute')
@@ -1035,8 +1043,12 @@ class TestDynProviderGeo(TestCase):
mock.side_effect = [
# get traffic directors
self.traffic_directors_response,
- # get traffic director
+ # grab its nodes, matches
+ {'data': [{'fqdn': 'unit.tests', 'zone': 'unit.tests'}]},
+ # get traffic director b/c match
self.traffic_director_response,
+ # grab next td's nodes, not a match
+ {'data': [{'fqdn': 'other', 'zone': 'other'}]},
# get zone
{'data': {}},
# get records
@@ -1047,10 +1059,13 @@ class TestDynProviderGeo(TestCase):
self.assertEquals(1, len(got.records))
self.assertFalse(self.expected_geo.changes(got, provider))
mock.assert_has_calls([
+ call('/DSF/', 'GET', {'detail': 'Y'}),
+ call('/DSFNode/2ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/DSF/2ERWXQNsb_IKG2YZgYqkPvk0PBM/', 'GET',
{'pending_changes': 'Y'}),
+ call('/DSFNode/3ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/Zone/unit.tests/', 'GET', {}),
- call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'}),
+ call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'})
])
@patch('dyn.core.SessionEngine.execute')
@@ -1085,8 +1100,10 @@ class TestDynProviderGeo(TestCase):
mock.side_effect = [
# get traffic directors
self.traffic_directors_response,
+ {'data': [{'fqdn': 'unit.tests', 'zone': 'unit.tests'}]},
# get traffic director
busted_traffic_director_response,
+ {'data': [{'fqdn': 'other', 'zone': 'other'}]},
# get zone
{'data': {}},
# get records
@@ -1099,10 +1116,13 @@ class TestDynProviderGeo(TestCase):
# so just compare set contents (which does name and type)
self.assertEquals(self.expected_geo.records, got.records)
mock.assert_has_calls([
+ call('/DSF/', 'GET', {'detail': 'Y'}),
+ call('/DSFNode/2ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/DSF/2ERWXQNsb_IKG2YZgYqkPvk0PBM/', 'GET',
{'pending_changes': 'Y'}),
+ call('/DSFNode/3ERWXQNsb_IKG2YZgYqkPvk0PBM', 'GET', {}),
call('/Zone/unit.tests/', 'GET', {}),
- call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'}),
+ call('/AllRecord/unit.tests/unit.tests./', 'GET', {'detail': 'Y'})
])
@patch('dyn.core.SessionEngine.execute')
@@ -1625,11 +1645,12 @@ class DummyRuleset(object):
class DummyTrafficDirector(object):
- def __init__(self, rulesets=[], response_pools=[], ttl=42):
+ def __init__(self, zone_name, rulesets=[], response_pools=[], ttl=42):
self.label = 'dummy:abcdef1234567890'
self.rulesets = rulesets
self.all_response_pools = response_pools
self.ttl = ttl
+ self.nodes = [{'zone': zone_name[:-1]}]
class TestDynProviderDynamic(TestCase):
@@ -1880,9 +1901,9 @@ class TestDynProviderDynamic(TestCase):
},
}),
]
- td = DummyTrafficDirector(rulesets, [default_response_pool,
- pool1_response_pool])
zone = Zone('unit.tests.', [])
+ td = DummyTrafficDirector(zone.name, rulesets,
+ [default_response_pool, pool1_response_pool])
record = provider._populate_dynamic_traffic_director(zone, fqdn, 'A',
td, rulesets,
True)
diff --git a/tests/test_octodns_provider_edgedns.py b/tests/test_octodns_provider_edgedns.py
new file mode 100644
index 0000000..20a9a07
--- /dev/null
+++ b/tests/test_octodns_provider_edgedns.py
@@ -0,0 +1,158 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+# from mock import Mock, call
+from os.path import dirname, join
+from requests import HTTPError
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.record import Record
+from octodns.provider.edgedns import AkamaiProvider
+from octodns.provider.fastdns import AkamaiProvider as LegacyAkamaiProvider
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+
+
+class TestEdgeDnsProvider(TestCase):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test', join(dirname(__file__), 'config'))
+ source.populate(expected)
+
+ # Our test suite differs a bit, add our NS and remove the simple one
+ expected.add_record(Record.new(expected, 'under', {
+ 'ttl': 3600,
+ 'type': 'NS',
+ 'values': [
+ 'ns1.unit.tests.',
+ 'ns2.unit.tests.',
+ ]
+ }))
+ for record in list(expected.records):
+ if record.name == 'sub' and record._type == 'NS':
+ expected._remove_record(record)
+ break
+
+ def test_populate(self):
+ provider = AkamaiProvider("test", "secret", "akam.com", "atok", "ctok")
+
+ # Bad Auth
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=401, text='{"message": "Unauthorized"}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+
+ self.assertEquals(401, ctx.exception.response.status_code)
+
+ # general error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=502, text='Things caught fire')
+
+ with self.assertRaises(HTTPError) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(502, ctx.exception.response.status_code)
+
+ # Non-existent zone doesn't populate anything
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='{"message": "Domain `foo.bar` not found"}')
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(set(), zone.records)
+
+ # No diffs == no changes
+ with requests_mock() as mock:
+
+ with open('tests/fixtures/edgedns-records.json') as fh:
+ mock.get(ANY, text=fh.read())
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(16, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(0, len(changes))
+
+ # 2nd populate makes no network calls/all from cache
+ again = Zone('unit.tests.', [])
+ provider.populate(again)
+ self.assertEquals(16, len(again.records))
+
+ # bust the cache
+ del provider._zone_records[zone.name]
+
+ def test_apply(self):
+ provider = AkamaiProvider("test", "s", "akam.com", "atok", "ctok",
+ "cid", "gid")
+
+ # tests create update delete through previous state config json
+ with requests_mock() as mock:
+
+ with open('tests/fixtures/edgedns-records-prev.json') as fh:
+ mock.get(ANY, text=fh.read())
+
+ plan = provider.plan(self.expected)
+ mock.post(ANY, status_code=201)
+ mock.put(ANY, status_code=200)
+ mock.delete(ANY, status_code=204)
+
+ changes = provider.apply(plan)
+ self.assertEquals(29, changes)
+
+ # Test against a zone that doesn't exist yet
+ with requests_mock() as mock:
+ with open('tests/fixtures/edgedns-records-prev-other.json') as fh:
+ mock.get(ANY, status_code=404)
+
+ plan = provider.plan(self.expected)
+ mock.post(ANY, status_code=201)
+ mock.put(ANY, status_code=200)
+ mock.delete(ANY, status_code=204)
+
+ changes = provider.apply(plan)
+ self.assertEquals(14, changes)
+
+ # Test against a zone that doesn't exist yet, but gid not provided
+ with requests_mock() as mock:
+ with open('tests/fixtures/edgedns-records-prev-other.json') as fh:
+ mock.get(ANY, status_code=404)
+ provider = AkamaiProvider("test", "s", "akam.com", "atok", "ctok",
+ "cid")
+ plan = provider.plan(self.expected)
+ mock.post(ANY, status_code=201)
+ mock.put(ANY, status_code=200)
+ mock.delete(ANY, status_code=204)
+
+ changes = provider.apply(plan)
+ self.assertEquals(14, changes)
+
+ # Test against a zone that doesn't exist, but cid not provided
+
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404)
+
+ provider = AkamaiProvider("test", "s", "akam.com", "atok", "ctok")
+ plan = provider.plan(self.expected)
+ mock.post(ANY, status_code=201)
+ mock.put(ANY, status_code=200)
+ mock.delete(ANY, status_code=204)
+
+ try:
+ changes = provider.apply(plan)
+ except NameError as e:
+ expected = "contractId not specified to create zone"
+ self.assertEquals(text_type(e), expected)
+
+
+class TestDeprecatedAkamaiProvider(TestCase):
+
+ def test_equivilent(self):
+ self.assertEquals(LegacyAkamaiProvider, AkamaiProvider)
diff --git a/tests/test_octodns_provider_googlecloud.py b/tests/test_octodns_provider_googlecloud.py
index 3a3e600..e642668 100644
--- a/tests/test_octodns_provider_googlecloud.py
+++ b/tests/test_octodns_provider_googlecloud.py
@@ -193,8 +193,13 @@ class DummyIterator:
def __iter__(self):
return self
+ # python2
def next(self):
- return self.iterable.next()
+ return next(self.iterable)
+
+ # python3
+ def __next__(self):
+ return next(self.iterable)
class TestGoogleCloudProvider(TestCase):
@@ -247,7 +252,7 @@ class TestGoogleCloudProvider(TestCase):
return_values_for_status = iter(
["pending"] * 11 + ['done', 'done'])
type(status_mock).status = PropertyMock(
- side_effect=return_values_for_status.next)
+ side_effect=lambda: next(return_values_for_status))
gcloud_zone_mock.changes = Mock(return_value=status_mock)
provider = self._get_provider()
diff --git a/tests/test_octodns_provider_mythicbeasts.py b/tests/test_octodns_provider_mythicbeasts.py
new file mode 100644
index 0000000..960bd65
--- /dev/null
+++ b/tests/test_octodns_provider_mythicbeasts.py
@@ -0,0 +1,451 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from os.path import dirname, join
+
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.provider.mythicbeasts import MythicBeastsProvider, \
+ add_trailing_dot, remove_trailing_dot
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+from octodns.record import Create, Update, Delete, Record
+
+
+class TestMythicBeastsProvider(TestCase):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test_expected', join(dirname(__file__), 'config'))
+ source.populate(expected)
+
+ # Dump anything we don't support from expected
+ for record in list(expected.records):
+ if record._type not in MythicBeastsProvider.SUPPORTS:
+ expected._remove_record(record)
+
+ def test_trailing_dot(self):
+ with self.assertRaises(AssertionError) as err:
+ add_trailing_dot('unit.tests.')
+ self.assertEquals('Value already has trailing dot',
+ text_type(err.exception))
+
+ with self.assertRaises(AssertionError) as err:
+ remove_trailing_dot('unit.tests')
+ self.assertEquals('Value already missing trailing dot',
+ text_type(err.exception))
+
+ self.assertEquals(add_trailing_dot('unit.tests'), 'unit.tests.')
+ self.assertEquals(remove_trailing_dot('unit.tests.'), 'unit.tests')
+
+ def test_data_for_single(self):
+ test_data = {
+ 'raw_values': [{'value': 'a:a::c', 'ttl': 0}],
+ 'zone': 'unit.tests.',
+ }
+ test_single = MythicBeastsProvider._data_for_single('', test_data)
+ self.assertTrue(isinstance(test_single, dict))
+ self.assertEquals('a:a::c', test_single['value'])
+
+ def test_data_for_multiple(self):
+ test_data = {
+ 'raw_values': [
+ {'value': 'b:b::d', 'ttl': 60},
+ {'value': 'a:a::c', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_multiple = MythicBeastsProvider._data_for_multiple('', test_data)
+ self.assertTrue(isinstance(test_multiple, dict))
+ self.assertEquals(2, len(test_multiple['values']))
+
+ def test_data_for_txt(self):
+ test_data = {
+ 'raw_values': [
+ {'value': 'v=DKIM1; k=rsa; p=prawf', 'ttl': 60},
+ {'value': 'prawf prawf dyma prawf', 'ttl': 300}],
+ 'zone': 'unit.tests.',
+ }
+ test_txt = MythicBeastsProvider._data_for_TXT('', test_data)
+ self.assertTrue(isinstance(test_txt, dict))
+ self.assertEquals(2, len(test_txt['values']))
+ self.assertEquals('v=DKIM1\\; k=rsa\\; p=prawf', test_txt['values'][0])
+
+ def test_data_for_MX(self):
+ test_data = {
+ 'raw_values': [
+ {'value': '10 un.unit', 'ttl': 60},
+ {'value': '20 dau.unit', 'ttl': 60},
+ {'value': '30 tri.unit', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_MX = MythicBeastsProvider._data_for_MX('', test_data)
+ self.assertTrue(isinstance(test_MX, dict))
+ self.assertEquals(3, len(test_MX['values']))
+
+ with self.assertRaises(AssertionError) as err:
+ test_MX = MythicBeastsProvider._data_for_MX(
+ '',
+ {'raw_values': [{'value': '', 'ttl': 0}]}
+ )
+ self.assertEquals('Unable to parse MX data',
+ text_type(err.exception))
+
+ def test_data_for_CNAME(self):
+ test_data = {
+ 'raw_values': [{'value': 'cname', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_cname = MythicBeastsProvider._data_for_CNAME('', test_data)
+ self.assertTrue(isinstance(test_cname, dict))
+ self.assertEquals('cname.unit.tests.', test_cname['value'])
+
+ def test_data_for_ANAME(self):
+ test_data = {
+ 'raw_values': [{'value': 'aname', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_aname = MythicBeastsProvider._data_for_ANAME('', test_data)
+ self.assertTrue(isinstance(test_aname, dict))
+ self.assertEquals('aname', test_aname['value'])
+
+ def test_data_for_SRV(self):
+ test_data = {
+ 'raw_values': [
+ {'value': '10 20 30 un.srv.unit', 'ttl': 60},
+ {'value': '20 30 40 dau.srv.unit', 'ttl': 60},
+ {'value': '30 30 50 tri.srv.unit', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_SRV = MythicBeastsProvider._data_for_SRV('', test_data)
+ self.assertTrue(isinstance(test_SRV, dict))
+ self.assertEquals(3, len(test_SRV['values']))
+
+ with self.assertRaises(AssertionError) as err:
+ test_SRV = MythicBeastsProvider._data_for_SRV(
+ '',
+ {'raw_values': [{'value': '', 'ttl': 0}]}
+ )
+ self.assertEquals('Unable to parse SRV data',
+ text_type(err.exception))
+
+ def test_data_for_SSHFP(self):
+ test_data = {
+ 'raw_values': [
+ {'value': '1 1 0123456789abcdef', 'ttl': 60},
+ {'value': '1 2 0123456789abcdef', 'ttl': 60},
+ {'value': '2 3 0123456789abcdef', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_SSHFP = MythicBeastsProvider._data_for_SSHFP('', test_data)
+ self.assertTrue(isinstance(test_SSHFP, dict))
+ self.assertEquals(3, len(test_SSHFP['values']))
+
+ with self.assertRaises(AssertionError) as err:
+ test_SSHFP = MythicBeastsProvider._data_for_SSHFP(
+ '',
+ {'raw_values': [{'value': '', 'ttl': 0}]}
+ )
+ self.assertEquals('Unable to parse SSHFP data',
+ text_type(err.exception))
+
+ def test_data_for_CAA(self):
+ test_data = {
+ 'raw_values': [{'value': '1 issue letsencrypt.org', 'ttl': 60}],
+ 'zone': 'unit.tests.',
+ }
+ test_CAA = MythicBeastsProvider._data_for_CAA('', test_data)
+ self.assertTrue(isinstance(test_CAA, dict))
+ self.assertEquals(3, len(test_CAA['value']))
+
+ with self.assertRaises(AssertionError) as err:
+ test_CAA = MythicBeastsProvider._data_for_CAA(
+ '',
+ {'raw_values': [{'value': '', 'ttl': 0}]}
+ )
+ self.assertEquals('Unable to parse CAA data',
+ text_type(err.exception))
+
+ def test_command_generation(self):
+ zone = Zone('unit.tests.', [])
+ zone.add_record(Record.new(zone, 'prawf-alias', {
+ 'ttl': 60,
+ 'type': 'ALIAS',
+ 'value': 'alias.unit.tests.',
+ }))
+ zone.add_record(Record.new(zone, 'prawf-ns', {
+ 'ttl': 300,
+ 'type': 'NS',
+ 'values': [
+ 'alias.unit.tests.',
+ 'alias2.unit.tests.',
+ ],
+ }))
+ zone.add_record(Record.new(zone, 'prawf-a', {
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': [
+ '1.2.3.4',
+ '5.6.7.8',
+ ],
+ }))
+ zone.add_record(Record.new(zone, 'prawf-aaaa', {
+ 'ttl': 60,
+ 'type': 'AAAA',
+ 'values': [
+ 'a:a::a',
+ 'b:b::b',
+ 'c:c::c:c',
+ ],
+ }))
+ zone.add_record(Record.new(zone, 'prawf-txt', {
+ 'ttl': 60,
+ 'type': 'TXT',
+ 'value': 'prawf prawf dyma prawf',
+ }))
+ zone.add_record(Record.new(zone, 'prawf-txt2', {
+ 'ttl': 60,
+ 'type': 'TXT',
+ 'value': 'v=DKIM1\\; k=rsa\\; p=prawf',
+ }))
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text='')
+
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+
+ plan = provider.plan(zone)
+ changes = plan.changes
+ generated_commands = []
+
+ for change in changes:
+ generated_commands.extend(
+ provider._compile_commands('ADD', change.new)
+ )
+
+ expected_commands = [
+ 'ADD prawf-alias.unit.tests 60 ANAME alias.unit.tests.',
+ 'ADD prawf-ns.unit.tests 300 NS alias.unit.tests.',
+ 'ADD prawf-ns.unit.tests 300 NS alias2.unit.tests.',
+ 'ADD prawf-a.unit.tests 60 A 1.2.3.4',
+ 'ADD prawf-a.unit.tests 60 A 5.6.7.8',
+ 'ADD prawf-aaaa.unit.tests 60 AAAA a:a::a',
+ 'ADD prawf-aaaa.unit.tests 60 AAAA b:b::b',
+ 'ADD prawf-aaaa.unit.tests 60 AAAA c:c::c:c',
+ 'ADD prawf-txt.unit.tests 60 TXT prawf prawf dyma prawf',
+ 'ADD prawf-txt2.unit.tests 60 TXT v=DKIM1; k=rsa; p=prawf',
+ ]
+
+ generated_commands.sort()
+ expected_commands.sort()
+
+ self.assertEquals(
+ generated_commands,
+ expected_commands
+ )
+
+ # Now test deletion
+ existing = 'prawf-txt 300 TXT prawf prawf dyma prawf\n' \
+ 'prawf-txt2 300 TXT v=DKIM1; k=rsa; p=prawf\n' \
+ 'prawf-a 60 A 1.2.3.4'
+
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text=existing)
+ wanted = Zone('unit.tests.', [])
+
+ plan = provider.plan(wanted)
+ changes = plan.changes
+ generated_commands = []
+
+ for change in changes:
+ generated_commands.extend(
+ provider._compile_commands('DELETE', change.existing)
+ )
+
+ expected_commands = [
+ 'DELETE prawf-a.unit.tests 60 A 1.2.3.4',
+ 'DELETE prawf-txt.unit.tests 300 TXT prawf prawf dyma prawf',
+ 'DELETE prawf-txt2.unit.tests 300 TXT v=DKIM1; k=rsa; p=prawf',
+ ]
+
+ generated_commands.sort()
+ expected_commands.sort()
+
+ self.assertEquals(
+ generated_commands,
+ expected_commands
+ )
+
+ def test_fake_command_generation(self):
+ class FakeChangeRecord(object):
+ def __init__(self):
+ self.__fqdn = 'prawf.unit.tests.'
+ self._type = 'NOOP'
+ self.value = 'prawf'
+ self.ttl = 60
+
+ @property
+ def record(self):
+ return self
+
+ @property
+ def fqdn(self):
+ return self.__fqdn
+
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text='')
+
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ record = FakeChangeRecord()
+ command = provider._compile_commands('ADD', record)
+ self.assertEquals([], command)
+
+ def test_populate(self):
+ provider = None
+
+ # Null passwords dict
+ with self.assertRaises(AssertionError) as err:
+ provider = MythicBeastsProvider('test', None)
+ self.assertEquals('Passwords must be a dictionary',
+ text_type(err.exception))
+
+ # Missing password
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=401, text='ERR Not authenticated')
+
+ with self.assertRaises(AssertionError) as err:
+ provider = MythicBeastsProvider('test', dict())
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(
+ 'Missing password for domain: unit.tests',
+ text_type(err.exception))
+
+ # Failed authentication
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=401, text='ERR Not authenticated')
+
+ with self.assertRaises(Exception) as err:
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(
+ 'Mythic Beasts unauthorized for zone: unit.tests',
+ err.exception.message)
+
+ # Check unmatched lines are ignored
+ test_data = 'This should not match'
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text=test_data)
+
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(0, len(zone.records))
+
+ # Check unsupported records are skipped
+ test_data = '@ 60 NOOP prawf\n@ 60 SPF prawf prawf prawf'
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text=test_data)
+
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(0, len(zone.records))
+
+ # Check no changes between what we support and what's parsed
+ # from the unit.tests. config YAML. Also make sure we see the same
+ # for both after we've thrown away records we don't support
+ with requests_mock() as mock:
+ with open('tests/fixtures/mythicbeasts-list.txt') as file_handle:
+ mock.post(ANY, status_code=200, text=file_handle.read())
+
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+
+ self.assertEquals(15, len(zone.records))
+ self.assertEquals(15, len(self.expected.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(0, len(changes))
+
+ def test_apply(self):
+ provider = MythicBeastsProvider('test', {
+ 'unit.tests.': 'mypassword'
+ })
+ zone = Zone('unit.tests.', [])
+
+ # Create blank zone
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text='')
+ provider.populate(zone)
+
+ self.assertEquals(0, len(zone.records))
+
+ # Record change failed
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text='')
+ provider.populate(zone)
+ zone.add_record(Record.new(zone, 'prawf', {
+ 'ttl': 300,
+ 'type': 'TXT',
+ 'value': 'prawf',
+ }))
+ plan = provider.plan(zone)
+
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=400, text='NADD 300 TXT prawf')
+
+ with self.assertRaises(Exception) as err:
+ provider.apply(plan)
+ self.assertEquals(
+ 'Mythic Beasts could not action command: unit.tests '
+ 'ADD prawf.unit.tests 300 TXT prawf', err.exception.message)
+
+ # Check deleting and adding/changing test record
+ existing = 'prawf 300 TXT prawf prawf prawf\ndileu 300 TXT dileu'
+
+ with requests_mock() as mock:
+ mock.post(ANY, status_code=200, text=existing)
+
+ # Mash up a new zone with records so a plan
+ # is generated with changes and applied. For some reason
+ # passing self.expected, or just changing each record's zone
+ # doesn't work. Nor does this without a single add_record after
+ wanted = Zone('unit.tests.', [])
+ for record in list(self.expected.records):
+ data = {'type': record._type}
+ data.update(record.data)
+ wanted.add_record(Record.new(wanted, record.name, data))
+
+ wanted.add_record(Record.new(wanted, 'prawf', {
+ 'ttl': 60,
+ 'type': 'TXT',
+ 'value': 'prawf yw e',
+ }))
+
+ plan = provider.plan(wanted)
+
+ # Octo ignores NS records (15-1)
+ self.assertEquals(1, len([c for c in plan.changes
+ if isinstance(c, Update)]))
+ self.assertEquals(1, len([c for c in plan.changes
+ if isinstance(c, Delete)]))
+ self.assertEquals(14, len([c for c in plan.changes
+ if isinstance(c, Create)]))
+ self.assertEquals(16, provider.apply(plan))
+ self.assertTrue(plan.exists)
diff --git a/tests/test_octodns_provider_ns1.py b/tests/test_octodns_provider_ns1.py
index 8530b62..00b068b 100644
--- a/tests/test_octodns_provider_ns1.py
+++ b/tests/test_octodns_provider_ns1.py
@@ -5,24 +5,19 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from mock import Mock, call, patch
-from nsone.rest.errors import AuthException, RateLimitException, \
+from collections import defaultdict
+from mock import call, patch
+from ns1.rest.errors import AuthException, RateLimitException, \
ResourceException
+from six import text_type
from unittest import TestCase
from octodns.record import Delete, Record, Update
-from octodns.provider.ns1 import Ns1Provider
+from octodns.provider.ns1 import Ns1Client, Ns1Exception, Ns1Provider
+from octodns.provider.plan import Plan
from octodns.zone import Zone
-class DummyZone(object):
-
- def __init__(self, records):
- self.data = {
- 'records': records
- }
-
-
class TestNs1Provider(TestCase):
zone = Zone('unit.tests.', [])
expected = set()
@@ -115,7 +110,7 @@ class TestNs1Provider(TestCase):
},
}))
- nsone_records = [{
+ ns1_records = [{
'type': 'A',
'ttl': 32,
'short_answers': ['1.2.3.4'],
@@ -171,43 +166,42 @@ class TestNs1Provider(TestCase):
'domain': 'unit.tests.',
}]
- @patch('nsone.NSONE.loadZone')
- def test_populate(self, load_mock):
+ @patch('ns1.rest.records.Records.retrieve')
+ @patch('ns1.rest.zones.Zones.retrieve')
+ def test_populate(self, zone_retrieve_mock, record_retrieve_mock):
provider = Ns1Provider('test', 'api-key')
# Bad auth
- load_mock.side_effect = AuthException('unauthorized')
+ zone_retrieve_mock.side_effect = AuthException('unauthorized')
zone = Zone('unit.tests.', [])
with self.assertRaises(AuthException) as ctx:
provider.populate(zone)
- self.assertEquals(load_mock.side_effect, ctx.exception)
+ self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception)
# General error
- load_mock.reset_mock()
- load_mock.side_effect = ResourceException('boom')
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = ResourceException('boom')
zone = Zone('unit.tests.', [])
with self.assertRaises(ResourceException) as ctx:
provider.populate(zone)
- self.assertEquals(load_mock.side_effect, ctx.exception)
- self.assertEquals(('unit.tests',), load_mock.call_args[0])
+ self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception)
+ self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
- # Non-existant zone doesn't populate anything
- load_mock.reset_mock()
- load_mock.side_effect = \
+ # Non-existent zone doesn't populate anything
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
zone = Zone('unit.tests.', [])
exists = provider.populate(zone)
self.assertEquals(set(), zone.records)
- self.assertEquals(('unit.tests',), load_mock.call_args[0])
+ self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
self.assertFalse(exists)
# Existing zone w/o records
- load_mock.reset_mock()
- nsone_zone = DummyZone([])
- load_mock.side_effect = [nsone_zone]
- zone_search = Mock()
- zone_search.return_value = [
- {
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': [{
"domain": "geo.unit.tests",
"zone": "unit.tests",
"type": "A",
@@ -220,22 +214,25 @@ class TestNs1Provider(TestCase):
{'answer': ['4.5.6.7'],
'meta': {'iso_region_code': ['NA-US-WA']}},
],
+ 'tier': 3,
'ttl': 34,
- },
- ]
- nsone_zone.search = zone_search
+ }],
+ }
+ zone_retrieve_mock.side_effect = [ns1_zone]
+ # Its tier 3 so we'll do a full lookup
+ record_retrieve_mock.side_effect = ns1_zone['records']
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(1, len(zone.records))
- self.assertEquals(('unit.tests',), load_mock.call_args[0])
+ self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
+ record_retrieve_mock.assert_has_calls([call('unit.tests',
+ 'geo.unit.tests', 'A')])
# Existing zone w/records
- load_mock.reset_mock()
- nsone_zone = DummyZone(self.nsone_records)
- load_mock.side_effect = [nsone_zone]
- zone_search = Mock()
- zone_search.return_value = [
- {
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': self.ns1_records + [{
"domain": "geo.unit.tests",
"zone": "unit.tests",
"type": "A",
@@ -248,27 +245,30 @@ class TestNs1Provider(TestCase):
{'answer': ['4.5.6.7'],
'meta': {'iso_region_code': ['NA-US-WA']}},
],
+ 'tier': 3,
'ttl': 34,
- },
- ]
- nsone_zone.search = zone_search
+ }],
+ }
+ zone_retrieve_mock.side_effect = [ns1_zone]
+ # Its tier 3 so we'll do a full lookup
+ record_retrieve_mock.side_effect = ns1_zone['records']
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(self.expected, zone.records)
- self.assertEquals(('unit.tests',), load_mock.call_args[0])
+ self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
+ record_retrieve_mock.assert_has_calls([call('unit.tests',
+ 'geo.unit.tests', 'A')])
# Test skipping unsupported record type
- load_mock.reset_mock()
- nsone_zone = DummyZone(self.nsone_records + [{
- 'type': 'UNSUPPORTED',
- 'ttl': 42,
- 'short_answers': ['unsupported'],
- 'domain': 'unsupported.unit.tests.',
- }])
- load_mock.side_effect = [nsone_zone]
- zone_search = Mock()
- zone_search.return_value = [
- {
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': self.ns1_records + [{
+ 'type': 'UNSUPPORTED',
+ 'ttl': 42,
+ 'short_answers': ['unsupported'],
+ 'domain': 'unsupported.unit.tests.',
+ }, {
"domain": "geo.unit.tests",
"zone": "unit.tests",
"type": "A",
@@ -281,18 +281,27 @@ class TestNs1Provider(TestCase):
{'answer': ['4.5.6.7'],
'meta': {'iso_region_code': ['NA-US-WA']}},
],
+ 'tier': 3,
'ttl': 34,
- },
- ]
- nsone_zone.search = zone_search
+ }],
+ }
+ zone_retrieve_mock.side_effect = [ns1_zone]
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(self.expected, zone.records)
- self.assertEquals(('unit.tests',), load_mock.call_args[0])
+ self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
+ record_retrieve_mock.assert_has_calls([call('unit.tests',
+ 'geo.unit.tests', 'A')])
- @patch('nsone.NSONE.createZone')
- @patch('nsone.NSONE.loadZone')
- def test_sync(self, load_mock, create_mock):
+ @patch('ns1.rest.records.Records.delete')
+ @patch('ns1.rest.records.Records.update')
+ @patch('ns1.rest.records.Records.create')
+ @patch('ns1.rest.records.Records.retrieve')
+ @patch('ns1.rest.zones.Zones.create')
+ @patch('ns1.rest.zones.Zones.retrieve')
+ def test_sync(self, zone_retrieve_mock, zone_create_mock,
+ record_retrieve_mock, record_create_mock,
+ record_update_mock, record_delete_mock):
provider = Ns1Provider('test', 'api-key')
desired = Zone('unit.tests.', [])
@@ -306,124 +315,149 @@ class TestNs1Provider(TestCase):
self.assertTrue(plan.exists)
# Fails, general error
- load_mock.reset_mock()
- create_mock.reset_mock()
- load_mock.side_effect = ResourceException('boom')
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ zone_create_mock.reset_mock()
+ zone_retrieve_mock.side_effect = ResourceException('boom')
with self.assertRaises(ResourceException) as ctx:
provider.apply(plan)
- self.assertEquals(load_mock.side_effect, ctx.exception)
+ self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception)
# Fails, bad auth
- load_mock.reset_mock()
- create_mock.reset_mock()
- load_mock.side_effect = \
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ zone_create_mock.reset_mock()
+ zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
- create_mock.side_effect = AuthException('unauthorized')
+ zone_create_mock.side_effect = AuthException('unauthorized')
with self.assertRaises(AuthException) as ctx:
provider.apply(plan)
- self.assertEquals(create_mock.side_effect, ctx.exception)
+ self.assertEquals(zone_create_mock.side_effect, ctx.exception)
- # non-existant zone, create
- load_mock.reset_mock()
- create_mock.reset_mock()
- load_mock.side_effect = \
+ # non-existent zone, create
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ zone_create_mock.reset_mock()
+ zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
- # ugh, need a mock zone with a mock prop since we're using getattr, we
- # can actually control side effects on `meth` with that.
- mock_zone = Mock()
- mock_zone.add_SRV = Mock()
- mock_zone.add_SRV.side_effect = [
+
+ zone_create_mock.side_effect = ['foo']
+ # Test out the create rate-limit handling, then 9 successes
+ record_create_mock.side_effect = [
RateLimitException('boo', period=0),
- None,
- ]
- create_mock.side_effect = [mock_zone]
+ ] + ([None] * 9)
+
got_n = provider.apply(plan)
self.assertEquals(expected_n, got_n)
+ # Zone was created
+ zone_create_mock.assert_has_calls([call('unit.tests')])
+ # Checking that we got some of the expected records too
+ record_create_mock.assert_has_calls([
+ call('unit.tests', 'unit.tests', 'A', answers=[
+ {'answer': ['1.2.3.4'], 'meta': {}}
+ ], filters=[], ttl=32),
+ call('unit.tests', 'unit.tests', 'CAA', answers=[
+ (0, 'issue', 'ca.unit.tests')
+ ], ttl=40),
+ call('unit.tests', 'unit.tests', 'MX', answers=[
+ (10, 'mx1.unit.tests.'), (20, 'mx2.unit.tests.')
+ ], ttl=35),
+ ])
+
# Update & delete
- load_mock.reset_mock()
- create_mock.reset_mock()
- nsone_zone = DummyZone(self.nsone_records + [{
- 'type': 'A',
- 'ttl': 42,
- 'short_answers': ['9.9.9.9'],
- 'domain': 'delete-me.unit.tests.',
- }])
- nsone_zone.data['records'][0]['short_answers'][0] = '2.2.2.2'
- nsone_zone.loadRecord = Mock()
- zone_search = Mock()
- zone_search.return_value = [
- {
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ zone_create_mock.reset_mock()
+
+ ns1_zone = {
+ 'records': self.ns1_records + [{
+ 'type': 'A',
+ 'ttl': 42,
+ 'short_answers': ['9.9.9.9'],
+ 'domain': 'delete-me.unit.tests.',
+ }, {
"domain": "geo.unit.tests",
"zone": "unit.tests",
"type": "A",
- "answers": [
- {'answer': ['1.1.1.1'], 'meta': {}},
- {'answer': ['1.2.3.4'],
- 'meta': {'ca_province': ['ON']}},
- {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}},
- {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}},
- {'answer': ['4.5.6.7'],
- 'meta': {'iso_region_code': ['NA-US-WA']}},
+ "short_answers": [
+ '1.1.1.1',
+ '1.2.3.4',
+ '2.3.4.5',
+ '3.4.5.6',
+ '4.5.6.7',
],
+            'tier': 3,  # This flags it as advanced, full load required
'ttl': 34,
- },
- ]
- nsone_zone.search = zone_search
- load_mock.side_effect = [nsone_zone, nsone_zone]
+ }],
+ }
+ ns1_zone['records'][0]['short_answers'][0] = '2.2.2.2'
+
+ ns1_record = {
+ "domain": "geo.unit.tests",
+ "zone": "unit.tests",
+ "type": "A",
+ "answers": [
+ {'answer': ['1.1.1.1'], 'meta': {}},
+ {'answer': ['1.2.3.4'],
+ 'meta': {'ca_province': ['ON']}},
+ {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}},
+ {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}},
+ {'answer': ['4.5.6.7'],
+ 'meta': {'iso_region_code': ['NA-US-WA']}},
+ ],
+ 'tier': 3,
+ 'ttl': 34,
+ }
+
+ record_retrieve_mock.side_effect = [ns1_record, ns1_record]
+ zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone]
plan = provider.plan(desired)
self.assertEquals(3, len(plan.changes))
- self.assertIsInstance(plan.changes[0], Update)
- self.assertIsInstance(plan.changes[2], Delete)
- # ugh, we need a mock record that can be returned from loadRecord for
- # the update and delete targets, we can add our side effects to that to
- # trigger rate limit handling
- mock_record = Mock()
- mock_record.update.side_effect = [
+ # Shouldn't rely on order so just count classes
+ classes = defaultdict(lambda: 0)
+ for change in plan.changes:
+ classes[change.__class__] += 1
+ self.assertEquals(1, classes[Delete])
+ self.assertEquals(2, classes[Update])
+
+ record_update_mock.side_effect = [
RateLimitException('one', period=0),
None,
None,
]
- mock_record.delete.side_effect = [
+ record_delete_mock.side_effect = [
RateLimitException('two', period=0),
None,
None,
]
- nsone_zone.loadRecord.side_effect = [mock_record, mock_record,
- mock_record]
+
+ record_retrieve_mock.side_effect = [ns1_record, ns1_record]
+ zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone]
got_n = provider.apply(plan)
self.assertEquals(3, got_n)
- nsone_zone.loadRecord.assert_has_calls([
- call('unit.tests', u'A'),
- call('geo', u'A'),
- call('delete-me', u'A'),
- ])
- mock_record.assert_has_calls([
- call.update(answers=[{'answer': [u'1.2.3.4'], 'meta': {}}],
- filters=[],
- ttl=32),
- call.update(answers=[{u'answer': [u'1.2.3.4'], u'meta': {}}],
- filters=[],
- ttl=32),
- call.update(
- answers=[
- {u'answer': [u'101.102.103.104'], u'meta': {}},
- {u'answer': [u'101.102.103.105'], u'meta': {}},
- {
- u'answer': [u'201.202.203.204'],
- u'meta': {
- u'iso_region_code': [u'NA-US-NY']
- },
- },
- ],
+
+ record_update_mock.assert_has_calls([
+ call('unit.tests', 'unit.tests', 'A', answers=[
+ {'answer': ['1.2.3.4'], 'meta': {}}],
+ filters=[],
+ ttl=32),
+ call('unit.tests', 'unit.tests', 'A', answers=[
+ {'answer': ['1.2.3.4'], 'meta': {}}],
+ filters=[],
+ ttl=32),
+ call('unit.tests', 'geo.unit.tests', 'A', answers=[
+ {'answer': ['101.102.103.104'], 'meta': {}},
+ {'answer': ['101.102.103.105'], 'meta': {}},
+ {
+ 'answer': ['201.202.203.204'],
+ 'meta': {'iso_region_code': ['NA-US-NY']}
+ }],
filters=[
- {u'filter': u'shuffle', u'config': {}},
- {u'filter': u'geotarget_country', u'config': {}},
- {u'filter': u'select_first_n', u'config': {u'N': 1}},
- ],
- ttl=34),
- call.delete(),
- call.delete()
+ {'filter': 'shuffle', 'config': {}},
+ {'filter': 'geotarget_country', 'config': {}},
+ {'filter': 'select_first_n', 'config': {'N': 1}}],
+ ttl=34)
])
def test_escaping(self):
@@ -448,21 +482,21 @@ class TestNs1Provider(TestCase):
'type': 'SPF',
'value': 'foo\\; bar baz\\; blip'
})
- self.assertEquals(['foo; bar baz; blip'],
- provider._params_for_SPF(record)['answers'])
+ params, _ = provider._params_for_SPF(record)
+ self.assertEquals(['foo; bar baz; blip'], params['answers'])
record = Record.new(zone, 'txt', {
'ttl': 35,
'type': 'TXT',
'value': 'foo\\; bar baz\\; blip'
})
- self.assertEquals(['foo; bar baz; blip'],
- provider._params_for_TXT(record)['answers'])
+        params, _ = provider._params_for_TXT(record)
+ self.assertEquals(['foo; bar baz; blip'], params['answers'])
def test_data_for_CNAME(self):
provider = Ns1Provider('test', 'api-key')
- # answers from nsone
+ # answers from ns1
a_record = {
'ttl': 31,
'type': 'CNAME',
@@ -476,7 +510,7 @@ class TestNs1Provider(TestCase):
self.assertEqual(a_expected,
provider._data_for_CNAME(a_record['type'], a_record))
- # no answers from nsone
+ # no answers from ns1
b_record = {
'ttl': 32,
'type': 'CNAME',
@@ -489,3 +523,1532 @@ class TestNs1Provider(TestCase):
}
self.assertEqual(b_expected,
provider._data_for_CNAME(b_record['type'], b_record))
+
+
+class TestNs1ProviderDynamic(TestCase):
+ zone = Zone('unit.tests.', [])
+
+ def record(self):
+ # return a new object each time so we can mess with it without causing
+ # problems from test to test
+ return Record.new(self.zone, '', {
+ 'dynamic': {
+ 'pools': {
+ 'lhr': {
+ 'fallback': 'iad',
+ 'values': [{
+ 'value': '3.4.5.6',
+ }],
+ },
+ 'iad': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }, {
+ 'value': '2.3.4.5',
+ }],
+ },
+ },
+ 'rules': [{
+ 'geos': [
+ 'AF',
+ 'EU-GB',
+ 'NA-US-FL'
+ ],
+ 'pool': 'lhr',
+ }, {
+ 'geos': [
+ 'AF-ZW',
+ ],
+ 'pool': 'iad',
+ }, {
+ 'pool': 'iad',
+ }],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'host': 'send.me',
+ 'path': '/_ping',
+ 'port': 80,
+ 'protocol': 'HTTP',
+ }
+ },
+ 'ttl': 32,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'meta': {},
+ })
+
+ def test_notes(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ self.assertEquals({}, provider._parse_notes(None))
+ self.assertEquals({}, provider._parse_notes(''))
+ self.assertEquals({}, provider._parse_notes('blah-blah-blah'))
+
+ # Round tripping
+ data = {
+ 'key': 'value',
+ 'priority': '1',
+ }
+ notes = provider._encode_notes(data)
+ self.assertEquals(data, provider._parse_notes(notes))
+
+ def test_monitors_for(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ # pre-populate the client's monitors cache
+ monitor_one = {
+ 'config': {
+ 'host': '1.2.3.4',
+ },
+ 'notes': 'host:unit.tests type:A',
+ }
+ monitor_four = {
+ 'config': {
+ 'host': '2.3.4.5',
+ },
+ 'notes': 'host:unit.tests type:A',
+ }
+ provider._client._monitors_cache = {
+ 'one': monitor_one,
+ 'two': {
+ 'config': {
+ 'host': '8.8.8.8',
+ },
+ 'notes': 'host:unit.tests type:AAAA',
+ },
+ 'three': {
+ 'config': {
+ 'host': '9.9.9.9',
+ },
+ 'notes': 'host:other.unit.tests type:A',
+ },
+ 'four': monitor_four,
+ }
+
+ # Would match, but won't get there b/c it's not dynamic
+ record = Record.new(self.zone, '', {
+ 'ttl': 32,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'meta': {},
+ })
+ self.assertEquals({}, provider._monitors_for(record))
+
+ # Will match some records
+ self.assertEquals({
+ '1.2.3.4': monitor_one,
+ '2.3.4.5': monitor_four,
+ }, provider._monitors_for(self.record()))
+
+ def test_uuid(self):
+ # Just a smoke test/for coverage
+ provider = Ns1Provider('test', 'api-key')
+ self.assertTrue(provider._uuid())
+
+ @patch('octodns.provider.ns1.Ns1Provider._uuid')
+ @patch('ns1.rest.data.Feed.create')
+ def test_feed_create(self, datafeed_create_mock, uuid_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {}
+
+ uuid_mock.reset_mock()
+ datafeed_create_mock.reset_mock()
+ uuid_mock.side_effect = ['xxxxxxxxxxxxxx']
+ feed = {
+ 'id': 'feed',
+ }
+ datafeed_create_mock.side_effect = [feed]
+ monitor = {
+ 'id': 'one',
+ 'name': 'one name',
+ 'config': {
+ 'host': '1.2.3.4',
+ },
+ 'notes': 'host:unit.tests type:A',
+ }
+ self.assertEquals('feed', provider._feed_create(monitor))
+ datafeed_create_mock.assert_has_calls([call('foo', 'one name - xxxxxx',
+ {'jobid': 'one'})])
+
+ @patch('octodns.provider.ns1.Ns1Provider._feed_create')
+ @patch('octodns.provider.ns1.Ns1Client.monitors_create')
+ @patch('octodns.provider.ns1.Ns1Client.notifylists_create')
+ def test_monitor_create(self, notifylists_create_mock,
+ monitors_create_mock, feed_create_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {}
+
+ notifylists_create_mock.reset_mock()
+ monitors_create_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ notifylists_create_mock.side_effect = [{
+ 'id': 'nl-id',
+ }]
+ monitors_create_mock.side_effect = [{
+ 'id': 'mon-id',
+ }]
+ feed_create_mock.side_effect = ['feed-id']
+ monitor = {
+ 'name': 'test monitor',
+ }
+ monitor_id, feed_id = provider._monitor_create(monitor)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitors_create_mock.assert_has_calls([call(name='test monitor',
+ notify_list='nl-id')])
+
+ def test_monitor_gen(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ value = '3.4.5.6'
+ record = self.record()
+ monitor = provider._monitor_gen(record, value)
+ self.assertEquals(value, monitor['config']['host'])
+ self.assertTrue('\\nHost: send.me\\r' in monitor['config']['send'])
+ self.assertFalse(monitor['config']['ssl'])
+ self.assertEquals('host:unit.tests type:A', monitor['notes'])
+
+ record._octodns['healthcheck']['protocol'] = 'HTTPS'
+ monitor = provider._monitor_gen(record, value)
+ self.assertTrue(monitor['config']['ssl'])
+
+ record._octodns['healthcheck']['protocol'] = 'TCP'
+ monitor = provider._monitor_gen(record, value)
+ # No http send done
+ self.assertFalse('send' in monitor['config'])
+ # No http response expected
+ self.assertFalse('rules' in monitor)
+
+ def test_monitor_is_match(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ # Empty matches empty
+ self.assertTrue(provider._monitor_is_match({}, {}))
+
+ # Anything matches empty
+ self.assertTrue(provider._monitor_is_match({}, {
+ 'anything': 'goes'
+ }))
+
+ # Missing doesn't match
+ self.assertFalse(provider._monitor_is_match({
+ 'exepct': 'this',
+ }, {
+ 'anything': 'goes'
+ }))
+
+ # Identical matches
+ self.assertTrue(provider._monitor_is_match({
+ 'exepct': 'this',
+ }, {
+ 'exepct': 'this',
+ }))
+
+ # Different values don't match
+ self.assertFalse(provider._monitor_is_match({
+ 'exepct': 'this',
+ }, {
+ 'exepct': 'that',
+ }))
+
+ # Different sub-values don't match
+ self.assertFalse(provider._monitor_is_match({
+ 'exepct': {
+ 'this': 'to-be',
+ },
+ }, {
+ 'exepct': {
+ 'this': 'something-else',
+ },
+ }))
+
+ @patch('octodns.provider.ns1.Ns1Provider._feed_create')
+ @patch('octodns.provider.ns1.Ns1Client.monitors_update')
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_create')
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_gen')
+ def test_monitor_sync(self, monitor_gen_mock, monitor_create_mock,
+ monitors_update_mock, feed_create_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # No existing monitor
+ monitor_gen_mock.reset_mock()
+ monitor_create_mock.reset_mock()
+ monitors_update_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ monitor_gen_mock.side_effect = [{'key': 'value'}]
+ monitor_create_mock.side_effect = [('mon-id', 'feed-id')]
+ value = '1.2.3.4'
+ record = self.record()
+ monitor_id, feed_id = provider._monitor_sync(record, value, None)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitor_gen_mock.assert_has_calls([call(record, value)])
+ monitor_create_mock.assert_has_calls([call({'key': 'value'})])
+ monitors_update_mock.assert_not_called()
+ feed_create_mock.assert_not_called()
+
+ # Existing monitor that doesn't need updates
+ monitor_gen_mock.reset_mock()
+ monitor_create_mock.reset_mock()
+ monitors_update_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ monitor = {
+ 'id': 'mon-id',
+ 'key': 'value',
+ 'name': 'monitor name',
+ }
+ monitor_gen_mock.side_effect = [monitor]
+ monitor_id, feed_id = provider._monitor_sync(record, value,
+ monitor)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitor_gen_mock.assert_called_once()
+ monitor_create_mock.assert_not_called()
+ monitors_update_mock.assert_not_called()
+ feed_create_mock.assert_not_called()
+
+ # Existing monitor that doesn't need updates, but is missing its feed
+ monitor_gen_mock.reset_mock()
+ monitor_create_mock.reset_mock()
+ monitors_update_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ monitor = {
+ 'id': 'mon-id2',
+ 'key': 'value',
+ 'name': 'monitor name',
+ }
+ monitor_gen_mock.side_effect = [monitor]
+ feed_create_mock.side_effect = ['feed-id2']
+ monitor_id, feed_id = provider._monitor_sync(record, value,
+ monitor)
+ self.assertEquals('mon-id2', monitor_id)
+ self.assertEquals('feed-id2', feed_id)
+ monitor_gen_mock.assert_called_once()
+ monitor_create_mock.assert_not_called()
+ monitors_update_mock.assert_not_called()
+ feed_create_mock.assert_has_calls([call(monitor)])
+
+ # Existing monitor that needs updates
+ monitor_gen_mock.reset_mock()
+ monitor_create_mock.reset_mock()
+ monitors_update_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ monitor = {
+ 'id': 'mon-id',
+ 'key': 'value',
+ 'name': 'monitor name',
+ }
+ gened = {
+ 'other': 'thing',
+ }
+ monitor_gen_mock.side_effect = [gened]
+ monitor_id, feed_id = provider._monitor_sync(record, value,
+ monitor)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitor_gen_mock.assert_called_once()
+ monitor_create_mock.assert_not_called()
+ monitors_update_mock.assert_has_calls([call('mon-id', other='thing')])
+ feed_create_mock.assert_not_called()
+
+ @patch('octodns.provider.ns1.Ns1Client.notifylists_delete')
+ @patch('octodns.provider.ns1.Ns1Client.monitors_delete')
+ @patch('octodns.provider.ns1.Ns1Client.datafeed_delete')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_monitors_gc(self, monitors_for_mock, datafeed_delete_mock,
+ monitors_delete_mock, notifylists_delete_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # No active monitors and no existing, nothing will happen
+ monitors_for_mock.reset_mock()
+ datafeed_delete_mock.reset_mock()
+ monitors_delete_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ monitors_for_mock.side_effect = [{}]
+ record = self.record()
+ provider._monitors_gc(record)
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_not_called()
+ monitors_delete_mock.assert_not_called()
+ notifylists_delete_mock.assert_not_called()
+
+ # No active monitors and one existing, delete all the things
+ monitors_for_mock.reset_mock()
+ datafeed_delete_mock.reset_mock()
+ monitors_delete_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ 'x': {
+ 'id': 'mon-id',
+ 'notify_list': 'nl-id',
+ }
+ }]
+ provider._monitors_gc(record)
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')])
+ monitors_delete_mock.assert_has_calls([call('mon-id')])
+ notifylists_delete_mock.assert_has_calls([call('nl-id')])
+
+ # Same existing, this time in active list, should be noop
+ monitors_for_mock.reset_mock()
+ datafeed_delete_mock.reset_mock()
+ monitors_delete_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ 'x': {
+ 'id': 'mon-id',
+ 'notify_list': 'nl-id',
+ }
+ }]
+ provider._monitors_gc(record, {'mon-id'})
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_not_called()
+ monitors_delete_mock.assert_not_called()
+ notifylists_delete_mock.assert_not_called()
+
+ # Non-active monitor w/o a feed, and another monitor that's left alone
+ # b/c it's active
+ monitors_for_mock.reset_mock()
+ datafeed_delete_mock.reset_mock()
+ monitors_delete_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ 'x': {
+ 'id': 'mon-id',
+ 'notify_list': 'nl-id',
+ },
+ 'y': {
+ 'id': 'mon-id2',
+ 'notify_list': 'nl-id2',
+ },
+ }]
+ provider._monitors_gc(record, {'mon-id'})
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_not_called()
+ monitors_delete_mock.assert_has_calls([call('mon-id2')])
+ notifylists_delete_mock.assert_has_calls([call('nl-id2')])
+
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_params_for_dynamic_region_only(self, monitors_for_mock,
+ monitor_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # provider._params_for_A() calls provider._monitors_for() and
+ # provider._monitor_sync(). Mock their return values so that we don't
+ # make NS1 API calls during tests
+ monitors_for_mock.reset_mock()
+ monitor_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ '3.4.5.6': 'mid-3',
+ }]
+ monitor_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ('mid-2', 'fid-2'),
+ ('mid-3', 'fid-3'),
+ ]
+
+ record = self.record()
+ rule0 = record.data['dynamic']['rules'][0]
+ rule1 = record.data['dynamic']['rules'][1]
+ rule0['geos'] = ['AF', 'EU']
+ rule1['geos'] = ['NA']
+ ret, monitor_ids = provider._params_for_A(record)
+ self.assertEquals(10, len(ret['answers']))
+ self.assertEquals(ret['filters'],
+ Ns1Provider._FILTER_CHAIN_WITH_REGION(provider,
+ True))
+ self.assertEquals({
+ 'iad__catchall': {
+ 'meta': {
+ 'note': 'rule-order:2'
+ }
+ },
+ 'iad__georegion': {
+ 'meta': {
+ 'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
+ 'note': 'rule-order:1'
+ }
+ },
+ 'lhr__georegion': {
+ 'meta': {
+ 'georegion': ['AFRICA', 'EUROPE'],
+ 'note': 'fallback:iad rule-order:0'
+ }
+ }
+ }, ret['regions'])
+ self.assertEquals({'mid-1', 'mid-2', 'mid-3'}, monitor_ids)
+
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_params_for_dynamic_state_only(self, monitors_for_mock,
+ monitor_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # provider._params_for_A() calls provider._monitors_for() and
+ # provider._monitor_sync(). Mock their return values so that we don't
+ # make NS1 API calls during tests
+ monitors_for_mock.reset_mock()
+ monitor_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ '3.4.5.6': 'mid-3',
+ }]
+ monitor_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ('mid-2', 'fid-2'),
+ ('mid-3', 'fid-3'),
+ ]
+
+ record = self.record()
+ rule0 = record.data['dynamic']['rules'][0]
+ rule1 = record.data['dynamic']['rules'][1]
+ rule0['geos'] = ['AF', 'EU']
+ rule1['geos'] = ['NA-US-CA']
+ ret, _ = provider._params_for_A(record)
+ self.assertEquals(10, len(ret['answers']))
+ exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
+ True)
+ self.assertEquals(ret['filters'], exp)
+ self.assertEquals({
+ 'iad__catchall': {
+ 'meta': {
+ 'note': 'rule-order:2'
+ }
+ },
+ 'iad__country': {
+ 'meta': {
+ 'note': 'rule-order:1',
+ 'us_state': ['CA']
+ }
+ },
+ 'lhr__georegion': {
+ 'meta': {
+ 'georegion': ['AFRICA', 'EUROPE'],
+ 'note': 'fallback:iad rule-order:0'
+ }
+ }
+ }, ret['regions'])
+
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+    def test_params_for_dynamic_continent_and_countries(self,
+ monitors_for_mock,
+ monitor_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # provider._params_for_A() calls provider._monitors_for() and
+ # provider._monitor_sync(). Mock their return values so that we don't
+ # make NS1 API calls during tests
+ monitors_for_mock.reset_mock()
+ monitor_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ '3.4.5.6': 'mid-3',
+ }]
+ monitor_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ('mid-2', 'fid-2'),
+ ('mid-3', 'fid-3'),
+ ]
+
+ record = self.record()
+ rule0 = record.data['dynamic']['rules'][0]
+ rule1 = record.data['dynamic']['rules'][1]
+ rule0['geos'] = ['AF', 'EU', 'NA-US-CA']
+ rule1['geos'] = ['NA', 'NA-US']
+ ret, _ = provider._params_for_A(record)
+
+ self.assertEquals(17, len(ret['answers']))
+ # Deeply check the answers we have here
+ # group the answers based on where they came from
+ notes = defaultdict(list)
+ for answer in ret['answers']:
+ notes[answer['meta']['note']].append(answer)
+ # Remove the meta and region part since it'll vary based on the
+ # exact pool, that'll let us == them down below
+ del answer['meta']
+ del answer['region']
+
+        # Expected groups. iad has multiple occurrences here: a country and a
+        # region that were split out based on targeting a continent and a
+        # state. It finally has a catchall. Those are examples of the two ways
+        # pools get expanded.
+        #
+        # lhr splits in two, with a region and a country.
+        #
+        # lhr thus has both a georegion (for continents) and a country entry.
+        # The first is an example of a repeated target pool in a rule (only
+        # allowed when the 2nd is a catchall.)
+ self.assertEquals(['from:--default--', 'from:iad__catchall',
+ 'from:iad__country', 'from:iad__georegion',
+ 'from:lhr__country', 'from:lhr__georegion'],
+ sorted(notes.keys()))
+
+ # All the iad's should match (after meta and region were removed)
+ self.assertEquals(notes['from:iad__catchall'],
+ notes['from:iad__country'])
+ self.assertEquals(notes['from:iad__catchall'],
+ notes['from:iad__georegion'])
+
+ # The lhrs should match each other too
+ self.assertEquals(notes['from:lhr__georegion'],
+ notes['from:lhr__country'])
+
+ # We have both country and region filter chain entries
+ exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
+ True)
+ self.assertEquals(ret['filters'], exp)
+
+ # and our region details match the expected behaviors/targeting
+ self.assertEquals({
+ 'iad__catchall': {
+ 'meta': {
+ 'note': 'rule-order:2'
+ }
+ },
+ 'iad__country': {
+ 'meta': {
+ 'country': ['US'],
+ 'note': 'rule-order:1'
+ }
+ },
+ 'iad__georegion': {
+ 'meta': {
+ 'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
+ 'note': 'rule-order:1'
+ }
+ },
+ 'lhr__country': {
+ 'meta': {
+ 'note': 'fallback:iad rule-order:0',
+ 'us_state': ['CA']
+ }
+ },
+ 'lhr__georegion': {
+ 'meta': {
+ 'georegion': ['AFRICA', 'EUROPE'],
+ 'note': 'fallback:iad rule-order:0'
+ }
+ }
+ }, ret['regions'])
+
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_params_for_dynamic_oceania(self, monitors_for_mock,
+ monitor_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # provider._params_for_A() calls provider._monitors_for() and
+ # provider._monitor_sync(). Mock their return values so that we don't
+ # make NS1 API calls during tests
+ monitors_for_mock.reset_mock()
+ monitor_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ '3.4.5.6': 'mid-3',
+ }]
+ monitor_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ('mid-2', 'fid-2'),
+ ('mid-3', 'fid-3'),
+ ]
+
+ # Set geos to 'OC' in rules[0] (pool - 'lhr')
+ # Check returned dict has list of countries under 'OC'
+ record = self.record()
+ rule0 = record.data['dynamic']['rules'][0]
+ rule0['geos'] = ['OC']
+ ret, _ = provider._params_for_A(record)
+
+ # Make sure the country list expanded into all the OC countries
+ got = set(ret['regions']['lhr__country']['meta']['country'])
+ self.assertEquals(got,
+ Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC'])
+
+ # When rules has 'OC', it is converted to list of countries in the
+ # params. Look if the returned filters is the filter chain with country
+ self.assertEquals(ret['filters'],
+ Ns1Provider._FILTER_CHAIN_WITH_COUNTRY(provider,
+ True))
+
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_params_for_dynamic(self, monitors_for_mock, monitors_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+        # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ monitors_for_mock.reset_mock()
+ monitors_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ '3.4.5.6': 'mid-3',
+ }]
+ monitors_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ('mid-2', 'fid-2'),
+ ('mid-3', 'fid-3'),
+ ]
+ # This indirectly calls into _params_for_dynamic_A and tests the
+ # handling to get there
+ record = self.record()
+ ret, _ = provider._params_for_A(record)
+
+ # Given that record has both country and region in the rules,
+ # the returned filter chain should be one with region and country
+ self.assertEquals(ret['filters'],
+ Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(
+ provider, True))
+
+ monitors_for_mock.assert_has_calls([call(record)])
+ monitors_sync_mock.assert_has_calls([
+ call(record, '1.2.3.4', None),
+ call(record, '2.3.4.5', None),
+ call(record, '3.4.5.6', 'mid-3'),
+ ])
+
+ record = Record.new(self.zone, 'geo', {
+ 'ttl': 34,
+ 'type': 'A',
+ 'values': ['101.102.103.104', '101.102.103.105'],
+ 'geo': {'EU': ['201.202.203.204']},
+ 'meta': {},
+ })
+ params, _ = provider._params_for_geo_A(record)
+ self.assertEquals([], params['filters'])
+
+ def test_data_for_dynamic_A(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ # Unexpected filters throws an error
+ ns1_record = {
+ 'domain': 'unit.tests',
+ 'filters': [],
+ }
+ with self.assertRaises(Ns1Exception) as ctx:
+ provider._data_for_dynamic_A('A', ns1_record)
+ self.assertEquals('Unrecognized advanced record',
+ text_type(ctx.exception))
+
+ # empty record turns into empty data
+ ns1_record = {
+ 'answers': [],
+ 'domain': 'unit.tests',
+ 'filters': Ns1Provider._BASIC_FILTER_CHAIN(provider, True),
+ 'regions': {},
+ 'ttl': 42,
+ }
+ data = provider._data_for_dynamic_A('A', ns1_record)
+ self.assertEquals({
+ 'dynamic': {
+ 'pools': {},
+ 'rules': [],
+ },
+ 'ttl': 42,
+ 'type': 'A',
+ 'values': [],
+ }, data)
+
+ # Test out a small, but realistic setup that covers all the options
+ # We have country and region in the test config
+ filters = provider._get_updated_filter_chain(True, True)
+ catchall_pool_name = 'iad__catchall'
+ ns1_record = {
+ 'answers': [{
+ 'answer': ['3.4.5.6'],
+ 'meta': {
+ 'priority': 1,
+ 'note': 'from:lhr__country',
+ },
+ 'region': 'lhr',
+ }, {
+ 'answer': ['2.3.4.5'],
+ 'meta': {
+ 'priority': 2,
+ 'weight': 12,
+ 'note': 'from:iad',
+ },
+ 'region': 'lhr',
+ }, {
+ 'answer': ['1.2.3.4'],
+ 'meta': {
+ 'priority': 3,
+ 'note': 'from:--default--',
+ },
+ 'region': 'lhr',
+ }, {
+ 'answer': ['2.3.4.5'],
+ 'meta': {
+ 'priority': 1,
+ 'weight': 12,
+ 'note': 'from:iad',
+ },
+ 'region': 'iad',
+ }, {
+ 'answer': ['1.2.3.4'],
+ 'meta': {
+ 'priority': 2,
+ 'note': 'from:--default--',
+ },
+ 'region': 'iad',
+ }, {
+ 'answer': ['2.3.4.5'],
+ 'meta': {
+ 'priority': 1,
+ 'weight': 12,
+ 'note': 'from:{}'.format(catchall_pool_name),
+ },
+ 'region': catchall_pool_name,
+ }, {
+ 'answer': ['1.2.3.4'],
+ 'meta': {
+ 'priority': 2,
+ 'note': 'from:--default--',
+ },
+ 'region': catchall_pool_name,
+ }],
+ 'domain': 'unit.tests',
+ 'filters': filters,
+ 'regions': {
+ # lhr will use the new-split style names (and that will require
+ # combining in the code to produce the expected answer
+ 'lhr__georegion': {
+ 'meta': {
+ 'note': 'rule-order:1 fallback:iad',
+ 'georegion': ['AFRICA'],
+ },
+ },
+ 'lhr__country': {
+ 'meta': {
+ 'note': 'rule-order:1 fallback:iad',
+ 'country': ['CA'],
+ 'us_state': ['OR'],
+ },
+ },
+ # iad will use the old style "plain" region naming. We won't
+ # see mixed names like this in practice, but this should
+ # exercise both paths
+ 'iad': {
+ 'meta': {
+ 'note': 'rule-order:2',
+ 'country': ['ZW'],
+ },
+ },
+ catchall_pool_name: {
+ 'meta': {
+ 'note': 'rule-order:3',
+ },
+ }
+ },
+ 'tier': 3,
+ 'ttl': 42,
+ }
+ data = provider._data_for_dynamic_A('A', ns1_record)
+ self.assertEquals({
+ 'dynamic': {
+ 'pools': {
+ 'iad': {
+ 'fallback': None,
+ 'values': [{
+ 'value': '2.3.4.5',
+ 'weight': 12,
+ }],
+ },
+ 'lhr': {
+ 'fallback': 'iad',
+ 'values': [{
+ 'weight': 1,
+ 'value': '3.4.5.6',
+ }],
+ },
+ },
+ 'rules': [{
+ '_order': '1',
+ 'geos': [
+ 'AF',
+ 'NA-CA',
+ 'NA-US-OR',
+ ],
+ 'pool': 'lhr',
+ }, {
+ '_order': '2',
+ 'geos': [
+ 'AF-ZW',
+ ],
+ 'pool': 'iad',
+ }, {
+ '_order': '3',
+ 'pool': 'iad',
+ }],
+ },
+ 'ttl': 42,
+ 'type': 'A',
+ 'values': ['1.2.3.4'],
+ }, data)
+
+ # Same answer if we go through _data_for_A which out sources the job to
+ # _data_for_dynamic_A
+ data2 = provider._data_for_A('A', ns1_record)
+ self.assertEquals(data, data2)
+
+ # Same answer if we have an old-style catchall name
+ old_style_catchall_pool_name = 'catchall__iad'
+ ns1_record['answers'][-2]['region'] = old_style_catchall_pool_name
+ ns1_record['answers'][-1]['region'] = old_style_catchall_pool_name
+ ns1_record['regions'][old_style_catchall_pool_name] = \
+ ns1_record['regions'][catchall_pool_name]
+ del ns1_record['regions'][catchall_pool_name]
+ data3 = provider._data_for_dynamic_A('A', ns1_record)
+        self.assertEquals(data, data3)
+
+ # Oceania test cases
+ # 1. Full list of countries should return 'OC' in geos
+ oc_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']
+ ns1_record['regions']['lhr__country']['meta']['country'] = \
+ list(oc_countries)
+ data3 = provider._data_for_A('A', ns1_record)
+ self.assertTrue('OC' in data3['dynamic']['rules'][0]['geos'])
+
+ # 2. Partial list of countries should return just those
+ partial_oc_cntry_list = list(oc_countries)[:5]
+ ns1_record['regions']['lhr__country']['meta']['country'] = \
+ partial_oc_cntry_list
+ data4 = provider._data_for_A('A', ns1_record)
+ for c in partial_oc_cntry_list:
+ self.assertTrue(
+ 'OC-{}'.format(c) in data4['dynamic']['rules'][0]['geos'])
+
+ @patch('ns1.rest.records.Records.retrieve')
+ @patch('ns1.rest.zones.Zones.retrieve')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_extra_changes(self, monitors_for_mock, zones_retrieve_mock,
+ records_retrieve_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+ desired = Zone('unit.tests.', [])
+
+ # Empty zone and no changes
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+
+ extra = provider._extra_changes(desired, [])
+ self.assertFalse(extra)
+ monitors_for_mock.assert_not_called()
+
+ # Non-existent zone. No changes
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.side_effect = \
+ ResourceException('server error: zone not found')
+ records_retrieve_mock.reset_mock()
+ extra = provider._extra_changes(desired, [])
+ self.assertFalse(extra)
+
+ # Unexpected exception message
+ zones_retrieve_mock.reset_mock()
+ zones_retrieve_mock.side_effect = ResourceException('boom')
+ with self.assertRaises(ResourceException) as ctx:
+ extra = provider._extra_changes(desired, [])
+ self.assertEquals(zones_retrieve_mock.side_effect, ctx.exception)
+
+ # Simple record, ignored, filter update lookups ignored
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ zones_retrieve_mock.side_effect = \
+ ResourceException('server error: zone not found')
+
+ simple = Record.new(desired, '', {
+ 'ttl': 32,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'meta': {},
+ })
+ desired.add_record(simple)
+ extra = provider._extra_changes(desired, [])
+ self.assertFalse(extra)
+ monitors_for_mock.assert_not_called()
+
+ # Dynamic record, inspectable
+ dynamic = Record.new(desired, 'dyn', {
+ 'dynamic': {
+ 'pools': {
+ 'iad': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'iad',
+ }],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'host': 'send.me',
+ 'path': '/_ping',
+ 'port': 80,
+ 'protocol': 'HTTP',
+ }
+ },
+ 'ttl': 32,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'meta': {},
+ })
+ desired.add_record(dynamic)
+
+ # untouched, but everything in sync so no change needed
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ # Generate what we expect to have
+ gend = provider._monitor_gen(dynamic, '1.2.3.4')
+ gend.update({
+ 'id': 'mid', # need to add an id
+ 'notify_list': 'xyz', # need to add a notify list (for now)
+ })
+ monitors_for_mock.side_effect = [{
+ '1.2.3.4': gend,
+ }]
+ extra = provider._extra_changes(desired, [])
+ self.assertFalse(extra)
+ monitors_for_mock.assert_has_calls([call(dynamic)])
+
+ update = Update(dynamic, dynamic)
+
+ # If we don't have a notify list we're broken and we'll expect to see
+ # an Update
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ del gend['notify_list']
+ monitors_for_mock.side_effect = [{
+ '1.2.3.4': gend,
+ }]
+ extra = provider._extra_changes(desired, [])
+ self.assertEquals(1, len(extra))
+ extra = list(extra)[0]
+ self.assertIsInstance(extra, Update)
+ self.assertEquals(dynamic, extra.new)
+ monitors_for_mock.assert_has_calls([call(dynamic)])
+
+ # Add notify_list back and change the healthcheck protocol, we'll still
+ # expect to see an update
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ gend['notify_list'] = 'xyz'
+ dynamic._octodns['healthcheck']['protocol'] = 'HTTPS'
+
+ monitors_for_mock.side_effect = [{
+ '1.2.3.4': gend,
+ }]
+ extra = provider._extra_changes(desired, [])
+ self.assertEquals(1, len(extra))
+ extra = list(extra)[0]
+ self.assertIsInstance(extra, Update)
+ self.assertEquals(dynamic, extra.new)
+ monitors_for_mock.assert_has_calls([call(dynamic)])
+
+ # If it's in the changed list, it'll be ignored
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ extra = provider._extra_changes(desired, [update])
+ self.assertFalse(extra)
+ monitors_for_mock.assert_not_called()
+
+ # Test changes in filters
+
+ # No change in filters
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': [{
+ "domain": "dyn.unit.tests",
+ "zone": "unit.tests",
+ "type": "A",
+ "tier": 3,
+ "filters": Ns1Provider._BASIC_FILTER_CHAIN(provider, True)
+ }],
+ }
+ monitors_for_mock.side_effect = [{}]
+ zones_retrieve_mock.side_effect = [ns1_zone]
+ records_retrieve_mock.side_effect = ns1_zone['records']
+ extra = provider._extra_changes(desired, [])
+ self.assertFalse(extra)
+
+ # filters need an update
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': [{
+ "domain": "dyn.unit.tests",
+ "zone": "unit.tests",
+ "type": "A",
+ "tier": 3,
+ "filters": Ns1Provider._BASIC_FILTER_CHAIN(provider, False)
+ }],
+ }
+ monitors_for_mock.side_effect = [{}]
+ zones_retrieve_mock.side_effect = [ns1_zone]
+ records_retrieve_mock.side_effect = ns1_zone['records']
+ extra = provider._extra_changes(desired, [])
+ self.assertTrue(extra)
+
+ # Mixed disabled in filters. Raise Ns1Exception
+ monitors_for_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+ records_retrieve_mock.reset_mock()
+ ns1_zone = {
+ 'records': [{
+ "domain": "dyn.unit.tests",
+ "zone": "unit.tests",
+ "type": "A",
+ "tier": 3,
+ "filters": Ns1Provider._BASIC_FILTER_CHAIN(provider, True)
+ }],
+ }
+ del ns1_zone['records'][0]['filters'][0]['disabled']
+ monitors_for_mock.side_effect = [{}]
+ zones_retrieve_mock.side_effect = [ns1_zone]
+ records_retrieve_mock.side_effect = ns1_zone['records']
+ with self.assertRaises(Ns1Exception) as ctx:
+ extra = provider._extra_changes(desired, [])
+ self.assertTrue('Mixed disabled flag in filters' in
+ text_type(ctx.exception))
+
+ DESIRED = Zone('unit.tests.', [])
+
+ SIMPLE = Record.new(DESIRED, 'sim', {
+ 'ttl': 33,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+
+ # Dynamic record, inspectable
+ DYNAMIC = Record.new(DESIRED, 'dyn', {
+ 'dynamic': {
+ 'pools': {
+ 'iad': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'iad',
+ }],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'host': 'send.me',
+ 'path': '/_ping',
+ 'port': 80,
+ 'protocol': 'HTTP',
+ }
+ },
+ 'ttl': 32,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'meta': {},
+ })
+
+ def test_has_dynamic(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ simple_update = Update(self.SIMPLE, self.SIMPLE)
+ dynamic_update = Update(self.DYNAMIC, self.DYNAMIC)
+
+ self.assertFalse(provider._has_dynamic([simple_update]))
+ self.assertTrue(provider._has_dynamic([dynamic_update]))
+ self.assertTrue(provider._has_dynamic([simple_update, dynamic_update]))
+
+ @patch('octodns.provider.ns1.Ns1Client.zones_retrieve')
+ @patch('octodns.provider.ns1.Ns1Provider._apply_Update')
+ def test_apply_monitor_regions(self, apply_update_mock,
+ zones_retrieve_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+ simple_update = Update(self.SIMPLE, self.SIMPLE)
+ simple_plan = Plan(self.DESIRED, self.DESIRED, [simple_update], True)
+ dynamic_update = Update(self.DYNAMIC, self.DYNAMIC)
+
+ dynamic_plan = Plan(self.DESIRED, self.DESIRED, [dynamic_update],
+ True)
+ both_plan = Plan(self.DESIRED, self.DESIRED, [simple_update,
+ dynamic_update], True)
+
+ # always return foo, we aren't testing this part here
+ zones_retrieve_mock.side_effect = [
+ 'foo',
+ 'foo',
+ 'foo',
+ 'foo',
+ ]
+
+ # Doesn't blow up, and calls apply once
+ apply_update_mock.reset_mock()
+ provider._apply(simple_plan)
+ apply_update_mock.assert_has_calls([call('foo', simple_update)])
+
+ # Blows up and apply not called
+ apply_update_mock.reset_mock()
+ with self.assertRaises(Ns1Exception) as ctx:
+ provider._apply(dynamic_plan)
+ self.assertTrue('monitor_regions not set' in text_type(ctx.exception))
+ apply_update_mock.assert_not_called()
+
+ # Blows up and apply not called even though there's a simple
+ apply_update_mock.reset_mock()
+ with self.assertRaises(Ns1Exception) as ctx:
+ provider._apply(both_plan)
+ self.assertTrue('monitor_regions not set' in text_type(ctx.exception))
+ apply_update_mock.assert_not_called()
+
+ # with monitor_regions set
+ provider.monitor_regions = ['lga']
+
+ apply_update_mock.reset_mock()
+ provider._apply(both_plan)
+ apply_update_mock.assert_has_calls([
+ call('foo', dynamic_update),
+ call('foo', simple_update),
+ ])
+
+
+class TestNs1Client(TestCase):
+
+ @patch('ns1.rest.zones.Zones.retrieve')
+ def test_retry_behavior(self, zone_retrieve_mock):
+ client = Ns1Client('dummy-key')
+
+ # No retry required, just calls and is returned
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = ['foo']
+ self.assertEquals('foo', client.zones_retrieve('unit.tests'))
+ zone_retrieve_mock.assert_has_calls([call('unit.tests')])
+
+ # One retry required
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = [
+ RateLimitException('boo', period=0),
+ 'foo'
+ ]
+ self.assertEquals('foo', client.zones_retrieve('unit.tests'))
+ zone_retrieve_mock.assert_has_calls([call('unit.tests')])
+
+ # Two retries required
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = [
+ RateLimitException('boo', period=0),
+            RateLimitException('boo', period=0), 'foo'
+ ]
+ self.assertEquals('foo', client.zones_retrieve('unit.tests'))
+ zone_retrieve_mock.assert_has_calls([call('unit.tests')])
+
+ # Exhaust our retries
+ zone_retrieve_mock.reset_mock()
+ zone_retrieve_mock.side_effect = [
+ RateLimitException('first', period=0),
+ RateLimitException('boo', period=0),
+ RateLimitException('boo', period=0),
+ RateLimitException('last', period=0),
+ ]
+ with self.assertRaises(RateLimitException) as ctx:
+ client.zones_retrieve('unit.tests')
+ self.assertEquals('last', text_type(ctx.exception))
+
+ def test_client_config(self):
+ with self.assertRaises(TypeError):
+ Ns1Client()
+
+ client = Ns1Client('dummy-key')
+ self.assertEquals(
+ client._client.config.get('keys'),
+ {'default': {'key': u'dummy-key', 'desc': 'imported API key'}})
+ self.assertEquals(client._client.config.get('follow_pagination'), True)
+ self.assertEquals(
+ client._client.config.get('rate_limit_strategy'), None)
+ self.assertEquals(client._client.config.get('parallelism'), None)
+
+ client = Ns1Client('dummy-key', parallelism=11)
+ self.assertEquals(
+ client._client.config.get('rate_limit_strategy'), 'concurrent')
+ self.assertEquals(client._client.config.get('parallelism'), 11)
+
+ client = Ns1Client('dummy-key', client_config={
+ 'endpoint': 'my.endpoint.com', 'follow_pagination': False})
+ self.assertEquals(
+ client._client.config.get('endpoint'), 'my.endpoint.com')
+ self.assertEquals(
+ client._client.config.get('follow_pagination'), False)
+
+ @patch('ns1.rest.data.Source.list')
+ @patch('ns1.rest.data.Source.create')
+ def test_datasource_id(self, datasource_create_mock, datasource_list_mock):
+ client = Ns1Client('dummy-key')
+
+ # First invocation with an empty list create
+ datasource_list_mock.reset_mock()
+ datasource_create_mock.reset_mock()
+ datasource_list_mock.side_effect = [[]]
+ datasource_create_mock.side_effect = [{
+ 'id': 'foo',
+ }]
+ self.assertEquals('foo', client.datasource_id)
+ name = 'octoDNS NS1 Data Source'
+ source_type = 'nsone_monitoring'
+ datasource_create_mock.assert_has_calls([call(name=name,
+ sourcetype=source_type)])
+ datasource_list_mock.assert_called_once()
+
+ # 2nd invocation is cached
+ datasource_list_mock.reset_mock()
+ datasource_create_mock.reset_mock()
+ self.assertEquals('foo', client.datasource_id)
+ datasource_create_mock.assert_not_called()
+ datasource_list_mock.assert_not_called()
+
+ # Reset the client's cache
+ client._datasource_id = None
+
+ # First invocation with a match in the list finds it and doesn't call
+ # create
+ datasource_list_mock.reset_mock()
+ datasource_create_mock.reset_mock()
+ datasource_list_mock.side_effect = [[{
+ 'id': 'other',
+ 'name': 'not a match',
+ }, {
+ 'id': 'bar',
+ 'name': name,
+ }]]
+ self.assertEquals('bar', client.datasource_id)
+ datasource_create_mock.assert_not_called()
+ datasource_list_mock.assert_called_once()
+
+ @patch('ns1.rest.data.Feed.delete')
+ @patch('ns1.rest.data.Feed.create')
+ @patch('ns1.rest.data.Feed.list')
+ def test_feeds_for_monitors(self, datafeed_list_mock,
+ datafeed_create_mock,
+ datafeed_delete_mock):
+ client = Ns1Client('dummy-key')
+
+ # pre-cache datasource_id
+ client._datasource_id = 'foo'
+
+ # Populate the cache and check the results
+ datafeed_list_mock.reset_mock()
+ datafeed_list_mock.side_effect = [[{
+ 'config': {
+ 'jobid': 'the-job',
+ },
+ 'id': 'the-feed',
+ }, {
+ 'config': {
+ 'jobid': 'the-other-job',
+ },
+ 'id': 'the-other-feed',
+ }]]
+ expected = {
+ 'the-job': 'the-feed',
+ 'the-other-job': 'the-other-feed',
+ }
+ self.assertEquals(expected, client.feeds_for_monitors)
+ datafeed_list_mock.assert_called_once()
+
+ # 2nd call uses cache
+ datafeed_list_mock.reset_mock()
+ self.assertEquals(expected, client.feeds_for_monitors)
+ datafeed_list_mock.assert_not_called()
+
+ # create a feed and make sure it's in the cache/map
+ datafeed_create_mock.reset_mock()
+ datafeed_create_mock.side_effect = [{
+ 'id': 'new-feed',
+ }]
+ client.datafeed_create(client.datasource_id, 'new-name', {
+ 'jobid': 'new-job',
+ })
+ datafeed_create_mock.assert_has_calls([call('foo', 'new-name', {
+ 'jobid': 'new-job',
+ })])
+ new_expected = expected.copy()
+ new_expected['new-job'] = 'new-feed'
+ self.assertEquals(new_expected, client.feeds_for_monitors)
+ datafeed_create_mock.assert_called_once()
+
+ # Delete a feed and make sure it's out of the cache/map
+ datafeed_delete_mock.reset_mock()
+ client.datafeed_delete(client.datasource_id, 'new-feed')
+ self.assertEquals(expected, client.feeds_for_monitors)
+ datafeed_delete_mock.assert_called_once()
+
+ @patch('ns1.rest.monitoring.Monitors.delete')
+ @patch('ns1.rest.monitoring.Monitors.update')
+ @patch('ns1.rest.monitoring.Monitors.create')
+ @patch('ns1.rest.monitoring.Monitors.list')
+ def test_monitors(self, monitors_list_mock, monitors_create_mock,
+ monitors_update_mock, monitors_delete_mock):
+ client = Ns1Client('dummy-key')
+
+ one = {
+ 'id': 'one',
+ 'key': 'value',
+ }
+ two = {
+ 'id': 'two',
+ 'key': 'other-value',
+ }
+
+ # Populate the cache and check the results
+ monitors_list_mock.reset_mock()
+ monitors_list_mock.side_effect = [[one, two]]
+ expected = {
+ 'one': one,
+ 'two': two,
+ }
+ self.assertEquals(expected, client.monitors)
+ monitors_list_mock.assert_called_once()
+
+ # 2nd round pulls it from cache
+ monitors_list_mock.reset_mock()
+ self.assertEquals(expected, client.monitors)
+ monitors_list_mock.assert_not_called()
+
+ # Create a monitor, make sure it's in the list
+ monitors_create_mock.reset_mock()
+ monitor = {
+ 'id': 'new-id',
+ 'key': 'new-value',
+ }
+ monitors_create_mock.side_effect = [monitor]
+ self.assertEquals(monitor, client.monitors_create(param='eter'))
+ monitors_create_mock.assert_has_calls([call({}, param='eter')])
+ new_expected = expected.copy()
+ new_expected['new-id'] = monitor
+ self.assertEquals(new_expected, client.monitors)
+
+ # Update a monitor, make sure it's updated in the cache
+ monitors_update_mock.reset_mock()
+ monitor = {
+ 'id': 'new-id',
+ 'key': 'changed-value',
+ }
+ monitors_update_mock.side_effect = [monitor]
+ self.assertEquals(monitor, client.monitors_update('new-id',
+ key='changed-value'))
+ monitors_update_mock \
+ .assert_has_calls([call('new-id', {}, key='changed-value')])
+ new_expected['new-id'] = monitor
+ self.assertEquals(new_expected, client.monitors)
+
+ # Delete a monitor, make sure it's out of the list
+ monitors_delete_mock.reset_mock()
+ monitors_delete_mock.side_effect = ['deleted']
+ self.assertEquals('deleted', client.monitors_delete('new-id'))
+ monitors_delete_mock.assert_has_calls([call('new-id')])
+ self.assertEquals(expected, client.monitors)
+
+ @patch('ns1.rest.monitoring.NotifyLists.delete')
+ @patch('ns1.rest.monitoring.NotifyLists.create')
+ @patch('ns1.rest.monitoring.NotifyLists.list')
+ def test_notifylists(self, notifylists_list_mock, notifylists_create_mock,
+ notifylists_delete_mock):
+ client = Ns1Client('dummy-key')
+
+ notifylists_list_mock.reset_mock()
+ notifylists_create_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ notifylists_create_mock.side_effect = ['bar']
+ notify_list = [{
+ 'config': {
+ 'sourceid': 'foo',
+ },
+ 'type': 'datafeed',
+ }]
+ nl = client.notifylists_create(name='some name',
+ notify_list=notify_list)
+ self.assertEquals('bar', nl)
+ notifylists_list_mock.assert_not_called()
+ notifylists_create_mock.assert_has_calls([
+ call({'name': 'some name', 'notify_list': notify_list})
+ ])
+ notifylists_delete_mock.assert_not_called()
+
+ notifylists_list_mock.reset_mock()
+ notifylists_create_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ client.notifylists_delete('nlid')
+ notifylists_list_mock.assert_not_called()
+ notifylists_create_mock.assert_not_called()
+ notifylists_delete_mock.assert_has_calls([call('nlid')])
+
+ notifylists_list_mock.reset_mock()
+ notifylists_create_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ expected = ['one', 'two', 'three']
+ notifylists_list_mock.side_effect = [expected]
+ nls = client.notifylists_list()
+ self.assertEquals(expected, nls)
+ notifylists_list_mock.assert_has_calls([call()])
+ notifylists_create_mock.assert_not_called()
+ notifylists_delete_mock.assert_not_called()
diff --git a/tests/test_octodns_provider_ovh.py b/tests/test_octodns_provider_ovh.py
index d3f468d..3da4276 100644
--- a/tests/test_octodns_provider_ovh.py
+++ b/tests/test_octodns_provider_ovh.py
@@ -279,6 +279,24 @@ class TestOvhProvider(TestCase):
'id': 18
})
+ # CAA
+ api_record.append({
+ 'fieldType': 'CAA',
+ 'ttl': 1600,
+ 'target': '0 issue "ca.unit.tests"',
+ 'subDomain': 'caa',
+ 'id': 19
+ })
+ expected.add(Record.new(zone, 'caa', {
+ 'ttl': 1600,
+ 'type': 'CAA',
+ 'values': [{
+ 'flags': 0,
+ 'tag': 'issue',
+ 'value': 'ca.unit.tests'
+ }]
+ }))
+
valid_dkim = [valid_dkim_key,
'v=DKIM1 \\; %s' % valid_dkim_key,
'h=sha256 \\; %s' % valid_dkim_key,
@@ -382,64 +400,66 @@ class TestOvhProvider(TestCase):
get_mock.side_effect = [[100], [101], [102], [103]]
provider.apply(plan)
wanted_calls = [
- call(u'/domain/zone/unit.tests/record', fieldType=u'TXT',
- subDomain='txt', target=u'TXT text', ttl=1400),
- call(u'/domain/zone/unit.tests/record', fieldType=u'DKIM',
- subDomain='dkim', target=self.valid_dkim_key,
- ttl=1300),
- call(u'/domain/zone/unit.tests/record', fieldType=u'A',
- subDomain=u'', target=u'1.2.3.4', ttl=100),
- call(u'/domain/zone/unit.tests/record', fieldType=u'SRV',
+ call('/domain/zone/unit.tests/record', fieldType='A',
+ subDomain='', target='1.2.3.4', ttl=100),
+ call('/domain/zone/unit.tests/record', fieldType='AAAA',
+ subDomain='', target='1:1ec:1::1', ttl=200),
+ call('/domain/zone/unit.tests/record', fieldType='MX',
+ subDomain='', target='10 mx1.unit.tests.', ttl=400),
+ call('/domain/zone/unit.tests/record', fieldType='SPF',
+ subDomain='',
+ target='v=spf1 include:unit.texts.redirect ~all',
+ ttl=1000),
+ call('/domain/zone/unit.tests/record', fieldType='SSHFP',
+ subDomain='',
+ target='1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73',
+ ttl=1100),
+ call('/domain/zone/unit.tests/record', fieldType='PTR',
+ subDomain='4', target='unit.tests.', ttl=900),
+ call('/domain/zone/unit.tests/record', fieldType='SRV',
subDomain='_srv._tcp',
- target=u'10 20 30 foo-1.unit.tests.', ttl=800),
- call(u'/domain/zone/unit.tests/record', fieldType=u'SRV',
+ target='10 20 30 foo-1.unit.tests.', ttl=800),
+ call('/domain/zone/unit.tests/record', fieldType='SRV',
subDomain='_srv._tcp',
- target=u'40 50 60 foo-2.unit.tests.', ttl=800),
- call(u'/domain/zone/unit.tests/record', fieldType=u'PTR',
- subDomain='4', target=u'unit.tests.', ttl=900),
- call(u'/domain/zone/unit.tests/record', fieldType=u'NS',
- subDomain='www3', target=u'ns3.unit.tests.', ttl=700),
- call(u'/domain/zone/unit.tests/record', fieldType=u'NS',
- subDomain='www3', target=u'ns4.unit.tests.', ttl=700),
- call(u'/domain/zone/unit.tests/record',
- fieldType=u'SSHFP', subDomain=u'', ttl=1100,
- target=u'1 1 bf6b6825d2977c511a475bbefb88a'
- u'ad54'
- u'a92ac73',
- ),
- call(u'/domain/zone/unit.tests/record', fieldType=u'AAAA',
- subDomain=u'', target=u'1:1ec:1::1', ttl=200),
- call(u'/domain/zone/unit.tests/record', fieldType=u'MX',
- subDomain=u'', target=u'10 mx1.unit.tests.', ttl=400),
- call(u'/domain/zone/unit.tests/record', fieldType=u'CNAME',
- subDomain='www2', target=u'unit.tests.', ttl=300),
- call(u'/domain/zone/unit.tests/record', fieldType=u'SPF',
- subDomain=u'', ttl=1000,
- target=u'v=spf1 include:unit.texts.'
- u'redirect ~all',
- ),
- call(u'/domain/zone/unit.tests/record', fieldType=u'A',
- subDomain='sub', target=u'1.2.3.4', ttl=200),
- call(u'/domain/zone/unit.tests/record', fieldType=u'NAPTR',
- subDomain='naptr', ttl=500,
- target=u'10 100 "S" "SIP+D2U" "!^.*$!sip:'
- u'info@bar'
- u'.example.com!" .'
- ),
- call(u'/domain/zone/unit.tests/refresh')]
+ target='40 50 60 foo-2.unit.tests.', ttl=800),
+ call('/domain/zone/unit.tests/record', fieldType='CAA',
+ subDomain='caa', target='0 issue "ca.unit.tests"',
+ ttl=1600),
+ call('/domain/zone/unit.tests/record', fieldType='DKIM',
+ subDomain='dkim',
+ target='p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCxLaG'
+ '16G4SaEcXVdiIxTg7gKSGbHKQLm30CHib1h9FzS9nkcyvQSyQj1r'
+ 'MFyqC//tft3ohx3nvJl+bGCWxdtLYDSmir9PW54e5CTdxEh8MWRk'
+ 'BO3StF6QG/tAh3aTGDmkqhIJGLb87iHvpmVKqURmEUzJPv5KPJfW'
+ 'LofADI+q9lQIDAQAB', ttl=1300),
+ call('/domain/zone/unit.tests/record', fieldType='NAPTR',
+ subDomain='naptr',
+ target='10 100 "S" "SIP+D2U" "!^.*$!sip:info@bar.exam'
+ 'ple.com!" .', ttl=500),
+ call('/domain/zone/unit.tests/record', fieldType='A',
+ subDomain='sub', target='1.2.3.4', ttl=200),
+ call('/domain/zone/unit.tests/record', fieldType='TXT',
+ subDomain='txt', target='TXT text', ttl=1400),
+ call('/domain/zone/unit.tests/record', fieldType='CNAME',
+ subDomain='www2', target='unit.tests.', ttl=300),
+ call('/domain/zone/unit.tests/record', fieldType='NS',
+ subDomain='www3', target='ns3.unit.tests.', ttl=700),
+ call('/domain/zone/unit.tests/record', fieldType='NS',
+ subDomain='www3', target='ns4.unit.tests.', ttl=700),
+ call('/domain/zone/unit.tests/refresh')]
post_mock.assert_has_calls(wanted_calls)
# Get for delete calls
wanted_get_calls = [
- call(u'/domain/zone/unit.tests/record', fieldType=u'TXT',
- subDomain='txt'),
+ call(u'/domain/zone/unit.tests/record', fieldType=u'A',
+ subDomain=u''),
call(u'/domain/zone/unit.tests/record', fieldType=u'DKIM',
subDomain='dkim'),
call(u'/domain/zone/unit.tests/record', fieldType=u'A',
- subDomain=u''),
- call(u'/domain/zone/unit.tests/record', fieldType=u'A',
- subDomain='fake')]
+ subDomain='fake'),
+ call(u'/domain/zone/unit.tests/record', fieldType=u'TXT',
+ subDomain='txt')]
get_mock.assert_has_calls(wanted_get_calls)
# 4 delete calls for update and delete
delete_mock.assert_has_calls(
diff --git a/tests/test_octodns_provider_powerdns.py b/tests/test_octodns_provider_powerdns.py
index 067dc74..fd877ef 100644
--- a/tests/test_octodns_provider_powerdns.py
+++ b/tests/test_octodns_provider_powerdns.py
@@ -9,6 +9,7 @@ from json import loads, dumps
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
+from six import text_type
from unittest import TestCase
from octodns.record import Record
@@ -40,10 +41,93 @@ with open('./tests/fixtures/powerdns-full-data.json') as fh:
class TestPowerDnsProvider(TestCase):
- def test_provider(self):
- provider = PowerDnsProvider('test', 'non.existant', 'api-key',
+ def test_provider_version_detection(self):
+ provider = PowerDnsProvider('test', 'non.existent', 'api-key',
nameserver_values=['8.8.8.8.',
'9.9.9.9.'])
+ # Bad auth
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=401, text='Unauthorized')
+
+ with self.assertRaises(Exception) as ctx:
+ provider.powerdns_version
+ self.assertTrue('unauthorized' in text_type(ctx.exception))
+
+ # Api not found
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404, text='Not Found')
+
+ with self.assertRaises(Exception) as ctx:
+ provider.powerdns_version
+ self.assertTrue('404' in text_type(ctx.exception))
+
+ # Test version detection
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.1.10"})
+ self.assertEquals(provider.powerdns_version, [4, 1, 10])
+
+ # Test version detection for second time (should stay at 4.1.10)
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.2.0"})
+ self.assertEquals(provider.powerdns_version, [4, 1, 10])
+
+ # Test version detection
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.2.0"})
+
+ # Reset version, so detection will try again
+ provider._powerdns_version = None
+ self.assertNotEquals(provider.powerdns_version, [4, 1, 10])
+
+ def test_provider_version_config(self):
+ provider = PowerDnsProvider('test', 'non.existent', 'api-key',
+ nameserver_values=['8.8.8.8.',
+ '9.9.9.9.'])
+
+ # Test version 4.1.0
+ provider._powerdns_version = None
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.1.10"})
+ self.assertEquals(provider.soa_edit_api, 'INCEPTION-INCREMENT')
+ self.assertFalse(
+ provider.check_status_not_found,
+ 'check_status_not_found should be false '
+ 'for version 4.1.x and below')
+
+ # Test version 4.2.0
+ provider._powerdns_version = None
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.2.0"})
+ self.assertEquals(provider.soa_edit_api, 'INCEPTION-INCREMENT')
+ self.assertTrue(
+ provider.check_status_not_found,
+ 'check_status_not_found should be true for version 4.2.x')
+
+ # Test version 4.3.0
+ provider._powerdns_version = None
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.3.0"})
+ self.assertEquals(provider.soa_edit_api, 'DEFAULT')
+ self.assertTrue(
+ provider.check_status_not_found,
+ 'check_status_not_found should be true for version 4.3.x')
+
+ def test_provider(self):
+ provider = PowerDnsProvider('test', 'non.existent', 'api-key',
+ nameserver_values=['8.8.8.8.',
+ '9.9.9.9.'])
+
+ # Test version detection
+ with requests_mock() as mock:
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.1.10"})
+ self.assertEquals(provider.powerdns_version, [4, 1, 10])
# Bad auth
with requests_mock() as mock:
@@ -52,7 +136,7 @@ class TestPowerDnsProvider(TestCase):
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertTrue('unauthorized' in ctx.exception.message)
+ self.assertTrue('unauthorized' in text_type(ctx.exception))
# General error
with requests_mock() as mock:
@@ -63,15 +147,25 @@ class TestPowerDnsProvider(TestCase):
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
- # Non-existant zone doesn't populate anything
+        # Non-existent zone in PowerDNS <4.2.0 doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=422,
json={'error': "Could not find domain 'unit.tests.'"})
-
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
+ # Non-existent zone in PowerDNS >=4.2.0 doesn't populate anything
+
+ provider._powerdns_version = [4, 2, 0]
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404, text='Not Found')
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(set(), zone.records)
+
+ provider._powerdns_version = [4, 1, 0]
+
# The rest of this is messy/complicated b/c it's dealing with mocking
expected = Zone('unit.tests.', [])
@@ -115,7 +209,7 @@ class TestPowerDnsProvider(TestCase):
not_found = {'error': "Could not find domain 'unit.tests.'"}
with requests_mock() as mock:
# get 422's, unknown zone
- mock.get(ANY, status_code=422, text='')
+ mock.get(ANY, status_code=422, text=dumps(not_found))
# patch 422's, unknown zone
mock.patch(ANY, status_code=422, text=dumps(not_found))
# post 201, is response to the create with data
@@ -126,9 +220,24 @@ class TestPowerDnsProvider(TestCase):
self.assertEquals(expected_n, provider.apply(plan))
self.assertFalse(plan.exists)
+ provider._powerdns_version = [4, 2, 0]
+ with requests_mock() as mock:
+ # get 404's, unknown zone
+ mock.get(ANY, status_code=404, text='')
+ # patch 404's, unknown zone
+ mock.patch(ANY, status_code=404, text=dumps(not_found))
+ # post 201, is response to the create with data
+ mock.post(ANY, status_code=201, text=assert_rrsets_callback)
+
+ plan = provider.plan(expected)
+ self.assertEquals(expected_n, len(plan.changes))
+ self.assertEquals(expected_n, provider.apply(plan))
+ self.assertFalse(plan.exists)
+
+ provider._powerdns_version = [4, 1, 0]
with requests_mock() as mock:
# get 422's, unknown zone
- mock.get(ANY, status_code=422, text='')
+ mock.get(ANY, status_code=422, text=dumps(not_found))
# patch 422's,
data = {'error': "Key 'name' not present or not a String"}
mock.patch(ANY, status_code=422, text=dumps(data))
@@ -142,7 +251,7 @@ class TestPowerDnsProvider(TestCase):
with requests_mock() as mock:
# get 422's, unknown zone
- mock.get(ANY, status_code=422, text='')
+ mock.get(ANY, status_code=422, text=dumps(not_found))
# patch 500's, things just blew up
mock.patch(ANY, status_code=500, text='')
@@ -152,7 +261,7 @@ class TestPowerDnsProvider(TestCase):
with requests_mock() as mock:
# get 422's, unknown zone
- mock.get(ANY, status_code=422, text='')
+ mock.get(ANY, status_code=422, text=dumps(not_found))
# patch 500's, things just blew up
mock.patch(ANY, status_code=422, text=dumps(not_found))
# post 422's, something wrong with create
@@ -163,7 +272,7 @@ class TestPowerDnsProvider(TestCase):
provider.apply(plan)
def test_small_change(self):
- provider = PowerDnsProvider('test', 'non.existant', 'api-key')
+ provider = PowerDnsProvider('test', 'non.existent', 'api-key')
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
@@ -173,6 +282,8 @@ class TestPowerDnsProvider(TestCase):
# A small change to a single record
with requests_mock() as mock:
mock.get(ANY, status_code=200, text=FULL_TEXT)
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': '4.1.0'})
missing = Zone(expected.name, [])
# Find and delete the SPF record
@@ -203,7 +314,7 @@ class TestPowerDnsProvider(TestCase):
def test_existing_nameservers(self):
ns_values = ['8.8.8.8.', '9.9.9.9.']
- provider = PowerDnsProvider('test', 'non.existant', 'api-key',
+ provider = PowerDnsProvider('test', 'non.existent', 'api-key',
nameserver_values=ns_values)
expected = Zone('unit.tests.', [])
@@ -244,6 +355,8 @@ class TestPowerDnsProvider(TestCase):
}]
}
mock.get(ANY, status_code=200, json=data)
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': '4.1.0'})
unrelated_record = Record.new(expected, '', {
'type': 'A',
@@ -277,6 +390,8 @@ class TestPowerDnsProvider(TestCase):
}]
}
mock.get(ANY, status_code=200, json=data)
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': '4.1.0'})
plan = provider.plan(expected)
self.assertEquals(1, len(plan.changes))
diff --git a/tests/test_octodns_provider_rackspace.py b/tests/test_octodns_provider_rackspace.py
index c467dec..0a6564d 100644
--- a/tests/test_octodns_provider_rackspace.py
+++ b/tests/test_octodns_provider_rackspace.py
@@ -7,8 +7,9 @@ from __future__ import absolute_import, division, print_function, \
import json
import re
+from six import text_type
+from six.moves.urllib.parse import urlparse
from unittest import TestCase
-from urlparse import urlparse
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
@@ -39,7 +40,6 @@ with open('./tests/fixtures/rackspace-sample-recordset-page2.json') as fh:
class TestRackspaceProvider(TestCase):
def setUp(self):
- self.maxDiff = 1000
with requests_mock() as mock:
mock.post(ANY, status_code=200, text=AUTH_RESPONSE)
self.provider = RackspaceProvider('identity', 'test', 'api-key',
@@ -53,7 +53,7 @@ class TestRackspaceProvider(TestCase):
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
self.provider.populate(zone)
- self.assertTrue('unauthorized' in ctx.exception.message)
+ self.assertTrue('unauthorized' in text_type(ctx.exception))
self.assertTrue(mock.called_once)
def test_server_error(self):
@@ -792,13 +792,13 @@ class TestRackspaceProvider(TestCase):
ExpectedUpdates = {
"records": [{
"name": "unit.tests",
- "id": "A-222222",
- "data": "1.2.3.5",
+ "id": "A-111111",
+ "data": "1.2.3.4",
"ttl": 3600
}, {
"name": "unit.tests",
- "id": "A-111111",
- "data": "1.2.3.4",
+ "id": "A-222222",
+ "data": "1.2.3.5",
"ttl": 3600
}, {
"name": "unit.tests",
diff --git a/tests/test_octodns_provider_route53.py b/tests/test_octodns_provider_route53.py
index a569f7c..a2b61e7 100644
--- a/tests/test_octodns_provider_route53.py
+++ b/tests/test_octodns_provider_route53.py
@@ -7,12 +7,14 @@ from __future__ import absolute_import, division, print_function, \
from botocore.exceptions import ClientError
from botocore.stub import ANY, Stubber
+from six import text_type
from unittest import TestCase
from mock import patch
from octodns.record import Create, Delete, Record, Update
-from octodns.provider.route53 import Route53Provider, _Route53GeoDefault, \
- _Route53GeoRecord, _Route53Record, _octal_replace, Route53ProviderException
+from octodns.provider.route53 import Route53Provider, _Route53DynamicValue, \
+ _Route53GeoDefault, _Route53GeoRecord, Route53ProviderException, \
+ _Route53Record, _mod_keyer, _octal_replace
from octodns.zone import Zone
from helpers import GeoProvider
@@ -42,6 +44,202 @@ class TestOctalReplace(TestCase):
self.assertEquals(expected, _octal_replace(s))
+dynamic_rrsets = [{
+ 'Name': '_octodns-default-pool.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.1.2.1'},
+ {'Value': '1.1.2.2'}],
+ 'TTL': 60,
+ 'Type': 'A',
+}, {
+ 'HealthCheckId': '76',
+ 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.4.1.1'}],
+ 'SetIdentifier': 'ap-southeast-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 2
+}, {
+ 'HealthCheckId': '09',
+ 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.4.1.2'}],
+ 'SetIdentifier': 'ap-southeast-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 2
+}, {
+ 'HealthCheckId': 'ab',
+ 'Name': '_octodns-eu-central-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.3.1.1'}],
+ 'SetIdentifier': 'eu-central-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1
+}, {
+ 'HealthCheckId': '1e',
+ 'Name': '_octodns-eu-central-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.3.1.2'}],
+ 'SetIdentifier': 'eu-central-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1
+}, {
+ 'HealthCheckId': '2a',
+ 'Name': '_octodns-us-east-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.5.1.1'}],
+ 'SetIdentifier': 'us-east-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1
+}, {
+ 'HealthCheckId': '61',
+ 'Name': '_octodns-us-east-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.5.1.2'}],
+ 'SetIdentifier': 'us-east-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1,
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-default-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-us-east-1-pool.unit.tests.',
+ 'SetIdentifier': 'us-east-1-Secondary-default',
+ 'Type': 'A'
+}, {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-us-east-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'
+ },
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-us-east-1-pool.unit.tests.',
+ 'SetIdentifier': 'us-east-1-Primary',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-eu-central-1-pool.unit.tests.',
+ 'SetIdentifier': 'eu-central-1-Secondary-default',
+ 'Type': 'A'
+}, {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-eu-central-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'
+ },
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-eu-central-1-pool.unit.tests.',
+ 'SetIdentifier': 'eu-central-1-Primary',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'SetIdentifier': 'ap-southeast-1-Secondary-default',
+ 'Type': 'A'
+}, {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'
+ },
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'SetIdentifier': 'ap-southeast-1-Primary',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'GeoLocation': {'CountryCode': 'JP'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '1-ap-southeast-1-AS-JP',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'GeoLocation': {'CountryCode': 'CN'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '1-ap-southeast-1-AS-CN',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-eu-central-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'GeoLocation': {'ContinentCode': 'NA-US-FL'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '2-eu-central-1-NA-US-FL',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-eu-central-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'GeoLocation': {'ContinentCode': 'EU'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '2-eu-central-1-EU',
+ 'Type': 'A',
+}, {
+ 'AliasTarget': {'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'Z2'},
+ 'GeoLocation': {'CountryCode': '*'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '3-us-east-1-None',
+ 'Type': 'A',
+}]
+
+dynamic_record_data = {
+ 'dynamic': {
+ 'pools': {
+ 'ap-southeast-1': {
+ 'fallback': 'us-east-1',
+ 'values': [{
+ 'weight': 2, 'value': '1.4.1.1'
+ }, {
+ 'weight': 2, 'value': '1.4.1.2'
+ }]
+ },
+ 'eu-central-1': {
+ 'fallback': 'us-east-1',
+ 'values': [{
+ 'weight': 1, 'value': '1.3.1.1'
+ }, {
+ 'weight': 1, 'value': '1.3.1.2'
+ }],
+ },
+ 'us-east-1': {
+ 'values': [{
+ 'weight': 1, 'value': '1.5.1.1'
+ }, {
+ 'weight': 1, 'value': '1.5.1.2'
+ }],
+ }
+ },
+ 'rules': [{
+ 'geos': ['AS-CN', 'AS-JP'],
+ 'pool': 'ap-southeast-1',
+ }, {
+ 'geos': ['EU', 'NA-US-FL'],
+ 'pool': 'eu-central-1',
+ }, {
+ 'pool': 'us-east-1',
+ }],
+ },
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': [
+ '1.1.2.1',
+ '1.1.2.2',
+ ],
+}
+
+
class TestRoute53Provider(TestCase):
expected = Zone('unit.tests.', [])
for name, data in (
@@ -177,6 +375,16 @@ class TestRoute53Provider(TestCase):
return (provider, stubber)
+ def _get_stubbed_delegation_set_provider(self):
+ provider = Route53Provider('test', 'abc', '123',
+ delegation_set_id="ABCDEFG123456")
+
+ # Use the stubber
+ stubber = Stubber(provider._conn)
+ stubber.activate()
+
+ return (provider, stubber)
+
def _get_stubbed_fallback_auth_provider(self):
provider = Route53Provider('test')
@@ -311,7 +519,7 @@ class TestRoute53Provider(TestCase):
'ResourceRecords': [{
'Value': '10 smtp-1.unit.tests.',
}, {
- 'Value': '20 smtp-2.unit.tests.',
+ 'Value': '20 smtp-2.unit.tests.',
}],
'TTL': 64,
}, {
@@ -508,6 +716,29 @@ class TestRoute53Provider(TestCase):
'TTL': 61,
'Type': 'A'
}
+ }, {
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'GeoLocation': {'ContinentCode': 'AF'},
+ 'Name': 'unit.tests.',
+ 'HealthCheckId': u'42',
+ 'ResourceRecords': [{'Value': '4.2.3.4'}],
+ 'SetIdentifier': 'AF',
+ 'TTL': 61,
+ 'Type': 'A'
+ }
+ }, {
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'GeoLocation': {'CountryCode': 'US'},
+ 'HealthCheckId': u'43',
+ 'Name': 'unit.tests.',
+ 'ResourceRecords': [{'Value': '5.2.3.4'},
+ {'Value': '6.2.3.4'}],
+ 'SetIdentifier': 'NA-US',
+ 'TTL': 61,
+ 'Type': 'A'
+ }
}, {
'Action': 'CREATE',
'ResourceRecordSet': {
@@ -520,17 +751,6 @@ class TestRoute53Provider(TestCase):
'TTL': 61,
'Type': 'A'
}
- }, {
- 'Action': 'UPSERT',
- 'ResourceRecordSet': {
- 'GeoLocation': {'ContinentCode': 'AF'},
- 'Name': 'unit.tests.',
- 'HealthCheckId': u'42',
- 'ResourceRecords': [{'Value': '4.2.3.4'}],
- 'SetIdentifier': 'AF',
- 'TTL': 61,
- 'Type': 'A'
- }
}, {
'Action': 'UPSERT',
'ResourceRecordSet': {
@@ -542,18 +762,6 @@ class TestRoute53Provider(TestCase):
'TTL': 61,
'Type': 'A'
}
- }, {
- 'Action': 'UPSERT',
- 'ResourceRecordSet': {
- 'GeoLocation': {'CountryCode': 'US'},
- 'HealthCheckId': u'43',
- 'Name': 'unit.tests.',
- 'ResourceRecords': [{'Value': '5.2.3.4'},
- {'Value': '6.2.3.4'}],
- 'SetIdentifier': 'NA-US',
- 'TTL': 61,
- 'Type': 'A'
- }
}],
'Comment': ANY
},
@@ -591,16 +799,6 @@ class TestRoute53Provider(TestCase):
change_resource_record_sets_params = {
'ChangeBatch': {
'Changes': [{
- 'Action': 'DELETE',
- 'ResourceRecordSet': {
- 'GeoLocation': {'CountryCode': '*'},
- 'Name': 'simple.unit.tests.',
- 'ResourceRecords': [{'Value': '1.2.3.4'},
- {'Value': '2.2.3.4'}],
- 'SetIdentifier': 'default',
- 'TTL': 61,
- 'Type': 'A'}
- }, {
'Action': 'DELETE',
'ResourceRecordSet': {
'GeoLocation': {'ContinentCode': 'OC'},
@@ -610,6 +808,16 @@ class TestRoute53Provider(TestCase):
'SetIdentifier': 'OC',
'TTL': 61,
'Type': 'A'}
+ }, {
+ 'Action': 'DELETE',
+ 'ResourceRecordSet': {
+ 'GeoLocation': {'CountryCode': '*'},
+ 'Name': 'simple.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.2.3.4'},
+ {'Value': '2.2.3.4'}],
+ 'SetIdentifier': 'default',
+ 'TTL': 61,
+ 'Type': 'A'}
}, {
'Action': 'CREATE',
'ResourceRecordSet': {
@@ -683,6 +891,111 @@ class TestRoute53Provider(TestCase):
'CallerReference': ANY,
})
+ list_resource_record_sets_resp = {
+ 'ResourceRecordSets': [{
+ 'Name': 'a.unit.tests.',
+ 'Type': 'A',
+ 'GeoLocation': {
+ 'ContinentCode': 'NA',
+ },
+ 'ResourceRecords': [{
+ 'Value': '2.2.3.4',
+ }],
+ 'TTL': 61,
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_resource_record_sets',
+ list_resource_record_sets_resp,
+ {'HostedZoneId': 'z42'})
+
+ stubber.add_response('list_health_checks',
+ {
+ 'HealthChecks': self.health_checks,
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ 'Marker': '',
+ })
+
+ stubber.add_response('change_resource_record_sets',
+ {'ChangeInfo': {
+ 'Id': 'id',
+ 'Status': 'PENDING',
+ 'SubmittedAt': '2017-01-29T01:02:03Z',
+ }}, {'HostedZoneId': 'z42', 'ChangeBatch': ANY})
+
+ self.assertEquals(9, provider.apply(plan))
+ stubber.assert_no_pending_responses()
+
+ def test_sync_create_with_delegation_set(self):
+ provider, stubber = self._get_stubbed_delegation_set_provider()
+
+ got = Zone('unit.tests.', [])
+
+ list_hosted_zones_resp = {
+ 'HostedZones': [],
+ 'Marker': 'm',
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_hosted_zones', list_hosted_zones_resp,
+ {})
+
+ plan = provider.plan(self.expected)
+ self.assertEquals(9, len(plan.changes))
+ self.assertFalse(plan.exists)
+ for change in plan.changes:
+ self.assertIsInstance(change, Create)
+ stubber.assert_no_pending_responses()
+
+ create_hosted_zone_resp = {
+ 'HostedZone': {
+ 'Name': 'unit.tests.',
+ 'Id': 'z42',
+ 'CallerReference': 'abc',
+ },
+ 'ChangeInfo': {
+ 'Id': 'a12',
+ 'Status': 'PENDING',
+ 'SubmittedAt': '2017-01-29T01:02:03Z',
+ 'Comment': 'hrm',
+ },
+ 'DelegationSet': {
+ 'Id': 'b23',
+ 'CallerReference': 'blip',
+ 'NameServers': [
+ 'n12.unit.tests.',
+ ],
+ },
+ 'Location': 'us-east-1',
+ }
+ stubber.add_response('create_hosted_zone',
+ create_hosted_zone_resp, {
+ 'Name': got.name,
+ 'CallerReference': ANY,
+ 'DelegationSetId': 'ABCDEFG123456'
+ })
+
+ list_resource_record_sets_resp = {
+ 'ResourceRecordSets': [{
+ 'Name': 'a.unit.tests.',
+ 'Type': 'A',
+ 'GeoLocation': {
+ 'ContinentCode': 'NA',
+ },
+ 'ResourceRecords': [{
+ 'Value': '2.2.3.4',
+ }],
+ 'TTL': 61,
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_resource_record_sets',
+ list_resource_record_sets_resp,
+ {'HostedZoneId': 'z42'})
+
stubber.add_response('list_health_checks',
{
'HealthChecks': self.health_checks,
@@ -781,7 +1094,8 @@ class TestRoute53Provider(TestCase):
'AF': ['4.2.3.4'],
}
})
- id = provider.get_health_check_id(record, 'AF', record.geo['AF'], True)
+ value = record.geo['AF'].values[0]
+ id = provider.get_health_check_id(record, value, True)
self.assertEquals('42', id)
def test_health_check_create(self):
@@ -850,6 +1164,7 @@ class TestRoute53Provider(TestCase):
'CallerReference': ANY,
'HealthCheckConfig': health_check_config,
})
+ stubber.add_response('change_tags_for_resource', {})
record = Record.new(self.expected, '', {
'ttl': 61,
@@ -869,12 +1184,71 @@ class TestRoute53Provider(TestCase):
})
# if not allowed to create returns none
- id = provider.get_health_check_id(record, 'AF', record.geo['AF'],
- False)
+ value = record.geo['AF'].values[0]
+ id = provider.get_health_check_id(record, value, False)
self.assertFalse(id)
# when allowed to create we do
- id = provider.get_health_check_id(record, 'AF', record.geo['AF'], True)
+ id = provider.get_health_check_id(record, value, True)
+ self.assertEquals('42', id)
+ stubber.assert_no_pending_responses()
+
+ # A CNAME style healthcheck, without a value
+
+ health_check_config = {
+ 'EnableSNI': False,
+ 'FailureThreshold': 6,
+ 'FullyQualifiedDomainName': 'target-1.unit.tests.',
+ 'MeasureLatency': True,
+ 'Port': 8080,
+ 'RequestInterval': 10,
+ 'ResourcePath': '/_status',
+ 'Type': 'HTTP'
+ }
+ stubber.add_response('create_health_check', {
+ 'HealthCheck': {
+ 'Id': '42',
+ 'CallerReference': self.caller_ref,
+ 'HealthCheckConfig': health_check_config,
+ 'HealthCheckVersion': 1,
+ },
+ 'Location': 'http://url',
+ }, {
+ 'CallerReference': ANY,
+ 'HealthCheckConfig': health_check_config,
+ })
+ stubber.add_response('change_tags_for_resource', {})
+
+ id = provider.get_health_check_id(record, 'target-1.unit.tests.', True)
+ self.assertEquals('42', id)
+ stubber.assert_no_pending_responses()
+
+ # TCP health check
+
+ health_check_config = {
+ 'EnableSNI': False,
+ 'FailureThreshold': 6,
+ 'MeasureLatency': True,
+ 'Port': 8080,
+ 'RequestInterval': 10,
+ 'Type': 'TCP'
+ }
+ stubber.add_response('create_health_check', {
+ 'HealthCheck': {
+ 'Id': '42',
+ 'CallerReference': self.caller_ref,
+ 'HealthCheckConfig': health_check_config,
+ 'HealthCheckVersion': 1,
+ },
+ 'Location': 'http://url',
+ }, {
+ 'CallerReference': ANY,
+ 'HealthCheckConfig': health_check_config,
+ })
+ stubber.add_response('change_tags_for_resource', {})
+
+ record._octodns['healthcheck']['protocol'] = 'TCP'
+ id = provider.get_health_check_id(record, 'target-1.unit.tests.', True)
self.assertEquals('42', id)
stubber.assert_no_pending_responses()
@@ -981,6 +1355,8 @@ class TestRoute53Provider(TestCase):
'CallerReference': ANY,
'HealthCheckConfig': health_check_config,
})
+ stubber.add_response('change_tags_for_resource', {})
+ stubber.add_response('change_tags_for_resource', {})
record = Record.new(self.expected, 'a', {
'ttl': 61,
@@ -1001,7 +1377,8 @@ class TestRoute53Provider(TestCase):
}
})
- id = provider.get_health_check_id(record, 'AF', record.geo['AF'], True)
+ value = record.geo['AF'].values[0]
+ id = provider.get_health_check_id(record, value, True)
ml = provider.health_checks[id]['HealthCheckConfig']['MeasureLatency']
ri = provider.health_checks[id]['HealthCheckConfig']['RequestInterval']
self.assertFalse(ml)
@@ -1043,7 +1420,7 @@ class TestRoute53Provider(TestCase):
'HealthCheckId': '44',
})
change = Create(record)
- provider._mod_Create(change)
+ provider._mod_Create(change, 'z43', [])
stubber.assert_no_pending_responses()
# gc through _mod_Update
@@ -1052,7 +1429,7 @@ class TestRoute53Provider(TestCase):
})
# first record is ignored for our purposes, we have to pass something
change = Update(record, record)
- provider._mod_Create(change)
+ provider._mod_Create(change, 'z43', [])
stubber.assert_no_pending_responses()
# gc through _mod_Delete, expect 3 to go away, can't check order
@@ -1067,7 +1444,7 @@ class TestRoute53Provider(TestCase):
'HealthCheckId': ANY,
})
change = Delete(record)
- provider._mod_Delete(change)
+ provider._mod_Delete(change, 'z43', [])
stubber.assert_no_pending_responses()
# gc only AAAA, leave the A's alone
@@ -1428,6 +1805,155 @@ class TestRoute53Provider(TestCase):
self.assertEquals(1, len(extra))
stubber.assert_no_pending_responses()
+ def test_extra_change_dynamic_has_health_check(self):
+ provider, stubber = self._get_stubbed_provider()
+
+ list_hosted_zones_resp = {
+ 'HostedZones': [{
+ 'Name': 'unit.tests.',
+ 'Id': 'z42',
+ 'CallerReference': 'abc',
+ }],
+ 'Marker': 'm',
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {})
+
+ # record with geo and no health check returns change
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '2.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ list_resource_record_sets_resp = {
+ 'ResourceRecordSets': [{
+ # Not dynamic value and other name
+ 'Name': 'unit.tests.',
+ 'Type': 'A',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': '1.2.3.4',
+ }],
+ 'TTL': 61,
+ # All the non-matches have a different Id so we'll fail if they
+ # match
+ 'HealthCheckId': '33',
+ }, {
+ # Not dynamic value, matching name, other type
+ 'Name': 'a.unit.tests.',
+ 'Type': 'AAAA',
+ 'ResourceRecords': [{
+ 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # default value pool
+ 'Name': '_octodns-default-value.a.unit.tests.',
+ 'Type': 'A',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': '1.2.3.4',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # different record
+ 'Name': '_octodns-two-value.other.unit.tests.',
+ 'Type': 'A',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': '1.2.3.4',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # same everything, but different type
+ 'Name': '_octodns-one-value.a.unit.tests.',
+ 'Type': 'AAAA',
+ 'ResourceRecords': [{
+ 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # same everything, sub
+ 'Name': '_octodns-one-value.sub.a.unit.tests.',
+ 'Type': 'A',
+ 'ResourceRecords': [{
+ 'Value': '1.2.3.4',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # match
+ 'Name': '_octodns-one-value.a.unit.tests.',
+ 'Type': 'A',
+ 'ResourceRecords': [{
+ 'Value': '2.2.3.4',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '42',
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_resource_record_sets',
+ list_resource_record_sets_resp,
+ {'HostedZoneId': 'z42'})
+ stubber.add_response('list_health_checks', {
+ 'HealthChecks': [{
+ 'Id': '42',
+ 'CallerReference': self.caller_ref,
+ 'HealthCheckConfig': {
+ 'Type': 'HTTPS',
+ 'FullyQualifiedDomainName': 'a.unit.tests',
+ 'IPAddress': '2.2.3.4',
+ 'ResourcePath': '/_dns',
+ 'Type': 'HTTPS',
+ 'Port': 443,
+ 'MeasureLatency': True,
+ 'RequestInterval': 10,
+ },
+ 'HealthCheckVersion': 2,
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ 'Marker': '',
+ })
+ extra = provider._extra_changes(desired=desired, changes=[])
+ self.assertEquals(0, len(extra))
+ stubber.assert_no_pending_responses()
+
+ # change b/c of healthcheck path
+ record._octodns['healthcheck'] = {
+ 'path': '/_ready'
+ }
+ extra = provider._extra_changes(desired=desired, changes=[])
+ self.assertEquals(1, len(extra))
+ stubber.assert_no_pending_responses()
+
# change b/c of healthcheck host
record._octodns['healthcheck'] = {
'host': 'foo.bar.io'
@@ -1503,46 +2029,51 @@ class TestRoute53Provider(TestCase):
# _get_test_plan() returns a plan with 11 modifications, 17 RRs
+ @patch('octodns.provider.route53.Route53Provider._load_records')
@patch('octodns.provider.route53.Route53Provider._really_apply')
- def test_apply_1(self, really_apply_mock):
+ def test_apply_1(self, really_apply_mock, _):
# 18 RRs with max of 19 should only get applied in one call
provider, plan = self._get_test_plan(19)
provider.apply(plan)
really_apply_mock.assert_called_once()
+ @patch('octodns.provider.route53.Route53Provider._load_records')
@patch('octodns.provider.route53.Route53Provider._really_apply')
- def test_apply_2(self, really_apply_mock):
+ def test_apply_2(self, really_apply_mock, _):
# 18 RRs with max of 17 should only get applied in two calls
provider, plan = self._get_test_plan(18)
provider.apply(plan)
self.assertEquals(2, really_apply_mock.call_count)
+ @patch('octodns.provider.route53.Route53Provider._load_records')
@patch('octodns.provider.route53.Route53Provider._really_apply')
- def test_apply_3(self, really_apply_mock):
+ def test_apply_3(self, really_apply_mock, _):
- # with a max of seven modifications, four calls
+ # with a max of seven modifications, three calls
provider, plan = self._get_test_plan(7)
provider.apply(plan)
- self.assertEquals(4, really_apply_mock.call_count)
+ self.assertEquals(3, really_apply_mock.call_count)
+ @patch('octodns.provider.route53.Route53Provider._load_records')
@patch('octodns.provider.route53.Route53Provider._really_apply')
- def test_apply_4(self, really_apply_mock):
+ def test_apply_4(self, really_apply_mock, _):
# with a max of 11 modifications, two calls
provider, plan = self._get_test_plan(11)
provider.apply(plan)
self.assertEquals(2, really_apply_mock.call_count)
+ @patch('octodns.provider.route53.Route53Provider._load_records')
@patch('octodns.provider.route53.Route53Provider._really_apply')
- def test_apply_bad(self, really_apply_mock):
+ def test_apply_bad(self, really_apply_mock, _):
# with a max of 1 modifications, fail
provider, plan = self._get_test_plan(1)
with self.assertRaises(Exception) as ctx:
provider.apply(plan)
- self.assertTrue('modifications' in ctx.exception.message)
+ self.assertTrue('modifications' in text_type(ctx.exception))
def test_semicolon_fixup(self):
provider = Route53Provider('test', 'abc', '123')
@@ -1568,38 +2099,131 @@ class TestRoute53Provider(TestCase):
provider = Route53Provider('test', 'abc', '123',
client_max_attempts=42)
# NOTE: this will break if boto ever changes the impl details...
- self.assertEquals(43, provider._conn.meta.events
- ._unique_id_handlers['retry-config-route53']
- ['handler']._checker.__dict__['_max_attempts'])
+ self.assertEquals({
+ 'mode': 'legacy',
+ 'total_max_attempts': 43,
+ }, provider._conn._client_config.retries)
+
+ def test_data_for_dynamic(self):
+ provider = Route53Provider('test', 'abc', '123')
+
+ data = provider._data_for_dynamic('', 'A', dynamic_rrsets)
+ self.assertEquals(dynamic_record_data, data)
+
+ @patch('octodns.provider.route53.Route53Provider._get_zone_id')
+ @patch('octodns.provider.route53.Route53Provider._load_records')
+ def test_dynamic_populate(self, load_records_mock, get_zone_id_mock):
+ provider = Route53Provider('test', 'abc', '123')
+
+ get_zone_id_mock.side_effect = ['z44']
+ load_records_mock.side_effect = [dynamic_rrsets]
+
+ got = Zone('unit.tests.', [])
+ provider.populate(got)
+
+ self.assertEquals(1, len(got.records))
+ record = list(got.records)[0]
+ self.assertEquals('', record.name)
+ self.assertEquals('A', record._type)
+ self.assertEquals([
+ '1.1.2.1',
+ '1.1.2.2',
+ ], record.values)
+ self.assertTrue(record.dynamic)
+
+ self.assertEquals({
+ 'ap-southeast-1': {
+ 'fallback': 'us-east-1',
+ 'values': [{
+ 'weight': 2, 'value': '1.4.1.1'
+ }, {
+ 'weight': 2, 'value': '1.4.1.2'
+ }]
+ },
+ 'eu-central-1': {
+ 'fallback': 'us-east-1',
+ 'values': [{
+ 'weight': 1, 'value': '1.3.1.1'
+ }, {
+ 'weight': 1, 'value': '1.3.1.2'
+ }],
+ },
+ 'us-east-1': {
+ 'fallback': None,
+ 'values': [{
+ 'weight': 1, 'value': '1.5.1.1'
+ }, {
+ 'weight': 1, 'value': '1.5.1.2'
+ }],
+ }
+ }, {k: v.data for k, v in record.dynamic.pools.items()})
+
+ self.assertEquals([
+ {
+ 'geos': ['AS-CN', 'AS-JP'],
+ 'pool': 'ap-southeast-1',
+ }, {
+ 'geos': ['EU', 'NA-US-FL'],
+ 'pool': 'eu-central-1',
+ }, {
+ 'pool': 'us-east-1',
+ }], [r.data for r in record.dynamic.rules])
+
+
+class DummyProvider(object):
+
+ def get_health_check_id(self, *args, **kwargs):
+ return None
class TestRoute53Records(TestCase):
+ existing = Zone('unit.tests.', [])
+ record_a = Record.new(existing, '', {
+ 'geo': {
+ 'NA-US': ['2.2.2.2', '3.3.3.3'],
+ 'OC': ['4.4.4.4', '5.5.5.5']
+ },
+ 'ttl': 99,
+ 'type': 'A',
+ 'values': ['9.9.9.9']
+ })
+
+ def test_value_fors(self):
+ route53_record = _Route53Record(None, self.record_a, False)
+
+ for value in (None, '', 'foo', 'bar', '1.2.3.4'):
+ converted = route53_record._value_convert_value(value,
+ self.record_a)
+ self.assertEquals(value, converted)
+
+ record_txt = Record.new(self.existing, 'txt', {
+ 'ttl': 98,
+ 'type': 'TXT',
+ 'value': 'Not Important',
+ })
+
+ # We don't really have to test the details of chunked_value as that's
+ # tested elsewhere, we just need to make sure that it's plumbed up and
+ # working
+ self.assertEquals('"Not Important"', route53_record
+ ._value_convert_quoted(record_txt.values[0],
+ record_txt))
def test_route53_record(self):
- existing = Zone('unit.tests.', [])
- record_a = Record.new(existing, '', {
- 'geo': {
- 'NA-US': ['2.2.2.2', '3.3.3.3'],
- 'OC': ['4.4.4.4', '5.5.5.5']
- },
- 'ttl': 99,
- 'type': 'A',
- 'values': ['9.9.9.9']
- })
- a = _Route53Record(None, record_a, False)
+ a = _Route53Record(None, self.record_a, False)
self.assertEquals(a, a)
- b = _Route53Record(None, Record.new(existing, '',
+ b = _Route53Record(None, Record.new(self.existing, '',
{'ttl': 32, 'type': 'A',
'values': ['8.8.8.8',
'1.1.1.1']}),
False)
self.assertEquals(b, b)
- c = _Route53Record(None, Record.new(existing, 'other',
+ c = _Route53Record(None, Record.new(self.existing, 'other',
{'ttl': 99, 'type': 'A',
'values': ['9.9.9.9']}),
False)
self.assertEquals(c, c)
- d = _Route53Record(None, Record.new(existing, '',
+ d = _Route53Record(None, Record.new(self.existing, '',
{'ttl': 42, 'type': 'MX',
'value': {
'preference': 10,
@@ -1615,20 +2239,15 @@ class TestRoute53Records(TestCase):
self.assertNotEquals(a, c)
# Same everything, different class is not the same
- e = _Route53GeoDefault(None, record_a, False)
+ e = _Route53GeoDefault(None, self.record_a, False)
self.assertNotEquals(a, e)
- class DummyProvider(object):
-
- def get_health_check_id(self, *args, **kwargs):
- return None
-
provider = DummyProvider()
- f = _Route53GeoRecord(provider, record_a, 'NA-US',
- record_a.geo['NA-US'], False)
+ f = _Route53GeoRecord(provider, self.record_a, 'NA-US',
+ self.record_a.geo['NA-US'], False)
self.assertEquals(f, f)
- g = _Route53GeoRecord(provider, record_a, 'OC',
- record_a.geo['OC'], False)
+ g = _Route53GeoRecord(provider, self.record_a, 'OC',
+ self.record_a.geo['OC'], False)
self.assertEquals(g, g)
# Geo and non-geo are not the same, using Geo as primary to get it's
@@ -1641,3 +2260,468 @@ class TestRoute53Records(TestCase):
a.__repr__()
e.__repr__()
f.__repr__()
+
+ def test_route53_record_ordering(self):
+ # Matches
+ a = _Route53Record(None, self.record_a, False)
+ b = _Route53Record(None, self.record_a, False)
+ self.assertTrue(a == b)
+ self.assertFalse(a != b)
+ self.assertFalse(a < b)
+ self.assertTrue(a <= b)
+ self.assertFalse(a > b)
+ self.assertTrue(a >= b)
+
+ # Change the fqdn is greater
+ fqdn = _Route53Record(None, self.record_a, False,
+ fqdn_override='other')
+ self.assertFalse(a == fqdn)
+ self.assertTrue(a != fqdn)
+ self.assertFalse(a < fqdn)
+ self.assertFalse(a <= fqdn)
+ self.assertTrue(a > fqdn)
+ self.assertTrue(a >= fqdn)
+
+ provider = DummyProvider()
+ geo_a = _Route53GeoRecord(provider, self.record_a, 'NA-US',
+ self.record_a.geo['NA-US'], False)
+ geo_b = _Route53GeoRecord(provider, self.record_a, 'NA-US',
+ self.record_a.geo['NA-US'], False)
+ self.assertTrue(geo_a == geo_b)
+ self.assertFalse(geo_a != geo_b)
+ self.assertFalse(geo_a < geo_b)
+ self.assertTrue(geo_a <= geo_b)
+ self.assertFalse(geo_a > geo_b)
+ self.assertTrue(geo_a >= geo_b)
+
+ # Other base
+ geo_fqdn = _Route53GeoRecord(provider, self.record_a, 'NA-US',
+ self.record_a.geo['NA-US'], False)
+ geo_fqdn.fqdn = 'other'
+ self.assertFalse(geo_a == geo_fqdn)
+ self.assertTrue(geo_a != geo_fqdn)
+ self.assertFalse(geo_a < geo_fqdn)
+ self.assertFalse(geo_a <= geo_fqdn)
+ self.assertTrue(geo_a > geo_fqdn)
+ self.assertTrue(geo_a >= geo_fqdn)
+
+ # Other class
+ self.assertFalse(a == geo_a)
+ self.assertTrue(a != geo_a)
+ self.assertFalse(a < geo_a)
+ self.assertFalse(a <= geo_a)
+ self.assertTrue(a > geo_a)
+ self.assertTrue(a >= geo_a)
+
+ def test_dynamic_value_delete(self):
+ provider = DummyProvider()
+ geo = _Route53DynamicValue(provider, self.record_a, 'iad', '2.2.2.2',
+ 1, 0, False)
+
+ rrset = {
+ 'HealthCheckId': 'x12346z',
+ 'Name': '_octodns-iad-value.unit.tests.',
+ 'ResourceRecords': [{
+ 'Value': '2.2.2.2'
+ }],
+ 'SetIdentifier': 'iad-000',
+ 'TTL': 99,
+ 'Type': 'A',
+ 'Weight': 1,
+ }
+
+ candidates = [
+ # Empty, will test no SetIdentifier
+ {},
+ # Non-matching
+ {
+ 'SetIdentifier': 'not-a-match',
+ },
+ # Same set-id, different name
+ {
+ 'Name': 'not-a-match',
+ 'SetIdentifier': 'x12346z',
+ },
+ rrset,
+ ]
+
+ # Provide a matching rrset so that we'll just use it for the delete
+ # rather than building up an almost identical one, note the way we'll
+ # know that we got the one we passed in is that it'll have a
+ # HealthCheckId and one that was created wouldn't since DummyProvider
+ # stubs out the lookup for them
+ mod = geo.mod('DELETE', candidates)
+ self.assertEquals('x12346z', mod['ResourceRecordSet']['HealthCheckId'])
+
+ # If we don't provide the candidate rrsets we get back exactly what we
+ # put in minus the healthcheck
+ rrset['HealthCheckId'] = None
+ mod = geo.mod('DELETE', [])
+ self.assertEquals(rrset, mod['ResourceRecordSet'])
+
+ def test_geo_delete(self):
+ provider = DummyProvider()
+ geo = _Route53GeoRecord(provider, self.record_a, 'NA-US',
+ self.record_a.geo['NA-US'], False)
+
+ rrset = {
+ 'GeoLocation': {
+ 'CountryCode': 'US'
+ },
+ 'HealthCheckId': 'x12346z',
+ 'Name': 'unit.tests.',
+ 'ResourceRecords': [{
+ 'Value': '2.2.2.2'
+ }, {
+ 'Value': '3.3.3.3'
+ }],
+ 'SetIdentifier': 'NA-US',
+ 'TTL': 99,
+ 'Type': 'A'
+ }
+
+ candidates = [
+ # Empty, will test no SetIdentifier
+ {},
+ {
+ 'SetIdentifier': 'not-a-match',
+ },
+ # Same set-id, different name
+ {
+ 'Name': 'not-a-match',
+ 'SetIdentifier': 'x12346z',
+ },
+ rrset,
+ ]
+
+ # Provide a matching rrset so that we'll just use it for the delete
+ # rather than building up an almost identical one, note the way we'll
+ # know that we got the one we passed in is that it'll have a
+ # HealthCheckId and one that was created wouldn't since DummyProvider
+ # stubs out the lookup for them
+ mod = geo.mod('DELETE', candidates)
+ self.assertEquals('x12346z', mod['ResourceRecordSet']['HealthCheckId'])
+
+ # If we don't provide the candidate rrsets we get back exactly what we
+ # put in minus the healthcheck
+ del rrset['HealthCheckId']
+ mod = geo.mod('DELETE', [])
+ self.assertEquals(rrset, mod['ResourceRecordSet'])
+
+ def test_new_dynamic(self):
+ provider = Route53Provider('test', 'abc', '123')
+
+ # Just so boto won't try and make any calls
+ stubber = Stubber(provider._conn)
+ stubber.activate()
+
+ # We'll assume we create all healthchecks here, this functionality is
+ # thoroughly tested elsewhere
+ provider._health_checks = {}
+ # When asked for a healthcheck return dummy info
+ provider.get_health_check_id = lambda r, v, c: 'hc42'
+
+ zone = Zone('unit.tests.', [])
+ record = Record.new(zone, '', dynamic_record_data)
+
+ # Convert a record into _Route53Records
+ route53_records = _Route53Record.new(provider, record, 'z45',
+ creating=True)
+ self.assertEquals(18, len(route53_records))
+
+ expected_mods = [r.mod('CREATE', []) for r in route53_records]
+ # Sort so that we get a consistent order and don't rely on set ordering
+ expected_mods.sort(key=_mod_keyer)
+
+ # Convert the route53_records into mods
+ self.assertEquals([{
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.4.1.1'}],
+ 'SetIdentifier': 'ap-southeast-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 2}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.4.1.2'}],
+ 'SetIdentifier': 'ap-southeast-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 2}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'Name': '_octodns-default-pool.unit.tests.',
+ 'ResourceRecords': [
+ {'Value': '1.1.2.1'},
+ {'Value': '1.1.2.2'}],
+ 'TTL': 60,
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-eu-central-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.3.1.1'}],
+ 'SetIdentifier': 'eu-central-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-eu-central-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.3.1.2'}],
+ 'SetIdentifier': 'eu-central-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-us-east-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.5.1.1'}],
+ 'SetIdentifier': 'us-east-1-000',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'HealthCheckId': 'hc42',
+ 'Name': '_octodns-us-east-1-value.unit.tests.',
+ 'ResourceRecords': [{'Value': '1.5.1.2'}],
+ 'SetIdentifier': 'us-east-1-001',
+ 'TTL': 60,
+ 'Type': 'A',
+ 'Weight': 1}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-ap-southeast-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'SetIdentifier': 'ap-southeast-1-Primary',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-eu-central-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-eu-central-1-pool.unit.tests.',
+ 'SetIdentifier': 'eu-central-1-Primary',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-us-east-1-value.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'PRIMARY',
+ 'Name': '_octodns-us-east-1-pool.unit.tests.',
+ 'SetIdentifier': 'us-east-1-Primary',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'SetIdentifier': 'ap-southeast-1-Secondary-us-east-1',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-eu-central-1-pool.unit.tests.',
+ 'SetIdentifier': 'eu-central-1-Secondary-us-east-1',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-default-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'Failover': 'SECONDARY',
+ 'Name': '_octodns-us-east-1-pool.unit.tests.',
+ 'SetIdentifier': 'us-east-1-Secondary-default',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'GeoLocation': {
+ 'CountryCode': 'CN'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '0-ap-southeast-1-AS-CN',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-ap-southeast-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'GeoLocation': {
+ 'CountryCode': 'JP'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '0-ap-southeast-1-AS-JP',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-eu-central-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'GeoLocation': {
+ 'ContinentCode': 'EU'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '1-eu-central-1-EU',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-eu-central-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'GeoLocation': {
+ 'CountryCode': 'US',
+ 'SubdivisionCode': 'FL'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '1-eu-central-1-NA-US-FL',
+ 'Type': 'A'}
+ }, {
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': {
+ 'DNSName': '_octodns-us-east-1-pool.unit.tests.',
+ 'EvaluateTargetHealth': True,
+ 'HostedZoneId': 'z45'},
+ 'GeoLocation': {
+ 'CountryCode': '*'},
+ 'Name': 'unit.tests.',
+ 'SetIdentifier': '2-us-east-1-None',
+ 'Type': 'A'}
+ }], expected_mods)
+
+ for route53_record in route53_records:
+ # Smoke test stringification
+ route53_record.__repr__()
+
+
+class TestModKeyer(TestCase):
+
+ def test_mod_keyer(self):
+
+ # First "column" is the action priority for C/R/U
+
+ # Deletes come first
+ self.assertEquals((0, 0, 'something'), _mod_keyer({
+ 'Action': 'DELETE',
+ 'ResourceRecordSet': {
+ 'Name': 'something',
+ }
+ }))
+
+ # Creates come next
+ self.assertEquals((1, 0, 'another'), _mod_keyer({
+ 'Action': 'CREATE',
+ 'ResourceRecordSet': {
+ 'Name': 'another',
+ }
+ }))
+
+ # Upserts are the same as creates
+ self.assertEquals((1, 0, 'last'), _mod_keyer({
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'Name': 'last',
+ }
+ }))
+
+ # Second "column" value records tested above
+
+ # AliasTarget primary second (to value)
+ self.assertEquals((0, -1, 'thing'), _mod_keyer({
+ 'Action': 'DELETE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': 'some-target',
+ 'Failover': 'PRIMARY',
+ 'Name': 'thing',
+ }
+ }))
+
+ self.assertEquals((1, 1, 'thing'), _mod_keyer({
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'AliasTarget': 'some-target',
+ 'Failover': 'PRIMARY',
+ 'Name': 'thing',
+ }
+ }))
+
+ # AliasTarget secondary third
+ self.assertEquals((0, -2, 'thing'), _mod_keyer({
+ 'Action': 'DELETE',
+ 'ResourceRecordSet': {
+ 'AliasTarget': 'some-target',
+ 'Failover': 'SECONDARY',
+ 'Name': 'thing',
+ }
+ }))
+
+ self.assertEquals((1, 2, 'thing'), _mod_keyer({
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'AliasTarget': 'some-target',
+ 'Failover': 'SECONDARY',
+ 'Name': 'thing',
+ }
+ }))
+
+ # GeoLocation fourth
+ self.assertEquals((0, -3, 'some-id'), _mod_keyer({
+ 'Action': 'DELETE',
+ 'ResourceRecordSet': {
+ 'GeoLocation': 'some-target',
+ 'SetIdentifier': 'some-id',
+ }
+ }))
+
+ self.assertEquals((1, 3, 'some-id'), _mod_keyer({
+ 'Action': 'UPSERT',
+ 'ResourceRecordSet': {
+ 'GeoLocation': 'some-target',
+ 'SetIdentifier': 'some-id',
+ }
+ }))
+
+ # The third "column" has already been tested above, Name/SetIdentifier
diff --git a/tests/test_octodns_provider_selectel.py b/tests/test_octodns_provider_selectel.py
new file mode 100644
index 0000000..7ad1e6b
--- /dev/null
+++ b/tests/test_octodns_provider_selectel.py
@@ -0,0 +1,402 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from unittest import TestCase
+from six import text_type
+
+import requests_mock
+
+from octodns.provider.selectel import SelectelProvider
+from octodns.record import Record, Update
+from octodns.zone import Zone
+
+
+class TestSelectelProvider(TestCase):
+ API_URL = 'https://api.selectel.ru/domains/v1'
+
+ api_record = []
+
+ zone = Zone('unit.tests.', [])
+ expected = set()
+
+ domain = [{"name": "unit.tests", "id": 100000}]
+
+ # A, subdomain=''
+ api_record.append({
+ 'type': 'A',
+ 'ttl': 100,
+ 'content': '1.2.3.4',
+ 'name': 'unit.tests',
+ 'id': 1
+ })
+ expected.add(Record.new(zone, '', {
+ 'ttl': 100,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ }))
+
+ # A, subdomain='sub'
+ api_record.append({
+ 'type': 'A',
+ 'ttl': 200,
+ 'content': '1.2.3.4',
+ 'name': 'sub.unit.tests',
+ 'id': 2
+ })
+ expected.add(Record.new(zone, 'sub', {
+ 'ttl': 200,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ }))
+
+ # CNAME
+ api_record.append({
+ 'type': 'CNAME',
+ 'ttl': 300,
+ 'content': 'unit.tests',
+ 'name': 'www2.unit.tests',
+ 'id': 3
+ })
+ expected.add(Record.new(zone, 'www2', {
+ 'ttl': 300,
+ 'type': 'CNAME',
+ 'value': 'unit.tests.',
+ }))
+
+ # MX
+ api_record.append({
+ 'type': 'MX',
+ 'ttl': 400,
+ 'content': 'mx1.unit.tests',
+ 'priority': 10,
+ 'name': 'unit.tests',
+ 'id': 4
+ })
+ expected.add(Record.new(zone, '', {
+ 'ttl': 400,
+ 'type': 'MX',
+ 'values': [{
+ 'preference': 10,
+ 'exchange': 'mx1.unit.tests.',
+ }]
+ }))
+
+ # NS
+ api_record.append({
+ 'type': 'NS',
+ 'ttl': 600,
+ 'content': 'ns1.unit.tests',
+ 'name': 'unit.tests.',
+ 'id': 6
+ })
+ api_record.append({
+ 'type': 'NS',
+ 'ttl': 600,
+ 'content': 'ns2.unit.tests',
+ 'name': 'unit.tests',
+ 'id': 7
+ })
+ expected.add(Record.new(zone, '', {
+ 'ttl': 600,
+ 'type': 'NS',
+ 'values': ['ns1.unit.tests.', 'ns2.unit.tests.'],
+ }))
+
+ # NS with sub
+ api_record.append({
+ 'type': 'NS',
+ 'ttl': 700,
+ 'content': 'ns3.unit.tests',
+ 'name': 'www3.unit.tests',
+ 'id': 8
+ })
+ api_record.append({
+ 'type': 'NS',
+ 'ttl': 700,
+ 'content': 'ns4.unit.tests',
+ 'name': 'www3.unit.tests',
+ 'id': 9
+ })
+ expected.add(Record.new(zone, 'www3', {
+ 'ttl': 700,
+ 'type': 'NS',
+ 'values': ['ns3.unit.tests.', 'ns4.unit.tests.'],
+ }))
+
+ # SRV
+ api_record.append({
+ 'type': 'SRV',
+ 'ttl': 800,
+ 'target': 'foo-1.unit.tests',
+ 'weight': 20,
+ 'priority': 10,
+ 'port': 30,
+ 'id': 10,
+ 'name': '_srv._tcp.unit.tests'
+ })
+ api_record.append({
+ 'type': 'SRV',
+ 'ttl': 800,
+ 'target': 'foo-2.unit.tests',
+ 'name': '_srv._tcp.unit.tests',
+ 'weight': 50,
+ 'priority': 40,
+ 'port': 60,
+ 'id': 11
+ })
+ expected.add(Record.new(zone, '_srv._tcp', {
+ 'ttl': 800,
+ 'type': 'SRV',
+ 'values': [{
+ 'priority': 10,
+ 'weight': 20,
+ 'port': 30,
+ 'target': 'foo-1.unit.tests.',
+ }, {
+ 'priority': 40,
+ 'weight': 50,
+ 'port': 60,
+ 'target': 'foo-2.unit.tests.',
+ }]
+ }))
+
+ # AAAA
+ aaaa_record = {
+ 'type': 'AAAA',
+ 'ttl': 200,
+ 'content': '1:1ec:1::1',
+ 'name': 'unit.tests',
+ 'id': 15
+ }
+ api_record.append(aaaa_record)
+ expected.add(Record.new(zone, '', {
+ 'ttl': 200,
+ 'type': 'AAAA',
+ 'value': '1:1ec:1::1',
+ }))
+
+ # TXT
+ api_record.append({
+ 'type': 'TXT',
+ 'ttl': 300,
+ 'content': 'little text',
+ 'name': 'text.unit.tests',
+ 'id': 16
+ })
+ expected.add(Record.new(zone, 'text', {
+ 'ttl': 200,
+ 'type': 'TXT',
+ 'value': 'little text',
+ }))
+
+ @requests_mock.Mocker()
+ def test_populate(self, fake_http):
+ zone = Zone('unit.tests.', [])
+ fake_http.get('{}/unit.tests/records/'.format(self.API_URL),
+ json=self.api_record)
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.api_record))})
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+
+ provider = SelectelProvider(123, 'secret_token')
+ provider.populate(zone)
+
+ self.assertEquals(self.expected, zone.records)
+
+ @requests_mock.Mocker()
+ def test_populate_invalid_record(self, fake_http):
+ more_record = self.api_record
+ more_record.append({"name": "unit.tests",
+ "id": 100001,
+ "content": "support.unit.tests.",
+ "ttl": 300, "ns": "ns1.unit.tests",
+ "type": "SOA",
+ "email": "support@unit.tests"})
+
+ zone = Zone('unit.tests.', [])
+ fake_http.get('{}/unit.tests/records/'.format(self.API_URL),
+ json=more_record)
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.api_record))})
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+
+ zone.add_record(Record.new(self.zone, 'unsup', {
+ 'ttl': 200,
+ 'type': 'NAPTR',
+ 'value': {
+ 'order': 40,
+ 'preference': 70,
+ 'flags': 'U',
+ 'service': 'SIP+D2U',
+ 'regexp': '!^.*$!sip:info@bar.example.com!',
+ 'replacement': '.',
+ }
+ }))
+
+ provider = SelectelProvider(123, 'secret_token')
+ provider.populate(zone)
+
+ self.assertNotEqual(self.expected, zone.records)
+
+ @requests_mock.Mocker()
+ def test_apply(self, fake_http):
+
+ fake_http.get('{}/unit.tests/records/'.format(self.API_URL),
+ json=list())
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': '0'})
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+ fake_http.post('{}/100000/records/'.format(self.API_URL), json=list())
+
+ provider = SelectelProvider(123, 'test_token')
+
+ zone = Zone('unit.tests.', [])
+
+ for record in self.expected:
+ zone.add_record(record)
+
+ plan = provider.plan(zone)
+ self.assertEquals(8, len(plan.changes))
+ self.assertEquals(8, provider.apply(plan))
+
+ @requests_mock.Mocker()
+ def test_domain_list(self, fake_http):
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+
+ expected = {'unit.tests': self.domain[0]}
+ provider = SelectelProvider(123, 'test_token')
+
+ result = provider.domain_list()
+ self.assertEquals(result, expected)
+
+ @requests_mock.Mocker()
+ def test_authentication_fail(self, fake_http):
+ fake_http.get('{}/'.format(self.API_URL), status_code=401)
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+
+ with self.assertRaises(Exception) as ctx:
+ SelectelProvider(123, 'fail_token')
+ self.assertEquals(text_type(ctx.exception),
+ 'Authorization failed. Invalid or empty token.')
+
+ @requests_mock.Mocker()
+ def test_not_exist_domain(self, fake_http):
+ fake_http.get('{}/'.format(self.API_URL), status_code=404, json='')
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+
+ fake_http.post('{}/'.format(self.API_URL),
+ json={"name": "unit.tests",
+ "create_date": 1507154178,
+ "id": 100000})
+ fake_http.get('{}/unit.tests/records/'.format(self.API_URL),
+ json=list())
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.api_record))})
+ fake_http.post('{}/100000/records/'.format(self.API_URL),
+ json=list())
+
+ provider = SelectelProvider(123, 'test_token')
+
+ zone = Zone('unit.tests.', [])
+
+ for record in self.expected:
+ zone.add_record(record)
+
+ plan = provider.plan(zone)
+ self.assertEquals(8, len(plan.changes))
+ self.assertEquals(8, provider.apply(plan))
+
+ @requests_mock.Mocker()
+ def test_delete_no_exist_record(self, fake_http):
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.get('{}/100000/records/'.format(self.API_URL), json=list())
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': '0'})
+
+ provider = SelectelProvider(123, 'test_token')
+
+ zone = Zone('unit.tests.', [])
+
+ provider.delete_record('unit.tests', 'NS', zone)
+
+ @requests_mock.Mocker()
+ def test_change_record(self, fake_http):
+ exist_record = [self.aaaa_record,
+ {"content": "6.6.5.7",
+ "ttl": 100,
+ "type": "A",
+ "id": 100001,
+ "name": "delete.unit.tests"},
+ {"content": "9.8.2.1",
+ "ttl": 100,
+ "type": "A",
+ "id": 100002,
+ "name": "unit.tests"}] # exist
+ fake_http.get('{}/unit.tests/records/'.format(self.API_URL),
+ json=exist_record)
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.get('{}/100000/records/'.format(self.API_URL),
+ json=exist_record)
+ fake_http.head('{}/unit.tests/records/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(exist_record))})
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+ fake_http.head('{}/100000/records/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(exist_record))})
+ fake_http.post('{}/100000/records/'.format(self.API_URL),
+ json=list())
+ fake_http.delete('{}/100000/records/100001'.format(self.API_URL),
+ text="")
+ fake_http.delete('{}/100000/records/100002'.format(self.API_URL),
+ text="")
+
+ provider = SelectelProvider(123, 'test_token')
+
+ zone = Zone('unit.tests.', [])
+
+ for record in self.expected:
+ zone.add_record(record)
+
+ plan = provider.plan(zone)
+ self.assertEquals(8, len(plan.changes))
+ self.assertEquals(8, provider.apply(plan))
+
+ @requests_mock.Mocker()
+ def test_include_change_returns_false(self, fake_http):
+ fake_http.get('{}/'.format(self.API_URL), json=self.domain)
+ fake_http.head('{}/'.format(self.API_URL),
+ headers={'X-Total-Count': str(len(self.domain))})
+ provider = SelectelProvider(123, 'test_token')
+ zone = Zone('unit.tests.', [])
+
+ exist_record = Record.new(zone, '', {
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': ['1.1.1.1', '2.2.2.2']
+ })
+ new = Record.new(zone, '', {
+ 'ttl': 10,
+ 'type': 'A',
+ 'values': ['1.1.1.1', '2.2.2.2']
+ })
+ change = Update(exist_record, new)
+
+ include_change = provider._include_change(change)
+
+ self.assertFalse(include_change)
diff --git a/tests/test_octodns_provider_transip.py b/tests/test_octodns_provider_transip.py
new file mode 100644
index 0000000..f792085
--- /dev/null
+++ b/tests/test_octodns_provider_transip.py
@@ -0,0 +1,276 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from os.path import dirname, join
+from six import text_type
+
+from suds import WebFault
+
+from unittest import TestCase
+
+from octodns.provider.transip import TransipProvider
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+from transip.service.domain import DomainService
+from transip.service.objects import DnsEntry
+
+
+class MockFault(object):
+ faultstring = ""
+ faultcode = ""
+
+ def __init__(self, code, string, *args, **kwargs):
+ self.faultstring = string
+ self.faultcode = code
+
+
+class MockResponse(object):
+ dnsEntries = []
+
+
+class MockDomainService(DomainService):
+
+ def __init__(self, *args, **kwargs):
+ super(MockDomainService, self).__init__('MockDomainService', *args,
+ **kwargs)
+ self.mockupEntries = []
+
+ def mockup(self, records):
+
+ provider = TransipProvider('', '', '')
+
+ _dns_entries = []
+ for record in records:
+ if record._type in provider.SUPPORTS:
+ entries_for = getattr(provider,
+ '_entries_for_{}'.format(record._type))
+
+ # Root records have '@' as name
+ name = record.name
+ if name == '':
+ name = provider.ROOT_RECORD
+
+ _dns_entries.extend(entries_for(name, record))
+
+ # NS is not supported as a DNS Entry,
+ # so it should cover the if statement
+ _dns_entries.append(
+ DnsEntry('@', '3600', 'NS', 'ns01.transip.nl.'))
+
+ self.mockupEntries = _dns_entries
+
+ # Skips authentication layer and returns the entries loaded by "Mockup"
+ def get_info(self, domain_name):
+
+ # Special 'domain' to trigger error
+ if str(domain_name) == str('notfound.unit.tests'):
+ self.raiseZoneNotFound()
+
+ result = MockResponse()
+ result.dnsEntries = self.mockupEntries
+ return result
+
+ def set_dns_entries(self, domain_name, dns_entries):
+
+ # Special 'domain' to trigger error
+ if str(domain_name) == str('failsetdns.unit.tests'):
+ self.raiseSaveError()
+
+ return True
+
+ def raiseZoneNotFound(self):
+ fault = MockFault(str('102'), '102 is zone not found')
+ document = {}
+ raise WebFault(fault, document)
+
+ def raiseInvalidAuth(self):
+ fault = MockFault(str('200'), '200 is invalid auth')
+ document = {}
+ raise WebFault(fault, document)
+
+ def raiseSaveError(self):
+ fault = MockFault(str('200'), '202 random error')
+ document = {}
+ raise WebFault(fault, document)
+
+
+class TestTransipProvider(TestCase):
+
+ bogus_key = str("""-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA0U5HGCkLrz423IyUf3u4cKN2WrNz1x5KNr6PvH2M/zxas+zB
+elbxkdT3AQ+wmfcIvOuTmFRTHv35q2um1aBrPxVw+2s+lWo28VwIRttwIB1vIeWu
+lSBnkEZQRLyPI2tH0i5QoMX4CVPf9rvij3Uslimi84jdzDfPFIh6jZ6C8nLipOTG
+0IMhge1ofVfB0oSy5H+7PYS2858QLAf5ruYbzbAxZRivS402wGmQ0d0Lc1KxraAj
+kiMM5yj/CkH/Vm2w9I6+tLFeASE4ub5HCP5G/ig4dbYtqZMQMpqyAbGxd5SOVtyn
+UHagAJUxf8DT3I8PyjEHjxdOPUsxNyRtepO/7QIDAQABAoIBAQC7fiZ7gxE/ezjD
+2n6PsHFpHVTBLS2gzzZl0dCKZeFvJk6ODJDImaeuHhrh7X8ifMNsEI9XjnojMhl8
+MGPzy88mZHugDNK0H8B19x5G8v1/Fz7dG5WHas660/HFkS+b59cfdXOugYiOOn9O
+08HBBpLZNRUOmVUuQfQTjapSwGLG8PocgpyRD4zx0LnldnJcqYCxwCdev+AAsPnq
+ibNtOd/MYD37w9MEGcaxLE8wGgkv8yd97aTjkgE+tp4zsM4QE4Rag133tsLLNznT
+4Qr/of15M3NW/DXq/fgctyRcJjZpU66eCXLCz2iRTnLyyxxDC2nwlxKbubV+lcS0
+S4hbfd/BAoGBAO8jXxEaiybR0aIhhSR5esEc3ymo8R8vBN3ZMJ+vr5jEPXr/ZuFj
+/R4cZ2XV3VoQJG0pvIOYVPZ5DpJM7W+zSXtJ/7bLXy4Bnmh/rc+YYgC+AXQoLSil
+iD2OuB2xAzRAK71DVSO0kv8gEEXCersPT2i6+vC2GIlJvLcYbOdRKWGxAoGBAOAQ
+aJbRLtKujH+kMdoMI7tRlL8XwI+SZf0FcieEu//nFyerTePUhVgEtcE+7eQ7hyhG
+fIXUFx/wALySoqFzdJDLc8U8pTLhbUaoLOTjkwnCTKQVprhnISqQqqh/0U5u47IE
+RWzWKN6OHb0CezNTq80Dr6HoxmPCnJHBHn5LinT9AoGAQSpvZpbIIqz8pmTiBl2A
+QQ2gFpcuFeRXPClKYcmbXVLkuhbNL1BzEniFCLAt4LQTaRf9ghLJ3FyCxwVlkpHV
+zV4N6/8hkcTpKOraL38D/dXJSaEFJVVuee/hZl3tVJjEEpA9rDwx7ooLRSdJEJ6M
+ciq55UyKBSdt4KssSiDI2RECgYBL3mJ7xuLy5bWfNsrGiVvD/rC+L928/5ZXIXPw
+26oI0Yfun7ulDH4GOroMcDF/GYT/Zzac3h7iapLlR0WYI47xxGI0A//wBZLJ3QIu
+krxkDo2C9e3Y/NqnHgsbOQR3aWbiDT4wxydZjIeXS3LKA2fl6Hyc90PN3cTEOb8I
+hq2gRQKBgEt0SxhhtyB93SjgTzmUZZ7PiEf0YJatfM6cevmjWHexrZH+x31PB72s
+fH2BQyTKKzoCLB1k/6HRaMnZdrWyWSZ7JKz3AHJ8+58d0Hr8LTrzDM1L6BbjeDct
+N4OiVz1I3rbZGYa396lpxO6ku8yCglisL1yrSP6DdEUp66ntpKVd
+-----END RSA PRIVATE KEY-----""")
+
+ def make_expected(self):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test', join(dirname(__file__), 'config'))
+ source.populate(expected)
+ return expected
+
+ def test_init(self):
+ with self.assertRaises(Exception) as ctx:
+ TransipProvider('test', 'unittest')
+
+ self.assertEquals(
+ str('Missing `key` of `key_file` parameter in config'),
+ str(ctx.exception))
+
+ TransipProvider('test', 'unittest', key=self.bogus_key)
+
+ # Existence and content of the key is tested in the SDK on client call
+ TransipProvider('test', 'unittest', key_file='/fake/path')
+
+ def test_populate(self):
+ _expected = self.make_expected()
+
+ # Unhappy Plan - Not authenticated
+ # Live test against API, will fail in an unauthorized error
+ with self.assertRaises(WebFault) as ctx:
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone, True)
+
+ self.assertEquals(str('WebFault'),
+ str(ctx.exception.__class__.__name__))
+
+ self.assertEquals(str('200'), ctx.exception.fault.faultcode)
+
+ # Unhappy Plan - Zone does not exists
+ # Will trigger an exception if provider is used as a target for a
+ # non-existing zone
+ with self.assertRaises(Exception) as ctx:
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ zone = Zone('notfound.unit.tests.', [])
+ provider.populate(zone, True)
+
+ self.assertEquals(str('TransipNewZoneException'),
+ str(ctx.exception.__class__.__name__))
+
+ self.assertEquals(
+ 'populate: (102) Transip used as target' +
+ ' for non-existing zone: notfound.unit.tests.',
+ text_type(ctx.exception))
+
+ # Happy Plan - Zone does not exists
+ # Won't trigger an exception if provider is NOT used as a target for a
+ # non-existing zone.
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ zone = Zone('notfound.unit.tests.', [])
+ provider.populate(zone, False)
+
+ # Happy Plan - Populate with mockup records
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ provider._client.mockup(_expected.records)
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone, False)
+
+ # Transip allows relative values for types like cname, mx.
+ # Test if these are correctly appended with the domain
+ provider._currentZone = zone
+ self.assertEquals("www.unit.tests.", provider._parse_to_fqdn("www"))
+ self.assertEquals("www.unit.tests.",
+ provider._parse_to_fqdn("www.unit.tests."))
+ self.assertEquals("www.sub.sub.sub.unit.tests.",
+ provider._parse_to_fqdn("www.sub.sub.sub"))
+ self.assertEquals("unit.tests.",
+ provider._parse_to_fqdn("@"))
+
+ # Happy Plan - Even if the zone has no records the zone should exist
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ zone = Zone('unit.tests.', [])
+ exists = provider.populate(zone, True)
+ self.assertTrue(exists, 'populate should return true')
+
+ return
+
+ def test_plan(self):
+ _expected = self.make_expected()
+
+ # Test Happy plan, only create
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ plan = provider.plan(_expected)
+
+ self.assertEqual(12, plan.change_counts['Create'])
+ self.assertEqual(0, plan.change_counts['Update'])
+ self.assertEqual(0, plan.change_counts['Delete'])
+
+ return
+
+ def test_apply(self):
+ _expected = self.make_expected()
+
+ # Test happy flow. Create all supported records
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ plan = provider.plan(_expected)
+ self.assertEqual(12, len(plan.changes))
+ changes = provider.apply(plan)
+ self.assertEqual(changes, len(plan.changes))
+
+ # Test unhappy flow. Trigger 'not found error' in apply stage
+ # This should normally not happen as populate will capture it first
+ # but just in case.
+ changes = [] # reset changes
+ with self.assertRaises(Exception) as ctx:
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ plan = provider.plan(_expected)
+ plan.desired.name = 'notfound.unit.tests.'
+ changes = provider.apply(plan)
+
+ # Changes should not be set due to an Exception
+ self.assertEqual([], changes)
+
+ self.assertEquals(str('WebFault'),
+ str(ctx.exception.__class__.__name__))
+
+ self.assertEquals(str('102'), ctx.exception.fault.faultcode)
+
+ # Test unhappy flow. Trigger a unrecoverable error while saving
+ _expected = self.make_expected() # reset expected
+ changes = [] # reset changes
+
+ with self.assertRaises(Exception) as ctx:
+ provider = TransipProvider('test', 'unittest', self.bogus_key)
+ provider._client = MockDomainService('unittest', self.bogus_key)
+ plan = provider.plan(_expected)
+ plan.desired.name = 'failsetdns.unit.tests.'
+ changes = provider.apply(plan)
+
+ # Changes should not be set due to an Exception
+ self.assertEqual([], changes)
+
+ self.assertEquals(str('TransipException'),
+ str(ctx.exception.__class__.__name__))
diff --git a/tests/test_octodns_provider_yaml.py b/tests/test_octodns_provider_yaml.py
index 74261de..f858c05 100644
--- a/tests/test_octodns_provider_yaml.py
+++ b/tests/test_octodns_provider_yaml.py
@@ -5,13 +5,17 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from os.path import dirname, isfile, join
+from os import makedirs
+from os.path import basename, dirname, isdir, isfile, join
from unittest import TestCase
+from six import text_type
from yaml import safe_load
from yaml.constructor import ConstructorError
from octodns.record import Create
-from octodns.provider.yaml import YamlProvider
+from octodns.provider.base import Plan
+from octodns.provider.yaml import _list_all_yaml_files, \
+ SplitYamlProvider, YamlProvider
from octodns.zone import SubzoneRecordException, Zone
from helpers import TemporaryDirectory
@@ -54,8 +58,8 @@ class TestYamlProvider(TestCase):
# We add everything
plan = target.plan(zone)
- self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
- plan.changes)))
+ self.assertEquals(15, len([c for c in plan.changes
+ if isinstance(c, Create)]))
self.assertFalse(isfile(yaml_file))
# Now actually do it
@@ -64,8 +68,8 @@ class TestYamlProvider(TestCase):
# Dynamic plan
plan = target.plan(dynamic_zone)
- self.assertEquals(5, len(filter(lambda c: isinstance(c, Create),
- plan.changes)))
+ self.assertEquals(5, len([c for c in plan.changes
+ if isinstance(c, Create)]))
self.assertFalse(isfile(dynamic_yaml_file))
# Apply it
self.assertEquals(5, target.apply(plan))
@@ -76,16 +80,15 @@ class TestYamlProvider(TestCase):
target.populate(reloaded)
self.assertDictEqual(
{'included': ['test']},
- filter(
- lambda x: x.name == 'included', reloaded.records
- )[0]._octodns)
+ [x for x in reloaded.records
+ if x.name == 'included'][0]._octodns)
self.assertFalse(zone.changes(reloaded, target=source))
# A 2nd sync should still create everything
plan = target.plan(zone)
- self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
- plan.changes)))
+ self.assertEquals(15, len([c for c in plan.changes
+ if isinstance(c, Create)]))
with open(yaml_file) as fh:
data = safe_load(fh.read())
@@ -113,7 +116,7 @@ class TestYamlProvider(TestCase):
self.assertTrue('value' in data.pop('www.sub'))
# make sure nothing is left
- self.assertEquals([], data.keys())
+ self.assertEquals([], list(data.keys()))
with open(dynamic_yaml_file) as fh:
data = safe_load(fh.read())
@@ -142,7 +145,7 @@ class TestYamlProvider(TestCase):
# self.assertTrue('dynamic' in dyna)
# make sure nothing is left
- self.assertEquals([], data.keys())
+ self.assertEquals([], list(data.keys()))
def test_empty(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
@@ -175,4 +178,228 @@ class TestYamlProvider(TestCase):
with self.assertRaises(SubzoneRecordException) as ctx:
source.populate(zone)
self.assertEquals('Record www.sub.unit.tests. is under a managed '
- 'subzone', ctx.exception.message)
+ 'subzone', text_type(ctx.exception))
+
+
+class TestSplitYamlProvider(TestCase):
+
+ def test_list_all_yaml_files(self):
+ yaml_files = ('foo.yaml', '1.yaml', '$unit.tests.yaml')
+ all_files = ('something', 'else', '1', '$$', '-f') + yaml_files
+ all_dirs = ('dir1', 'dir2/sub', 'tricky.yaml')
+
+ with TemporaryDirectory() as td:
+ directory = join(td.dirname)
+
+ # Create some files, some of them with a .yaml extension, all of
+ # them empty.
+ for emptyfile in all_files:
+ open(join(directory, emptyfile), 'w').close()
+ # Do the same for some fake directories
+ for emptydir in all_dirs:
+ makedirs(join(directory, emptydir))
+
+ # This isn't great, but given the variable nature of the temp dir
+ # names, it's necessary.
+ d = list(basename(f) for f in _list_all_yaml_files(directory))
+ self.assertEqual(len(yaml_files), len(d))
+
+ def test_zone_directory(self):
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'))
+
+ zone = Zone('unit.tests.', [])
+
+ self.assertEqual(
+ join(dirname(__file__), 'config/split/unit.tests.'),
+ source._zone_directory(zone))
+
+ def test_apply_handles_existing_zone_directory(self):
+ with TemporaryDirectory() as td:
+ provider = SplitYamlProvider('test', join(td.dirname, 'config'))
+ makedirs(join(td.dirname, 'config', 'does.exist.'))
+
+ zone = Zone('does.exist.', [])
+ self.assertTrue(isdir(provider._zone_directory(zone)))
+ provider.apply(Plan(None, zone, [], True))
+ self.assertTrue(isdir(provider._zone_directory(zone)))
+
+ def test_provider(self):
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'))
+
+ zone = Zone('unit.tests.', [])
+ dynamic_zone = Zone('dynamic.tests.', [])
+
+ # With target we don't add anything
+ source.populate(zone, target=source)
+ self.assertEquals(0, len(zone.records))
+
+ # without it we see everything
+ source.populate(zone)
+ self.assertEquals(18, len(zone.records))
+
+ source.populate(dynamic_zone)
+ self.assertEquals(5, len(dynamic_zone.records))
+
+ with TemporaryDirectory() as td:
+ # Add some subdirs to make sure that it can create them
+ directory = join(td.dirname, 'sub', 'dir')
+ zone_dir = join(directory, 'unit.tests.')
+ dynamic_zone_dir = join(directory, 'dynamic.tests.')
+ target = SplitYamlProvider('test', directory)
+
+ # We add everything
+ plan = target.plan(zone)
+ self.assertEquals(15, len([c for c in plan.changes
+ if isinstance(c, Create)]))
+ self.assertFalse(isdir(zone_dir))
+
+ # Now actually do it
+ self.assertEquals(15, target.apply(plan))
+
+ # Dynamic plan
+ plan = target.plan(dynamic_zone)
+ self.assertEquals(5, len([c for c in plan.changes
+ if isinstance(c, Create)]))
+ self.assertFalse(isdir(dynamic_zone_dir))
+ # Apply it
+ self.assertEquals(5, target.apply(plan))
+ self.assertTrue(isdir(dynamic_zone_dir))
+
+ # There should be no changes after the round trip
+ reloaded = Zone('unit.tests.', [])
+ target.populate(reloaded)
+ self.assertDictEqual(
+ {'included': ['test']},
+ [x for x in reloaded.records
+ if x.name == 'included'][0]._octodns)
+
+ self.assertFalse(zone.changes(reloaded, target=source))
+
+ # A 2nd sync should still create everything
+ plan = target.plan(zone)
+ self.assertEquals(15, len([c for c in plan.changes
+ if isinstance(c, Create)]))
+
+ yaml_file = join(zone_dir, '$unit.tests.yaml')
+ self.assertTrue(isfile(yaml_file))
+ with open(yaml_file) as fh:
+ data = safe_load(fh.read())
+ roots = sorted(data.pop(''), key=lambda r: r['type'])
+ self.assertTrue('values' in roots[0]) # A
+ self.assertTrue('geo' in roots[0]) # geo made the trip
+ self.assertTrue('value' in roots[1]) # CAA
+ self.assertTrue('values' in roots[2]) # SSHFP
+
+ # These records are stored as plural "values." Check each file to
+ # ensure correctness.
+ for record_name in ('_srv._tcp', 'mx', 'naptr', 'sub', 'txt'):
+ yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
+ self.assertTrue(isfile(yaml_file))
+ with open(yaml_file) as fh:
+ data = safe_load(fh.read())
+ self.assertTrue('values' in data.pop(record_name))
+
+ # These are stored as singular "value." Again, check each file.
+ for record_name in ('aaaa', 'cname', 'included', 'ptr', 'spf',
+ 'www.sub', 'www'):
+ yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
+ self.assertTrue(isfile(yaml_file))
+ with open(yaml_file) as fh:
+ data = safe_load(fh.read())
+ self.assertTrue('value' in data.pop(record_name))
+
+ # Again with the plural, this time checking dynamic.tests.
+ for record_name in ('a', 'aaaa', 'real-ish-a'):
+ yaml_file = join(
+ dynamic_zone_dir, '{}.yaml'.format(record_name))
+ self.assertTrue(isfile(yaml_file))
+ with open(yaml_file) as fh:
+ data = safe_load(fh.read())
+ dyna = data.pop(record_name)
+ self.assertTrue('values' in dyna)
+ self.assertTrue('dynamic' in dyna)
+
+ # Singular again.
+ for record_name in ('cname', 'simple-weighted'):
+ yaml_file = join(
+ dynamic_zone_dir, '{}.yaml'.format(record_name))
+ self.assertTrue(isfile(yaml_file))
+ with open(yaml_file) as fh:
+ data = safe_load(fh.read())
+ dyna = data.pop(record_name)
+ self.assertTrue('value' in dyna)
+ self.assertTrue('dynamic' in dyna)
+
+ def test_empty(self):
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'))
+
+ zone = Zone('empty.', [])
+
+ # without it we see everything
+ source.populate(zone)
+ self.assertEquals(0, len(zone.records))
+
+ def test_unsorted(self):
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'))
+
+ zone = Zone('unordered.', [])
+
+ with self.assertRaises(ConstructorError):
+ source.populate(zone)
+
+ zone = Zone('unordered.', [])
+
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'),
+ enforce_order=False)
+ # no exception
+ source.populate(zone)
+ self.assertEqual(2, len(zone.records))
+
+ def test_subzone_handling(self):
+ source = SplitYamlProvider(
+ 'test', join(dirname(__file__), 'config/split'))
+
+ # If we add `sub` as a sub-zone we'll reject `www.sub`
+ zone = Zone('unit.tests.', ['sub'])
+ with self.assertRaises(SubzoneRecordException) as ctx:
+ source.populate(zone)
+ self.assertEquals('Record www.sub.unit.tests. is under a managed '
+ 'subzone', text_type(ctx.exception))
+
+
+class TestOverridingYamlProvider(TestCase):
+
+ def test_provider(self):
+ config = join(dirname(__file__), 'config')
+ override_config = join(dirname(__file__), 'config', 'override')
+ base = YamlProvider('base', config, populate_should_replace=False)
+ override = YamlProvider('test', override_config,
+ populate_should_replace=True)
+
+ zone = Zone('dynamic.tests.', [])
+
+ # Load the base, should see the 5 records
+ base.populate(zone)
+ got = {r.name: r for r in zone.records}
+ self.assertEquals(5, len(got))
+ # We get the "dynamic" A from the base config
+ self.assertTrue('dynamic' in got['a'].data)
+ # No added
+ self.assertFalse('added' in got)
+
+ # Load the overrides, should replace one and add 1
+ override.populate(zone)
+ got = {r.name: r for r in zone.records}
+ self.assertEquals(6, len(got))
+ # 'a' was replaced with a generic record
+ self.assertEquals({
+ 'ttl': 3600,
+ 'values': ['4.4.4.4', '5.5.5.5']
+ }, got['a'].data)
+ # And we have the new one
+ self.assertTrue('added' in got)
diff --git a/tests/test_octodns_record.py b/tests/test_octodns_record.py
index 4f05126..e2917b3 100644
--- a/tests/test_octodns_record.py
+++ b/tests/test_octodns_record.py
@@ -5,12 +5,14 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
+from six import text_type
from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, AliasRecord, CaaRecord, \
- CnameRecord, Create, Delete, GeoValue, MxRecord, NaptrRecord, \
- NaptrValue, NsRecord, Record, SshfpRecord, SpfRecord, SrvRecord, \
- TxtRecord, Update, ValidationError, _Dynamic, _DynamicPool, _DynamicRule
+ CaaValue, CnameRecord, Create, Delete, GeoValue, MxRecord, MxValue, \
+ NaptrRecord, NaptrValue, NsRecord, PtrRecord, Record, SshfpRecord, \
+ SshfpValue, SpfRecord, SrvRecord, SrvValue, TxtRecord, Update, \
+ ValidationError, _Dynamic, _DynamicPool, _DynamicRule
from octodns.zone import Zone
from helpers import DynamicProvider, GeoProvider, SimpleProvider
@@ -27,6 +29,45 @@ class TestRecord(TestCase):
})
self.assertEquals('mixedcase', record.name)
+ def test_alias_lowering_value(self):
+ upper_record = AliasRecord(self.zone, 'aliasUppwerValue', {
+ 'ttl': 30,
+ 'type': 'ALIAS',
+ 'value': 'GITHUB.COM',
+ })
+ lower_record = AliasRecord(self.zone, 'aliasLowerValue', {
+ 'ttl': 30,
+ 'type': 'ALIAS',
+ 'value': 'github.com',
+ })
+ self.assertEquals(upper_record.value, lower_record.value)
+
+ def test_cname_lowering_value(self):
+ upper_record = CnameRecord(self.zone, 'CnameUppwerValue', {
+ 'ttl': 30,
+ 'type': 'CNAME',
+ 'value': 'GITHUB.COM',
+ })
+ lower_record = CnameRecord(self.zone, 'CnameLowerValue', {
+ 'ttl': 30,
+ 'type': 'CNAME',
+ 'value': 'github.com',
+ })
+ self.assertEquals(upper_record.value, lower_record.value)
+
+ def test_ptr_lowering_value(self):
+ upper_record = PtrRecord(self.zone, 'PtrUppwerValue', {
+ 'ttl': 30,
+ 'type': 'PTR',
+ 'value': 'GITHUB.COM',
+ })
+ lower_record = PtrRecord(self.zone, 'PtrLowerValue', {
+ 'ttl': 30,
+ 'type': 'PTR',
+ 'value': 'github.com',
+ })
+ self.assertEquals(upper_record.value, lower_record.value)
+
def test_a_and_record(self):
a_values = ['1.2.3.4', '2.2.3.4']
a_data = {'ttl': 30, 'values': a_values}
@@ -354,6 +395,17 @@ class TestRecord(TestCase):
self.assertEquals(b_value['exchange'], b.values[0].exchange)
self.assertEquals(b_data, b.data)
+ a_upper_values = [{
+ 'preference': 10,
+ 'exchange': 'SMTP1.'
+ }, {
+ 'priority': 20,
+ 'value': 'SMTP2.'
+ }]
+ a_upper_data = {'ttl': 30, 'values': a_upper_values}
+ a_upper = MxRecord(self.zone, 'a', a_upper_data)
+ self.assertEquals(a_upper.data, a.data)
+
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
@@ -432,113 +484,140 @@ class TestRecord(TestCase):
# full sorting
# equivalent
b_naptr_value = b.values[0]
- self.assertEquals(0, b_naptr_value.__cmp__(b_naptr_value))
+ self.assertTrue(b_naptr_value == b_naptr_value)
+ self.assertFalse(b_naptr_value != b_naptr_value)
+ self.assertTrue(b_naptr_value <= b_naptr_value)
+ self.assertTrue(b_naptr_value >= b_naptr_value)
# by order
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 10,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 40,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
+ }))
# by preference
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 10,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 40,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
+ }))
# by flags
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'A',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'Z',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
- })))
+ }))
# by service
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'A',
'regexp': 'O',
'replacement': 'x',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'Z',
'regexp': 'O',
'replacement': 'x',
- })))
+ }))
# by regexp
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'A',
'replacement': 'x',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'Z',
'replacement': 'x',
- })))
+ }))
# by replacement
- self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
+ self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'a',
- })))
- self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
+ }))
+ self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'z',
- })))
+ }))
# __repr__ doesn't blow up
a.__repr__()
+ # Hash
+ v = NaptrValue({
+ 'order': 30,
+ 'preference': 31,
+ 'flags': 'M',
+ 'service': 'N',
+ 'regexp': 'O',
+ 'replacement': 'z',
+ })
+ o = NaptrValue({
+ 'order': 30,
+ 'preference': 32,
+ 'flags': 'M',
+ 'service': 'N',
+ 'regexp': 'O',
+ 'replacement': 'z',
+ })
+ values = set()
+ values.add(v)
+ self.assertTrue(v in values)
+ self.assertFalse(o in values)
+ values.add(o)
+ self.assertTrue(o in values)
+
def test_ns(self):
a_values = ['5.6.7.8.', '6.7.8.9.', '7.8.9.0.']
a_data = {'ttl': 30, 'values': a_values}
@@ -708,14 +787,14 @@ class TestRecord(TestCase):
# Missing type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {})
- self.assertTrue('missing type' in ctx.exception.message)
+ self.assertTrue('missing type' in text_type(ctx.exception))
# Unknown type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {
'type': 'XXX',
})
- self.assertTrue('Unknown record type' in ctx.exception.message)
+ self.assertTrue('Unknown record type' in text_type(ctx.exception))
def test_change(self):
existing = Record.new(self.zone, 'txt', {
@@ -746,6 +825,38 @@ class TestRecord(TestCase):
self.assertEquals(values, geo.values)
self.assertEquals(['NA-US', 'NA'], list(geo.parents))
+ a = GeoValue('NA-US-CA', values)
+ b = GeoValue('AP-JP', values)
+ c = GeoValue('NA-US-CA', ['2.3.4.5'])
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+
+ self.assertTrue(a > b)
+ self.assertTrue(a < c)
+ self.assertTrue(b < a)
+ self.assertTrue(b < c)
+ self.assertTrue(c > a)
+ self.assertTrue(c > b)
+
+ self.assertTrue(a >= a)
+ self.assertTrue(a >= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(b <= a)
+ self.assertTrue(b <= b)
+ self.assertTrue(b <= c)
+ self.assertTrue(c > a)
+ self.assertTrue(c > b)
+ self.assertTrue(c >= b)
+
def test_healthcheck(self):
new = Record.new(self.zone, 'a', {
'ttl': 44,
@@ -775,6 +886,40 @@ class TestRecord(TestCase):
self.assertEquals('HTTPS', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port)
+ def test_healthcheck_tcp(self):
+ new = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'octodns': {
+ 'healthcheck': {
+ 'path': '/ignored',
+ 'host': 'completely.ignored',
+ 'protocol': 'TCP',
+ 'port': 8080,
+ }
+ }
+ })
+ self.assertIsNone(new.healthcheck_path)
+ self.assertIsNone(new.healthcheck_host)
+ self.assertEquals('TCP', new.healthcheck_protocol)
+ self.assertEquals(8080, new.healthcheck_port)
+
+ new = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'octodns': {
+ 'healthcheck': {
+ 'protocol': 'TCP',
+ }
+ }
+ })
+ self.assertIsNone(new.healthcheck_path)
+ self.assertIsNone(new.healthcheck_host)
+ self.assertEquals('TCP', new.healthcheck_protocol)
+ self.assertEquals(443, new.healthcheck_port)
+
def test_inored(self):
new = Record.new(self.zone, 'txt', {
'ttl': 44,
@@ -801,11 +946,339 @@ class TestRecord(TestCase):
})
self.assertFalse(new.ignored)
+ def test_ordering_functions(self):
+ a = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ b = Record.new(self.zone, 'b', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ c = Record.new(self.zone, 'c', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ aaaa = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'AAAA',
+ 'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
+ })
+
+ self.assertEquals(a, a)
+ self.assertEquals(b, b)
+ self.assertEquals(c, c)
+ self.assertEquals(aaaa, aaaa)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(a, aaaa)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(b, aaaa)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+ self.assertNotEqual(c, aaaa)
+ self.assertNotEqual(aaaa, a)
+ self.assertNotEqual(aaaa, b)
+ self.assertNotEqual(aaaa, c)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+ self.assertTrue(a < aaaa)
+ self.assertTrue(b > a)
+ self.assertTrue(b < c)
+ self.assertTrue(b > aaaa)
+ self.assertTrue(c > a)
+ self.assertTrue(c > b)
+ self.assertTrue(c > aaaa)
+ self.assertTrue(aaaa > a)
+ self.assertTrue(aaaa < b)
+ self.assertTrue(aaaa < c)
+
+ self.assertTrue(a <= a)
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= aaaa)
+ self.assertTrue(b >= a)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= c)
+ self.assertTrue(b >= aaaa)
+ self.assertTrue(c >= a)
+ self.assertTrue(c >= b)
+ self.assertTrue(c >= c)
+ self.assertTrue(c >= aaaa)
+ self.assertTrue(aaaa >= a)
+ self.assertTrue(aaaa <= b)
+ self.assertTrue(aaaa <= c)
+ self.assertTrue(aaaa <= aaaa)
+
+ def test_caa_value(self):
+ a = CaaValue({'flags': 0, 'tag': 'a', 'value': 'v'})
+ b = CaaValue({'flags': 1, 'tag': 'a', 'value': 'v'})
+ c = CaaValue({'flags': 0, 'tag': 'c', 'value': 'v'})
+ d = CaaValue({'flags': 0, 'tag': 'a', 'value': 'z'})
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+ self.assertEqual(d, d)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(a, d)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(b, d)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+ self.assertNotEqual(c, d)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+ self.assertTrue(a < d)
+
+ self.assertTrue(b > a)
+ self.assertTrue(b > c)
+ self.assertTrue(b > d)
+
+ self.assertTrue(c > a)
+ self.assertTrue(c < b)
+ self.assertTrue(c > d)
+
+ self.assertTrue(d > a)
+ self.assertTrue(d < b)
+ self.assertTrue(d < c)
+
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= d)
+ self.assertTrue(a <= a)
+ self.assertTrue(a >= a)
+
+ self.assertTrue(b >= a)
+ self.assertTrue(b >= c)
+ self.assertTrue(b >= d)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= b)
+
+ self.assertTrue(c >= a)
+ self.assertTrue(c <= b)
+ self.assertTrue(c >= d)
+ self.assertTrue(c >= c)
+ self.assertTrue(c <= c)
+
+ self.assertTrue(d >= a)
+ self.assertTrue(d <= b)
+ self.assertTrue(d <= c)
+ self.assertTrue(d >= d)
+ self.assertTrue(d <= d)
+
+ def test_mx_value(self):
+ a = MxValue({'preference': 0, 'priority': 'a', 'exchange': 'v',
+ 'value': '1'})
+ b = MxValue({'preference': 10, 'priority': 'a', 'exchange': 'v',
+ 'value': '2'})
+ c = MxValue({'preference': 0, 'priority': 'b', 'exchange': 'z',
+ 'value': '3'})
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+
+ self.assertTrue(b > a)
+ self.assertTrue(b > c)
+
+ self.assertTrue(c > a)
+ self.assertTrue(c < b)
+
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= a)
+ self.assertTrue(a >= a)
+
+ self.assertTrue(b >= a)
+ self.assertTrue(b >= c)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= b)
+
+ self.assertTrue(c >= a)
+ self.assertTrue(c <= b)
+ self.assertTrue(c >= c)
+ self.assertTrue(c <= c)
+
+ def test_sshfp_value(self):
+ a = SshfpValue({'algorithm': 0, 'fingerprint_type': 0,
+ 'fingerprint': 'abcd'})
+ b = SshfpValue({'algorithm': 1, 'fingerprint_type': 0,
+ 'fingerprint': 'abcd'})
+ c = SshfpValue({'algorithm': 0, 'fingerprint_type': 1,
+ 'fingerprint': 'abcd'})
+ d = SshfpValue({'algorithm': 0, 'fingerprint_type': 0,
+ 'fingerprint': 'bcde'})
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+ self.assertEqual(d, d)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(a, d)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(b, d)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+ self.assertNotEqual(c, d)
+ self.assertNotEqual(d, a)
+ self.assertNotEqual(d, b)
+ self.assertNotEqual(d, c)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+
+ self.assertTrue(b > a)
+ self.assertTrue(b > c)
+
+ self.assertTrue(c > a)
+ self.assertTrue(c < b)
+
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= a)
+ self.assertTrue(a >= a)
+
+ self.assertTrue(b >= a)
+ self.assertTrue(b >= c)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= b)
+
+ self.assertTrue(c >= a)
+ self.assertTrue(c <= b)
+ self.assertTrue(c >= c)
+ self.assertTrue(c <= c)
+
+ # Hash
+ values = set()
+ values.add(a)
+ self.assertTrue(a in values)
+ self.assertFalse(b in values)
+ values.add(b)
+ self.assertTrue(b in values)
+
+ def test_srv_value(self):
+ a = SrvValue({'priority': 0, 'weight': 0, 'port': 0, 'target': 'foo.'})
+ b = SrvValue({'priority': 1, 'weight': 0, 'port': 0, 'target': 'foo.'})
+ c = SrvValue({'priority': 0, 'weight': 2, 'port': 0, 'target': 'foo.'})
+ d = SrvValue({'priority': 0, 'weight': 0, 'port': 3, 'target': 'foo.'})
+ e = SrvValue({'priority': 0, 'weight': 0, 'port': 0, 'target': 'mmm.'})
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+ self.assertEqual(d, d)
+ self.assertEqual(e, e)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(a, d)
+ self.assertNotEqual(a, e)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(b, d)
+ self.assertNotEqual(b, e)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+ self.assertNotEqual(c, d)
+ self.assertNotEqual(c, e)
+ self.assertNotEqual(d, a)
+ self.assertNotEqual(d, b)
+ self.assertNotEqual(d, c)
+ self.assertNotEqual(d, e)
+ self.assertNotEqual(e, a)
+ self.assertNotEqual(e, b)
+ self.assertNotEqual(e, c)
+ self.assertNotEqual(e, d)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+
+ self.assertTrue(b > a)
+ self.assertTrue(b > c)
+
+ self.assertTrue(c > a)
+ self.assertTrue(c < b)
+
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= a)
+ self.assertTrue(a >= a)
+
+ self.assertTrue(b >= a)
+ self.assertTrue(b >= c)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= b)
+
+ self.assertTrue(c >= a)
+ self.assertTrue(c <= b)
+ self.assertTrue(c >= c)
+ self.assertTrue(c <= c)
+
+ # Hash
+ values = set()
+ values.add(a)
+ self.assertTrue(a in values)
+ self.assertFalse(b in values)
+ values.add(b)
+ self.assertTrue(b in values)
+
class TestRecordValidation(TestCase):
zone = Zone('unit.tests.', [])
def test_base(self):
+ # fqdn length, DNS defines max as 253
+ with self.assertRaises(ValidationError) as ctx:
+ # The . will put this over the edge
+ name = 'x' * (253 - len(self.zone.name))
+ Record.new(self.zone, name, {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ reason = ctx.exception.reasons[0]
+ self.assertTrue(reason.startswith('invalid fqdn, "xxxx'))
+ self.assertTrue(reason.endswith('.unit.tests." is too long at 254'
+ ' chars, max is 253'))
+
+ # label length, DNS defines max as 63
+ with self.assertRaises(ValidationError) as ctx:
+ # The . will put this over the edge
+ name = 'x' * 64
+ Record.new(self.zone, name, {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ reason = ctx.exception.reasons[0]
+ self.assertTrue(reason.startswith('invalid name, "xxxx'))
+ self.assertTrue(reason.endswith('xxx" is too long at 64'
+ ' chars, max is 63'))
+
# no ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
@@ -1694,7 +2167,8 @@ class TestRecordValidation(TestCase):
'target': 'foo.bar.baz.'
}
})
- self.assertEquals(['invalid name'], ctx.exception.reasons)
+ self.assertEquals(['invalid name for SRV record'],
+ ctx.exception.reasons)
# missing priority
with self.assertRaises(ValidationError) as ctx:
@@ -2410,7 +2884,7 @@ class TestDynamicRecords(TestCase):
'weight': 1,
'value': '6.6.6.6',
}, {
- 'weight': 256,
+ 'weight': 16,
'value': '7.7.7.7',
}],
},
@@ -2434,7 +2908,7 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['invalid weight "256" in pool "three" value 2'],
+ self.assertEquals(['invalid weight "16" in pool "three" value 2'],
ctx.exception.reasons)
# invalid non-int weight
@@ -2636,7 +3110,7 @@ class TestDynamicRecords(TestCase):
'invalid IPv4 address "blip"',
], ctx.exception.reasons)
- # missing rules
+ # missing rules, and unused pools
a_data = {
'dynamic': {
'pools': {
@@ -2663,7 +3137,10 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['missing rules'], ctx.exception.reasons)
+ self.assertEquals([
+ 'missing rules',
+ 'unused pools: "one", "two"',
+ ], ctx.exception.reasons)
# empty rules
a_data = {
@@ -2693,7 +3170,10 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['missing rules'], ctx.exception.reasons)
+ self.assertEquals([
+ 'missing rules',
+ 'unused pools: "one", "two"',
+ ], ctx.exception.reasons)
# rules not a list/tuple
a_data = {
@@ -2723,7 +3203,10 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['rules must be a list'], ctx.exception.reasons)
+ self.assertEquals([
+ 'rules must be a list',
+ 'unused pools: "one", "two"',
+ ], ctx.exception.reasons)
# rule without pool
a_data = {
@@ -2757,7 +3240,10 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['rule 1 missing pool'], ctx.exception.reasons)
+ self.assertEquals([
+ 'rule 1 missing pool',
+ 'unused pools: "two"',
+ ], ctx.exception.reasons)
# rule with non-string pools
a_data = {
@@ -2792,10 +3278,12 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(['rule 1 invalid pool "[]"'],
- ctx.exception.reasons)
+ self.assertEquals([
+ 'rule 1 invalid pool "[]"',
+ 'unused pools: "two"',
+ ], ctx.exception.reasons)
- # rule references non-existant pool
+ # rule references non-existent pool
a_data = {
'dynamic': {
'pools': {
@@ -2814,7 +3302,7 @@ class TestDynamicRecords(TestCase):
},
'rules': [{
'geos': ['NA-US-CA'],
- 'pool': 'non-existant',
+ 'pool': 'non-existent',
}, {
'pool': 'one',
}],
@@ -2828,8 +3316,10 @@ class TestDynamicRecords(TestCase):
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
- self.assertEquals(["rule 1 undefined pool \"non-existant\""],
- ctx.exception.reasons)
+ self.assertEquals([
+ "rule 1 undefined pool \"non-existent\"",
+ 'unused pools: "two"',
+ ], ctx.exception.reasons)
# rule with invalid geos
a_data = {
@@ -2938,6 +3428,83 @@ class TestDynamicRecords(TestCase):
self.assertEquals(['rule 2 duplicate default'],
ctx.exception.reasons)
+ # repeated pool in rules
+ a_data = {
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '3.3.3.3',
+ }]
+ },
+ 'two': {
+ 'values': [{
+ 'value': '4.4.4.4',
+ }, {
+ 'value': '5.5.5.5',
+ }]
+ },
+ },
+ 'rules': [{
+ 'geos': ['EU'],
+ 'pool': 'two',
+ }, {
+ 'geos': ['AF'],
+ 'pool': 'one',
+ }, {
+ 'geos': ['OC'],
+ 'pool': 'one',
+ }],
+ },
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': [
+ '1.1.1.1',
+ '2.2.2.2',
+ ],
+ }
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'bad', a_data)
+ self.assertEquals(['rule 3 invalid, target pool "one" reused'],
+ ctx.exception.reasons)
+
+ # Repeated pool is OK if later one is a default
+ a_data = {
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '3.3.3.3',
+ }]
+ },
+ 'two': {
+ 'values': [{
+ 'value': '4.4.4.4',
+ }, {
+ 'value': '5.5.5.5',
+ }]
+ },
+ },
+ 'rules': [{
+ 'geos': ['EU-GB'],
+ 'pool': 'one',
+ }, {
+ 'geos': ['EU'],
+ 'pool': 'two',
+ }, {
+ 'pool': 'one',
+ }],
+ },
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': [
+ '1.1.1.1',
+ '2.2.2.2',
+ ],
+ }
+ # This should be valid, no exception
+ Record.new(self.zone, 'bad', a_data)
+
def test_dynamic_lenient(self):
# Missing pools
a_data = {
diff --git a/tests/test_octodns_record_geo.py b/tests/test_octodns_record_geo.py
index 5b7454c..35df6d5 100644
--- a/tests/test_octodns_record_geo.py
+++ b/tests/test_octodns_record_geo.py
@@ -77,4 +77,6 @@ class TestRecordGeoCodes(TestCase):
def test_province_to_code(self):
self.assertEquals('NA-US-OR', GeoCodes.province_to_code('OR'))
self.assertEquals('NA-US-KY', GeoCodes.province_to_code('KY'))
+ self.assertEquals('NA-CA-AB', GeoCodes.province_to_code('AB'))
+ self.assertEquals('NA-CA-BC', GeoCodes.province_to_code('BC'))
self.assertFalse(GeoCodes.province_to_code('XX'))
diff --git a/tests/test_octodns_source_axfr.py b/tests/test_octodns_source_axfr.py
index 9251113..bd25062 100644
--- a/tests/test_octodns_source_axfr.py
+++ b/tests/test_octodns_source_axfr.py
@@ -9,11 +9,13 @@ import dns.zone
from dns.exception import DNSException
from mock import patch
+from six import text_type
from unittest import TestCase
from octodns.source.axfr import AxfrSource, AxfrSourceZoneTransferFailed, \
ZoneFileSource, ZoneFileSourceLoadFailure
from octodns.zone import Zone
+from octodns.record import ValidationError
class TestAxfrSource(TestCase):
@@ -38,7 +40,7 @@ class TestAxfrSource(TestCase):
zone = Zone('unit.tests.', [])
self.source.populate(zone)
self.assertEquals('Unable to Perform Zone Transfer',
- ctx.exception.message)
+ text_type(ctx.exception))
class TestZoneFileSource(TestCase):
@@ -68,4 +70,17 @@ class TestZoneFileSource(TestCase):
zone = Zone('invalid.zone.', [])
self.source.populate(zone)
self.assertEquals('The DNS zone has no NS RRset at its origin.',
- ctx.exception.message)
+ text_type(ctx.exception))
+
+ # Records are not to RFC (lenient=False)
+ with self.assertRaises(ValidationError) as ctx:
+ zone = Zone('invalid.records.', [])
+ self.source.populate(zone)
+ self.assertEquals('Invalid record _invalid.invalid.records.\n'
+ ' - invalid name for SRV record',
+ text_type(ctx.exception))
+
+ # Records are not to RFC, but load anyhow (lenient=True)
+ invalid = Zone('invalid.records.', [])
+ self.source.populate(invalid, lenient=True)
+ self.assertEquals(12, len(invalid.records))
diff --git a/tests/test_octodns_source_tinydns.py b/tests/test_octodns_source_tinydns.py
index d2e0e21..3693e17 100644
--- a/tests/test_octodns_source_tinydns.py
+++ b/tests/test_octodns_source_tinydns.py
@@ -20,7 +20,7 @@ class TestTinyDnsFileSource(TestCase):
def test_populate_normal(self):
got = Zone('example.com.', [])
self.source.populate(got)
- self.assertEquals(11, len(got.records))
+ self.assertEquals(17, len(got.records))
expected = Zone('example.com.', [])
for name, data in (
@@ -86,6 +86,36 @@ class TestTinyDnsFileSource(TestCase):
'exchange': 'smtp-2-host.example.com.',
}]
}),
+ ('', {
+ 'type': 'TXT',
+ 'ttl': 300,
+ 'value': 'test TXT',
+ }),
+ ('colon', {
+ 'type': 'TXT',
+ 'ttl': 300,
+ 'value': 'test : TXT',
+ }),
+ ('nottl', {
+ 'type': 'TXT',
+ 'ttl': 3600,
+ 'value': 'nottl test TXT',
+ }),
+ ('ipv6-3', {
+ 'type': 'AAAA',
+ 'ttl': 300,
+ 'value': '2a02:1348:017c:d5d0:0024:19ff:fef3:5742',
+ }),
+ ('ipv6-6', {
+ 'type': 'AAAA',
+ 'ttl': 3600,
+ 'value': '2a02:1348:017c:d5d0:0024:19ff:fef3:5743',
+ }),
+ ('semicolon', {
+ 'type': 'TXT',
+ 'ttl': 300,
+ 'value': 'v=DKIM1\\; k=rsa\\; p=blah',
+ }),
):
record = Record.new(expected, name, data)
expected.add_record(record)
@@ -173,4 +203,4 @@ class TestTinyDnsFileSource(TestCase):
def test_ignores_subs(self):
got = Zone('example.com.', ['sub'])
self.source.populate(got)
- self.assertEquals(10, len(got.records))
+ self.assertEquals(16, len(got.records))
diff --git a/tests/test_octodns_yaml.py b/tests/test_octodns_yaml.py
index effe231..f211854 100644
--- a/tests/test_octodns_yaml.py
+++ b/tests/test_octodns_yaml.py
@@ -5,7 +5,7 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from StringIO import StringIO
+from six import StringIO
from unittest import TestCase
from yaml.constructor import ConstructorError
diff --git a/tests/test_octodns_zone.py b/tests/test_octodns_zone.py
index 2fff996..1d000f2 100644
--- a/tests/test_octodns_zone.py
+++ b/tests/test_octodns_zone.py
@@ -6,6 +6,7 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
+from six import text_type
from octodns.record import ARecord, AaaaRecord, Create, Delete, Record, Update
from octodns.zone import DuplicateRecordException, InvalidNodeException, \
@@ -47,7 +48,7 @@ class TestZone(TestCase):
with self.assertRaises(DuplicateRecordException) as ctx:
zone.add_record(a)
self.assertEquals('Duplicate record a.unit.tests., type A',
- ctx.exception.message)
+ text_type(ctx.exception))
self.assertEquals(zone.records, set([a]))
# can add duplicate with replace=True
@@ -137,7 +138,7 @@ class TestZone(TestCase):
def test_missing_dot(self):
with self.assertRaises(Exception) as ctx:
Zone('not.allowed', [])
- self.assertTrue('missing ending dot' in ctx.exception.message)
+ self.assertTrue('missing ending dot' in text_type(ctx.exception))
def test_sub_zones(self):
@@ -160,7 +161,7 @@ class TestZone(TestCase):
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
- self.assertTrue('not of type NS', ctx.exception.message)
+        self.assertTrue('not of type NS' in text_type(ctx.exception))
# Can add it w/lenient
zone.add_record(record, lenient=True)
self.assertEquals(set([record]), zone.records)
@@ -174,7 +175,7 @@ class TestZone(TestCase):
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
- self.assertTrue('under a managed sub-zone', ctx.exception.message)
+        self.assertTrue('under a managed sub-zone' in text_type(ctx.exception))
# Can add it w/lenient
zone.add_record(record, lenient=True)
self.assertEquals(set([record]), zone.records)
@@ -188,7 +189,7 @@ class TestZone(TestCase):
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
- self.assertTrue('under a managed sub-zone', ctx.exception.message)
+        self.assertTrue('under a managed sub-zone' in text_type(ctx.exception))
# Can add it w/lenient
zone.add_record(record, lenient=True)
self.assertEquals(set([record]), zone.records)
diff --git a/tests/zones/invalid.records. b/tests/zones/invalid.records.
new file mode 100644
index 0000000..e7865a4
--- /dev/null
+++ b/tests/zones/invalid.records.
@@ -0,0 +1,43 @@
+$ORIGIN invalid.records.
+@ 3600 IN SOA ns1.invalid.records. root.invalid.records. (
+ 2018071501 ; Serial
+ 3600 ; Refresh (1 hour)
+ 600 ; Retry (10 minutes)
+ 604800 ; Expire (1 week)
+ 3600 ; NXDOMAIN ttl (1 hour)
+ )
+
+; NS Records
+@ 3600 IN NS ns1.invalid.records.
+@ 3600 IN NS ns2.invalid.records.
+under 3600 IN NS ns1.invalid.records.
+under 3600 IN NS ns2.invalid.records.
+
+; SRV Records
+_srv._tcp 600 IN SRV 10 20 30 foo-1.invalid.records.
+_srv._tcp 600 IN SRV 10 20 30 foo-2.invalid.records.
+_invalid 600 IN SRV 10 20 30 foo-3.invalid.records.
+
+; TXT Records
+txt 600 IN TXT "Bah bah black sheep"
+txt 600 IN TXT "have you any wool."
+txt 600 IN TXT "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs"
+
+; MX Records
+mx 300 IN MX 10 smtp-4.invalid.records.
+mx 300 IN MX 20 smtp-2.invalid.records.
+mx 300 IN MX 30 smtp-3.invalid.records.
+mx 300 IN MX 40 smtp-1.invalid.records.
+
+; A Records
+@ 300 IN A 1.2.3.4
+@ 300 IN A 1.2.3.5
+www 300 IN A 2.2.3.6
+wwww.sub 300 IN A 2.2.3.6
+
+; AAAA Records
+aaaa 600 IN AAAA 2601:644:500:e210:62f8:1dff:feb8:947a
+
+; CNAME Records
+cname 300 IN CNAME invalid.records.
+included 300 IN CNAME invalid.records.
diff --git a/tests/zones/invalid.zone. b/tests/zones/invalid.zone.
index c814af6..04748a1 100644
--- a/tests/zones/invalid.zone.
+++ b/tests/zones/invalid.zone.
@@ -1,5 +1,5 @@
$ORIGIN invalid.zone.
-@ IN SOA ns1.invalid.zone. root.invalid.zone. (
+@ 3600 IN SOA ns1.invalid.zone. root.invalid.zone. (
2018071501 ; Serial
3600 ; Refresh (1 hour)
600 ; Retry (10 minutes)
diff --git a/tests/zones/tinydns/example.com b/tests/zones/tinydns/example.com
old mode 100644
new mode 100755
index 818d974..32781ca
--- a/tests/zones/tinydns/example.com
+++ b/tests/zones/tinydns/example.com
@@ -46,3 +46,12 @@ Ccname.other.foo:www.other.foo
+a1.blah-asdf.subtest.com:10.2.3.5
+a2.blah-asdf.subtest.com:10.2.3.6
+a3.asdf.subtest.com:10.2.3.7
+
+'example.com:test TXT:300
+'colon.example.com:test \072 TXT:300
+'nottl.example.com:nottl test TXT
+
+3ipv6-3.example.com:2a021348017cd5d0002419fffef35742:300
+6ipv6-6.example.com:2a021348017cd5d0002419fffef35743
+
+'semicolon.example.com:v=DKIM1; k=rsa; p=blah:300
diff --git a/tests/zones/unit.tests. b/tests/zones/unit.tests.
index 95828ad..0305e05 100644
--- a/tests/zones/unit.tests.
+++ b/tests/zones/unit.tests.
@@ -1,5 +1,5 @@
$ORIGIN unit.tests.
-@ IN SOA ns1.unit.tests. root.unit.tests. (
+@ 3600 IN SOA ns1.unit.tests. root.unit.tests. (
2018071501 ; Serial
3600 ; Refresh (1 hour)
600 ; Retry (10 minutes)