diff --git a/README.md b/README.md
index 1f42ca4..5b814d8 100644
--- a/README.md
+++ b/README.md
@@ -208,7 +208,7 @@ The table below lists the providers octoDNS supports. We're currently in the pro
 | [GoogleCloudProvider](/octodns/provider/googlecloud.py) | | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
 | [HetznerProvider](/octodns/provider/hetzner.py) | | | A, AAAA, CAA, CNAME, MX, NS, SRV, TXT | No | |
 | [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
-| [Ns1Provider](/octodns/provider/ns1.py) | | ns1-python | All | Yes | |
+| [Ns1Provider](https://github.com/octodns/octodns-ns1/) | [octodns_ns1](https://github.com/octodns/octodns-ns1/) | | | | |
 | [OVH](/octodns/provider/ovh.py) | | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | |
 | [PowerDnsProvider](https://github.com/octodns/octodns-powerdns/) | [octodns_powerdns](https://github.com/octodns/octodns-powerdns/) | | | | |
 | [Rackspace](/octodns/provider/rackspace.py) | | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | |
diff --git a/octodns/provider/ns1.py b/octodns/provider/ns1.py
index 802758b..afccc7e 100644
--- a/octodns/provider/ns1.py
+++ b/octodns/provider/ns1.py
@@ -6,1584 +6,16 @@ from __future__ import absolute_import, division, print_function, \
     unicode_literals

 from logging import getLogger
-from itertools import chain
-from collections import Mapping, OrderedDict, defaultdict
-from ns1 import NS1
-from ns1.rest.errors import RateLimitException, ResourceException
-from pycountry_convert import country_alpha2_to_continent_code
-from time import sleep
-from uuid import uuid4
-from ..record import Record, Update
-from . import ProviderException
-from .base import BaseProvider
-
-
-def _ensure_endswith_dot(string):
-    return string if string.endswith('.') else f'{string}.'
-
-
-class Ns1Exception(ProviderException):
-    pass
-
-
-class Ns1Client(object):
-    log = getLogger('NS1Client')
-
-    def __init__(self, api_key, parallelism=None, retry_count=4,
-                 client_config=None):
-        self.log.debug('__init__: parallelism=%s, retry_count=%d, '
-                       'client_config=%s', parallelism, retry_count,
-                       client_config)
-        self.retry_count = retry_count
-
-        client = NS1(apiKey=api_key)
-
-        # NS1 rate limits via a "token bucket" scheme, and provides information
-        # about rate limiting in headers on responses. Token bucket can be
-        # thought of as an initially "full" bucket, where, if not full, tokens
-        # are added at some rate. This allows "bursting" requests until the
-        # bucket is empty, after which, you are limited to the rate of token
-        # replenishment.
-        # There are a couple of "strategies" built into the SDK to avoid 429s
-        # from rate limiting. Since octodns operates concurrently via
-        # `max_workers`, a concurrent strategy seems appropriate.
-        # This strategy does nothing until the remaining requests are equal to
-        # or less than our `parallelism`, after which, each process will sleep
-        # for the token replenishment interval times parallelism.
-        # For example, if we can make 10 requests in 60 seconds, a token is
-        # replenished every 6 seconds. If parallelism is 3, we will burst 7
-        # requests, and subsequently each process will sleep for 18 seconds
-        # before making another request.
-        # In general, parallelism should match the number of workers.
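The worked example in the comment above is worth pinning down. Here is a minimal sketch, illustrative only and not the ns1-python SDK's actual implementation, of the per-worker sleep the `concurrent` strategy works out to:

```python
def concurrent_strategy_sleep(period_s, limit, parallelism):
    # one token is replenished every period / limit seconds; each of the
    # `parallelism` workers then sleeps long enough for a full round of
    # tokens to come back before asking again
    replenish_interval = period_s / limit
    return replenish_interval * parallelism

# the example from the comment: 10 requests per 60 seconds with
# parallelism 3 -> burst the first 7 requests, then sleep 18s each
assert concurrent_strategy_sleep(60, 10, 3) == 18.0
```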
- if parallelism is not None: - client.config['rate_limit_strategy'] = 'concurrent' - client.config['parallelism'] = parallelism - - # The list of records for a zone is paginated at around ~2.5k records, - # this tells the client to handle any of that transparently and ensure - # we get the full list of records. - client.config['follow_pagination'] = True - - # additional options or overrides - if isinstance(client_config, Mapping): - for k, v in client_config.items(): - client.config[k] = v - - self._client = client - - self._records = client.records() - self._zones = client.zones() - self._monitors = client.monitors() - self._notifylists = client.notifylists() - self._datasource = client.datasource() - self._datafeed = client.datafeed() - - self.reset_caches() - - def reset_caches(self): - self._datasource_id = None - self._feeds_for_monitors = None - self._monitors_cache = None - self._notifylists_cache = None - self._zones_cache = {} - self._records_cache = {} - - def update_record_cache(func): - def call(self, zone, domain, _type, **params): - if zone in self._zones_cache: - # remove record's zone from cache - del self._zones_cache[zone] - - cached = self._records_cache.setdefault(zone, {}) \ - .setdefault(domain, {}) - - if _type in cached: - # remove record from cache - del cached[_type] - - # write record to cache if its not a delete - new_record = func(self, zone, domain, _type, **params) - if new_record: - cached[_type] = new_record - - return new_record - - return call - - def read_or_set_record_cache(func): - def call(self, zone, domain, _type): - cached = self._records_cache.setdefault(zone, {}) \ - .setdefault(domain, {}) - if _type not in cached: - cached[_type] = func(self, zone, domain, _type) - - return cached[_type] - - return call - - @property - def datasource_id(self): - if self._datasource_id is None: - name = 'octoDNS NS1 Data Source' - source = None - for candidate in self.datasource_list(): - if candidate['name'] == name: - # Found it - source = candidate - break - - if source is None: - self.log.info('datasource_id: creating datasource %s', name) - # We need to create it - source = self.datasource_create(name=name, - sourcetype='nsone_monitoring') - self.log.info('datasource_id: id=%s', source['id']) - - self._datasource_id = source['id'] - - return self._datasource_id - - @property - def feeds_for_monitors(self): - if self._feeds_for_monitors is None: - self.log.debug('feeds_for_monitors: fetching & building') - self._feeds_for_monitors = { - f['config']['jobid']: f['id'] - for f in self.datafeed_list(self.datasource_id) - } - - return self._feeds_for_monitors - - @property - def monitors(self): - if self._monitors_cache is None: - self.log.debug('monitors: fetching & building') - self._monitors_cache = \ - {m['id']: m for m in self.monitors_list()} - return self._monitors_cache - - @property - def notifylists(self): - if self._notifylists_cache is None: - self.log.debug('notifylists: fetching & building') - self._notifylists_cache = \ - {l['name']: l for l in self.notifylists_list()} - return self._notifylists_cache - - def datafeed_create(self, sourceid, name, config): - ret = self._try(self._datafeed.create, sourceid, name, config) - self.feeds_for_monitors[config['jobid']] = ret['id'] - return ret - - def datafeed_delete(self, sourceid, feedid): - ret = self._try(self._datafeed.delete, sourceid, feedid) - self._feeds_for_monitors = { - k: v for k, v in self._feeds_for_monitors.items() if v != feedid - } - return ret - - def datafeed_list(self, sourceid): - 
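The `update_record_cache` / `read_or_set_record_cache` decorators above implement a small read-through cache keyed by zone, domain, and record type, so repeated retrievals of the same record skip the API. A self-contained sketch of the same pattern (simplified, hypothetical names, not the original code):

```python
def read_or_set_cache(func):
    # read-through cache: only call through on a miss, then remember it
    def call(self, zone, domain, _type):
        cached = self._cache.setdefault(zone, {}).setdefault(domain, {})
        if _type not in cached:
            cached[_type] = func(self, zone, domain, _type)
        return cached[_type]
    return call

class Client:
    def __init__(self):
        self._cache = {}
        self.calls = 0

    @read_or_set_cache
    def retrieve(self, zone, domain, _type):
        self.calls += 1
        return {'zone': zone, 'domain': domain, 'type': _type}

c = Client()
c.retrieve('unit.tests', 'www.unit.tests', 'A')
c.retrieve('unit.tests', 'www.unit.tests', 'A')
assert c.calls == 1  # second lookup was served from the cache
```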
return self._try(self._datafeed.list, sourceid) - - def datasource_create(self, **body): - return self._try(self._datasource.create, **body) - - def datasource_list(self): - return self._try(self._datasource.list) - - def monitors_create(self, **params): - body = {} - ret = self._try(self._monitors.create, body, **params) - self.monitors[ret['id']] = ret - return ret - - def monitors_delete(self, jobid): - ret = self._try(self._monitors.delete, jobid) - self.monitors.pop(jobid) - return ret - - def monitors_list(self): - return self._try(self._monitors.list) - - def monitors_update(self, job_id, **params): - body = {} - ret = self._try(self._monitors.update, job_id, body, **params) - self.monitors[ret['id']] = ret - return ret - - def notifylists_delete(self, nlid): - for name, nl in self.notifylists.items(): - if nl['id'] == nlid: - del self._notifylists_cache[name] - break - return self._try(self._notifylists.delete, nlid) - - def notifylists_create(self, **body): - nl = self._try(self._notifylists.create, body) - # cache it - self.notifylists[nl['name']] = nl - return nl - - def notifylists_list(self): - return self._try(self._notifylists.list) - - @update_record_cache - def records_create(self, zone, domain, _type, **params): - return self._try(self._records.create, zone, domain, _type, **params) - - @update_record_cache - def records_delete(self, zone, domain, _type): - return self._try(self._records.delete, zone, domain, _type) - - @read_or_set_record_cache - def records_retrieve(self, zone, domain, _type): - return self._try(self._records.retrieve, zone, domain, _type) - - @update_record_cache - def records_update(self, zone, domain, _type, **params): - return self._try(self._records.update, zone, domain, _type, **params) - - def zones_create(self, name): - self._zones_cache[name] = self._try(self._zones.create, name) - return self._zones_cache[name] - - def zones_retrieve(self, name): - if name not in self._zones_cache: - self._zones_cache[name] = self._try(self._zones.retrieve, name) - return self._zones_cache[name] - - def _try(self, method, *args, **kwargs): - tries = self.retry_count - while True: # We'll raise to break after our tries expire - try: - return method(*args, **kwargs) - except RateLimitException as e: - if tries <= 1: - raise - period = float(e.period) - self.log.warn('rate limit encountered, pausing ' - 'for %ds and trying again, %d remaining', - period, tries) - sleep(period) - tries -= 1 - - -class Ns1Provider(BaseProvider): - ''' - Ns1 provider - - ns1: - # Required - class: octodns.provider.ns1.Ns1Provider - api_key: env/NS1_API_KEY - # Only required if using dynamic records - monitor_regions: - - lga - # Optional. Default: false. true is Recommended, but not the default - # for backwards compatibility reasons. If true, all NS1 monitors will - # use a shared notify list rather than one per record & value - # combination. See CHANGELOG, - # https://github.com/octodns/octodns/blob/master/CHANGELOG.md, for more - # information before enabling this behavior. - shared_notifylist: false - # Optional. Default: None. If set, back off in advance to avoid 429s - # from rate-limiting. Generally this should be set to the number - # of processes or workers hitting the API, e.g. the value of - # `max_workers`. - parallelism: 11 - # Optional. Default: 4. Number of times to retry if a 429 response - # is received. - retry_count: 4 - # Optional. Default: None. Additional options or overrides passed to - # the NS1 SDK config, as key-value pairs. 
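The `_try` wrapper above is the retry half of the rate-limit handling: it re-issues a call after sleeping for the period the API reports, up to `retry_count` attempts. A standalone sketch of that loop; the `RateLimited` class here is a hypothetical stand-in for `ns1.rest.errors.RateLimitException`:

```python
from time import sleep

class RateLimited(Exception):
    # stand-in carrying the server-reported token replenishment period
    def __init__(self, period):
        super().__init__(f'rate limited, retry in {period}s')
        self.period = period

def try_with_retries(func, retry_count=4):
    # keep calling until success or the tries expire, sleeping for the
    # reported period between attempts (mirrors _try above)
    tries = retry_count
    while True:
        try:
            return func()
        except RateLimited as e:
            if tries <= 1:
                raise
            sleep(float(e.period))
            tries -= 1

calls = {'n': 0}

def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise RateLimited(0)
    return 'ok'

assert try_with_retries(flaky) == 'ok' and calls['n'] == 3
```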
- client_config: - endpoint: my.nsone.endpoint # Default: api.nsone.net - ignore-ssl-errors: true # Default: false - follow_pagination: false # Default: true - ''' - SUPPORTS_GEO = True - SUPPORTS_DYNAMIC = True - SUPPORTS_POOL_VALUE_STATUS = True - SUPPORTS_MULTIVALUE_PTR = True - SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', - 'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD')) - - ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found' - SHARED_NOTIFYLIST_NAME = 'octoDNS NS1 Notify List' - - @property - def _UP_FILTER(self): - return { - 'config': {}, - 'filter': 'up' - } - - @property - def _REGION_FILTER(self): - return { - 'config': { - 'remove_no_georegion': True - }, - 'filter': u'geofence_regional' - } - - @property - def _COUNTRY_FILTER(self): - return { - 'config': { - 'remove_no_location': True - }, - 'filter': u'geofence_country' - } - - # In the NS1 UI/portal, this filter is called "SELECT FIRST GROUP" though - # the filter name in the NS1 api is 'select_first_region' - @property - def _SELECT_FIRST_REGION_FILTER(self): - return { - 'config': {}, - 'filter': u'select_first_region' - } - - @property - def _PRIORITY_FILTER(self): - return { - 'config': { - 'eliminate': u'1' - }, - 'filter': 'priority' - } - - @property - def _WEIGHTED_SHUFFLE_FILTER(self): - return { - 'config': {}, - 'filter': u'weighted_shuffle' - } - - @property - def _SELECT_FIRST_N_FILTER(self): - return { - 'config': { - 'N': u'1' - }, - 'filter': u'select_first_n' - } - - @property - def _BASIC_FILTER_CHAIN(self): - return [ - self._UP_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_REGION(self): - return [ - self._UP_FILTER, - self._REGION_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_COUNTRY(self): - return [ - self._UP_FILTER, - self._COUNTRY_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - @property - def _FILTER_CHAIN_WITH_REGION_AND_COUNTRY(self): - return [ - self._UP_FILTER, - self._REGION_FILTER, - self._COUNTRY_FILTER, - self._SELECT_FIRST_REGION_FILTER, - self._PRIORITY_FILTER, - self._WEIGHTED_SHUFFLE_FILTER, - self._SELECT_FIRST_N_FILTER - ] - - _REGION_TO_CONTINENT = { - 'AFRICA': 'AF', - 'ASIAPAC': 'AS', - 'EUROPE': 'EU', - 'SOUTH-AMERICA': 'SA', - # continent NA has been handled as part of Geofence Country filter - # starting from v0.9.13. 
These below US-* just need to continue to - # exist here so it doesn't break the ugrade path - 'US-CENTRAL': 'NA', - 'US-EAST': 'NA', - 'US-WEST': 'NA', - } - _CONTINENT_TO_REGIONS = { - 'AF': ('AFRICA',), - 'AS': ('ASIAPAC',), - 'EU': ('EUROPE',), - 'SA': ('SOUTH-AMERICA',), - } - - # Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS - _CONTINENT_TO_LIST_OF_COUNTRIES = { - 'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU', - 'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN', - 'TK', 'TO', 'TV', 'WF', 'WS'}, - 'NA': {'DO', 'DM', 'BB', 'BL', 'BM', 'HT', 'KN', 'JM', 'VC', 'HN', - 'BS', 'BZ', 'PR', 'NI', 'LC', 'TT', 'VG', 'PA', 'TC', 'PM', - 'GT', 'AG', 'GP', 'AI', 'VI', 'CA', 'GD', 'AW', 'CR', 'GL', - 'CU', 'MF', 'SV', 'US', 'MQ', 'MS', 'KY', 'MX', 'CW', 'BQ', - 'SX', 'UM'} - } - - def __init__(self, id, api_key, retry_count=4, monitor_regions=None, - parallelism=None, client_config=None, shared_notifylist=False, - *args, **kwargs): - self.log = getLogger(f'Ns1Provider[{id}]') - self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, ' - 'monitor_regions=%s, parallelism=%s, client_config=%s', - id, retry_count, monitor_regions, parallelism, - client_config) - super(Ns1Provider, self).__init__(id, *args, **kwargs) - self.monitor_regions = monitor_regions - self.shared_notifylist = shared_notifylist - self.record_filters = dict() - self._client = Ns1Client(api_key, parallelism, retry_count, - client_config) - - def _sanitize_disabled_in_filter_config(self, filter_cfg): - # remove disabled=False from filters - for filter in filter_cfg: - if 'disabled' in filter and filter['disabled'] is False: - del filter['disabled'] - return filter_cfg - - def _valid_filter_config(self, filter_cfg): - self._sanitize_disabled_in_filter_config(filter_cfg) - has_region = self._REGION_FILTER in filter_cfg - has_country = self._COUNTRY_FILTER in filter_cfg - expected_filter_cfg = self._get_updated_filter_chain(has_region, - has_country) - return filter_cfg == expected_filter_cfg - - def _get_updated_filter_chain(self, has_region, has_country): - if has_region and has_country: - filter_chain = self._FILTER_CHAIN_WITH_REGION_AND_COUNTRY - elif has_region: - filter_chain = self._FILTER_CHAIN_WITH_REGION - elif has_country: - filter_chain = self._FILTER_CHAIN_WITH_COUNTRY - else: - filter_chain = self._BASIC_FILTER_CHAIN - - return filter_chain - - def _encode_notes(self, data): - return ' '.join([f'{k}:{v}' for k, v in sorted(data.items())]) - - def _parse_notes(self, note): - data = {} - if note: - for piece in note.split(' '): - try: - k, v = piece.split(':', 1) - data[k] = v if v != '' else None - except ValueError: - pass - return data - - def _data_for_geo_A(self, _type, record): - # record meta (which would include geo information is only - # returned when getting a record's detail, not from zone detail - geo = defaultdict(list) - data = { - 'ttl': record['ttl'], - 'type': _type, - } - values, codes = [], [] - for answer in record.get('answers', []): - meta = answer.get('meta', {}) - if meta: - # country + state and country + province are allowed - # in that case though, supplying a state/province would - # be redundant since the country would supercede in when - # resolving the record. it is syntactically valid, however. 
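The loop below turns each answer's `meta` into octoDNS geo keys (`CONTINENT`, `CONTINENT-COUNTRY`, or `CONTINENT-COUNTRY-STATE`). A small sketch of just that mapping, assuming `pycountry_convert` is installed (as the provider itself requires); the helper name is hypothetical:

```python
from pycountry_convert import country_alpha2_to_continent_code

def geo_keys(meta):
    # mirror of the answer-walking below: each meta field contributes
    # one style of geo key
    keys = []
    for country in meta.get('country', []):
        continent = country_alpha2_to_continent_code(country)  # 'FR' -> 'EU'
        keys.append(f'{continent}-{country}')                  # 'EU-FR'
    for state in meta.get('us_state', []):
        keys.append(f'NA-US-{state}')                          # 'NA-US-NY'
    for province in meta.get('ca_province', []):
        keys.append(f'NA-CA-{province}')                       # 'NA-CA-ON'
    keys.extend(meta.get('iso_region_code', []))               # already keys
    return keys

assert geo_keys({'country': ['FR'], 'us_state': ['NY']}) == ['EU-FR', 'NA-US-NY']
```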
- country = meta.get('country', []) - us_state = meta.get('us_state', []) - ca_province = meta.get('ca_province', []) - for cntry in country: - con = country_alpha2_to_continent_code(cntry) - key = f'{con}-{cntry}' - geo[key].extend(answer['answer']) - for state in us_state: - key = f'NA-US-{state}' - geo[key].extend(answer['answer']) - for province in ca_province: - key = f'NA-CA-{province}' - geo[key].extend(answer['answer']) - for code in meta.get('iso_region_code', []): - key = code - geo[key].extend(answer['answer']) - else: - values.extend(answer['answer']) - codes.append([]) - values = [str(x) for x in values] - geo = OrderedDict( - {str(k): [str(x) for x in v] for k, v in geo.items()} - ) - data['values'] = values - data['geo'] = geo - return data - - def _parse_dynamic_pool_name(self, pool_name): - if pool_name.startswith('catchall__'): - # Special case for the old-style catchall prefix - return pool_name[10:] - try: - pool_name, _ = pool_name.rsplit('__', 1) - except ValueError: - pass - return pool_name - - def _parse_pools(self, answers): - # All regions (pools) will include the list of default values - # (eventually) at higher priorities, we'll just add them to this set to - # we'll have the complete collection. - default = set() - - # Fill out the pools by walking the answers and looking at their - # region (< v0.9.11) or notes (> v0.9.11). - pools = defaultdict(lambda: {'fallback': None, 'values': []}) - for answer in answers: - meta = answer['meta'] - notes = self._parse_notes(meta.get('note', '')) - - value = str(answer['answer'][0]) - if notes.get('from', False) == '--default--': - # It's a final/default value, record it and move on - default.add(value) - continue - - # NS1 pool names can be found in notes > v0.9.11, in order to allow - # us to find fallback-only pools/values. Before that we used - # `region` (group name in the UI) and only paid attention to - # priority=1 (first level) - notes_pool_name = notes.get('pool', None) - if notes_pool_name is None: - # < v0.9.11 - if meta['priority'] != 1: - # Ignore all but priority 1 - continue - # And use region's name as the pool name - pool_name = self._parse_dynamic_pool_name(answer['region']) - else: - # > v0.9.11, use the notes-based name and consider all values - pool_name = notes_pool_name - - pool = pools[pool_name] - value_dict = { - 'value': value, - 'weight': int(meta.get('weight', 1)), - } - if isinstance(meta['up'], bool): - value_dict['status'] = 'up' if meta['up'] else 'down' - - if value_dict not in pool['values']: - # If we haven't seen this value before add it to the pool - pool['values'].append(value_dict) - - # If there's a fallback recorded in the value for its pool go ahead - # and use it, another v0.9.11 thing - fallback = notes.get('fallback', None) - if fallback is not None: - pool['fallback'] = fallback - - # Order and convert to a list - default = sorted(default) - - return default, pools - - def _parse_rule_geos(self, meta, notes): - geos = set() - - for georegion in meta.get('georegion', []): - geos.add(self._REGION_TO_CONTINENT[georegion]) - - # Countries are easy enough to map, we just have to find their - # continent - # - # NOTE: Some continents need special handling since NS1 - # does not supprt them as regions. These are defined under - # _CONTINENT_TO_LIST_OF_COUNTRIES. So the countries for these - # regions will be present in meta['country']. 
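`_parse_dynamic_pool_name` above strips the `__georegion` / `__country` / `__catchall` suffixes (and the old-style `catchall__` prefix) that octoDNS bakes into NS1 region names when it writes dynamic records. A standalone equivalent with the same behavior (hypothetical name, not the module's API):

```python
def parse_pool_name(ns1_region_name):
    # 'iad__georegion' -> 'iad', 'catchall__iad' -> 'iad', 'iad' -> 'iad'
    if ns1_region_name.startswith('catchall__'):
        return ns1_region_name[len('catchall__'):]
    pool_name, _, _ = ns1_region_name.rpartition('__')
    return pool_name or ns1_region_name

assert parse_pool_name('iad__georegion') == 'iad'
assert parse_pool_name('iad__country') == 'iad'
assert parse_pool_name('catchall__iad') == 'iad'
assert parse_pool_name('iad') == 'iad'
```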
If all the countries - # in _CONTINENT_TO_LIST_OF_COUNTRIES[] list are found, - # set the continent as the region and remove individual countries - - # continents that don't have all countries here because a subset of - # them were used in another rule, but we still need this rule to use - # continent instead of the remaining subset of its countries - continents_from_notes = set(notes.get('continents', '').split(',')) - - special_continents = dict() - for country in meta.get('country', []): - # country_alpha2_to_continent_code fails for Pitcairn ('PN'), - # United States Minor Outlying Islands ('UM') and - # Sint Maarten ('SX') - if country == 'PN': - con = 'OC' - elif country in ['SX', 'UM']: - con = 'NA' - else: - con = country_alpha2_to_continent_code(country) - - if con in self._CONTINENT_TO_LIST_OF_COUNTRIES: - special_continents.setdefault(con, set()).add(country) - else: - geos.add(f'{con}-{country}') - - for continent, countries in special_continents.items(): - if countries == self._CONTINENT_TO_LIST_OF_COUNTRIES[ - continent] or continent in continents_from_notes: - # All countries found or continent in notes, so add it to geos - geos.add(continent) - else: - # Partial countries found, so just add them as-is to geos - for c in countries: - geos.add(f'{continent}-{c}') - - # States and provinces are easy too, - # just assume NA-US or NA-CA - for state in meta.get('us_state', []): - geos.add(f'NA-US-{state}') - - for province in meta.get('ca_province', []): - geos.add(f'NA-CA-{province}') - - return geos - - def _parse_rules(self, pools, regions): - # The regions objects map to rules, but it's a bit fuzzy since they're - # tied to pools on the NS1 side, e.g. we can only have 1 rule per pool, - # that may eventually run into problems, but I don't have any use-cases - # examples currently where it would - rules = {} - for pool_name, region in sorted(regions.items()): - # Get the actual pool name by removing the type - pool_name = self._parse_dynamic_pool_name(pool_name) - - meta = region['meta'] - notes = self._parse_notes(meta.get('note', '')) - - # The group notes field in the UI is a `note` on the region here, - # that's where we can find our pool's fallback in < v0.9.11 anyway - if 'fallback' in notes: - # set the fallback pool name - pools[pool_name]['fallback'] = notes['fallback'] - - rule_order = notes['rule-order'] - try: - rule = rules[rule_order] - except KeyError: - rule = { - 'pool': pool_name, - '_order': rule_order, - } - rules[rule_order] = rule - - geos = self._parse_rule_geos(meta, notes) - if geos: - # There are geos, combine them with any existing geos for this - # pool and recorded the sorted unique set of them - rule['geos'] = sorted(set(rule.get('geos', [])) | geos) - - # Convert to list and order - rules = sorted(rules.values(), key=lambda r: (r['_order'], r['pool'])) - - return rules - - def _data_for_dynamic(self, _type, record): - # Cache record filters for later use - record_filters = self.record_filters.setdefault(record['domain'], {}) - record_filters[_type] = record['filters'] - - default, pools = self._parse_pools(record['answers']) - rules = self._parse_rules(pools, record['regions']) - - data = { - 'dynamic': { - 'pools': pools, - 'rules': rules, - }, - 'ttl': record['ttl'], - 'type': _type, - } - - if _type == 'CNAME': - data['value'] = default[0] - else: - data['values'] = default - - return data - - def _data_for_A(self, _type, record): - if record.get('tier', 1) > 1: - # Advanced record, see if it's first answer has a note - try: - 
first_answer_note = record['answers'][0]['meta']['note'] - except (IndexError, KeyError): - first_answer_note = '' - # If that note includes a `from` (pool name) it's a dynamic record - if 'from:' in first_answer_note: - return self._data_for_dynamic(_type, record) - # If not it's an old geo record - return self._data_for_geo_A(_type, record) - - # This is a basic record, just convert it - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': [str(x) for x in record['short_answers']] - } - - _data_for_AAAA = _data_for_A - - def _data_for_SPF(self, _type, record): - values = [v.replace(';', '\\;') for v in record['short_answers']] - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values - } - - _data_for_TXT = _data_for_SPF - - def _data_for_CAA(self, _type, record): - values = [] - for answer in record['short_answers']: - flags, tag, value = answer.split(' ', 2) - values.append({ - 'flags': flags, - 'tag': tag, - 'value': value, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_CNAME(self, _type, record): - if record.get('tier', 1) > 1: - # Advanced record, see if it's first answer has a note - try: - first_answer_note = record['answers'][0]['meta']['note'] - except (IndexError, KeyError): - first_answer_note = '' - # If that note includes a `pool` it's a valid dynamic record - if 'pool:' in first_answer_note: - return self._data_for_dynamic(_type, record) - # If not, it can't be parsed. Let it be an empty record - self.log.warn('Cannot parse %s dynamic record due to missing ' - 'pool name in first answer note, treating it as ' - 'an empty record', record['domain']) - value = None - else: - try: - value = record['short_answers'][0] - except IndexError: - value = None - - return { - 'ttl': record['ttl'], - 'type': _type, - 'value': value, - } - - _data_for_ALIAS = _data_for_CNAME - - def _data_for_MX(self, _type, record): - values = [] - for answer in record['short_answers']: - preference, exchange = answer.split(' ', 1) - values.append({ - 'preference': preference, - 'exchange': exchange, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_NAPTR(self, _type, record): - values = [] - for answer in record['short_answers']: - order, preference, flags, service, regexp, replacement = \ - answer.split(' ', 5) - values.append({ - 'flags': flags, - 'order': order, - 'preference': preference, - 'regexp': regexp, - 'replacement': replacement, - 'service': service, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_NS(self, _type, record): - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': record['short_answers'], - } - - _data_for_PTR = _data_for_NS - - def _data_for_SRV(self, _type, record): - values = [] - for answer in record['short_answers']: - priority, weight, port, target = answer.split(' ', 3) - values.append({ - 'priority': priority, - 'weight': weight, - 'port': port, - 'target': target, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def _data_for_URLFWD(self, _type, record): - values = [] - for answer in record['short_answers']: - path, target, code, masking, query = answer.split(' ', 4) - values.append({ - 'path': path, - 'target': target, - 'code': code, - 'masking': masking, - 'query': query, - }) - return { - 'ttl': record['ttl'], - 'type': _type, - 'values': values, - } - - def populate(self, zone, target=False, lenient=False): - self.log.debug('populate: name=%s, 
target=%s, lenient=%s', - zone.name, - target, lenient) - - try: - ns1_zone_name = zone.name[:-1] - ns1_zone = self._client.zones_retrieve(ns1_zone_name) - - records = [] - geo_records = [] - - # change answers for certain types to always be absolute - for record in ns1_zone['records']: - if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR', - 'SRV']: - record['short_answers'] = [ - _ensure_endswith_dot(a) - for a in record['short_answers'] - ] - - if record.get('tier', 1) > 1: - # Need to get the full record data for geo records - record = self._client.records_retrieve(ns1_zone_name, - record['domain'], - record['type']) - geo_records.append(record) - else: - records.append(record) - - exists = True - except ResourceException as e: - if e.message != self.ZONE_NOT_FOUND_MESSAGE: - raise - records = [] - geo_records = [] - exists = False - - before = len(zone.records) - # geo information isn't returned from the main endpoint, so we need - # to query for all records with geo information - zone_hash = {} - for record in chain(records, geo_records): - _type = record['type'] - if _type not in self.SUPPORTS: - continue - data_for = getattr(self, f'_data_for_{_type}') - name = zone.hostname_from_fqdn(record['domain']) - data = data_for(_type, record) - record = Record.new(zone, name, data, source=self, lenient=lenient) - zone_hash[(_type, name)] = record - [zone.add_record(r, lenient=lenient) for r in zone_hash.values()] - self.log.info('populate: found %s records, exists=%s', - len(zone.records) - before, exists) - return exists - - def _params_for_geo_A(self, record): - # purposefully set non-geo answers to have an empty meta, - # so that we know we did this on purpose if/when troubleshooting - params = { - 'answers': [{"answer": [x], "meta": {}} for x in record.values], - 'ttl': record.ttl, - } - - has_country = False - for iso_region, target in record.geo.items(): - key = 'iso_region_code' - value = iso_region - if not has_country and len(value.split('-')) > 1: - has_country = True - for answer in target.values: - params['answers'].append( - { - 'answer': [answer], - 'meta': {key: [value]}, - }, - ) - - params['filters'] = [] - if has_country: - params['filters'].append( - {"filter": "shuffle", "config": {}} - ) - params['filters'].append( - {"filter": "geotarget_country", "config": {}} - ) - params['filters'].append( - {"filter": "select_first_n", - "config": {"N": 1}} - ) - - return params, None - - def _monitors_for(self, record): - monitors = {} - - if getattr(record, 'dynamic', False): - expected_host = record.fqdn[:-1] - expected_type = record._type - - for monitor in self._client.monitors.values(): - data = self._parse_notes(monitor['notes']) - if not data: - continue - if expected_host == data['host'] and \ - expected_type == data['type']: - # This monitor does not belong to this record - config = monitor['config'] - value = config['host'] - if record._type == 'CNAME': - # Append a trailing dot for CNAME records so that - # lookup by a CNAME answer works - value = value + '.' 
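`_monitors_for` relies on the `host:` / `type:` pairs that `_encode_notes` writes into each monitor's notes to tie a monitor back to its record. A round-trip sketch of that encoding (simplified parse, hypothetical function names):

```python
def encode_notes(data):
    # 'k:v' pairs, space separated, sorted for stable comparison
    return ' '.join(f'{k}:{v}' for k, v in sorted(data.items()))

def parse_notes(note):
    data = {}
    for piece in (note or '').split(' '):
        k, sep, v = piece.partition(':')
        if sep:  # pieces without a ':' are ignored, like the original
            data[k] = v or None
    return data

note = encode_notes({'host': 'www.unit.tests', 'type': 'A'})
assert note == 'host:www.unit.tests type:A'
assert parse_notes(note) == {'host': 'www.unit.tests', 'type': 'A'}
```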
- monitors[value] = monitor - - return monitors - - def _uuid(self): - return uuid4().hex - - def _feed_create(self, monitor): - monitor_id = monitor['id'] - self.log.debug('_feed_create: monitor=%s', monitor_id) - name = f'{monitor["name"]} - {self._uuid()[:6]}' - - # Create the data feed - config = { - 'jobid': monitor_id, - } - feed = self._client.datafeed_create(self._client.datasource_id, name, - config) - feed_id = feed['id'] - self.log.debug('_feed_create: feed=%s', feed_id) - - return feed_id - - def _notifylists_find_or_create(self, name): - self.log.debug('_notifylists_find_or_create: name="%s"', name) - try: - nl = self._client.notifylists[name] - self.log.debug('_notifylists_find_or_create: existing=%s', - nl['id']) - except KeyError: - notify_list = [{ - 'config': { - 'sourceid': self._client.datasource_id, - }, - 'type': 'datafeed', - }] - nl = self._client.notifylists_create(name=name, - notify_list=notify_list) - self.log.debug('_notifylists_find_or_create: created=%s', - nl['id']) - - return nl - - def _monitor_create(self, monitor): - self.log.debug('_monitor_create: monitor="%s"', monitor['name']) - - # Find the right notifylist - nl_name = self.SHARED_NOTIFYLIST_NAME \ - if self.shared_notifylist else monitor['name'] - nl = self._notifylists_find_or_create(nl_name) - - # Create the monitor - monitor['notify_list'] = nl['id'] - monitor = self._client.monitors_create(**monitor) - monitor_id = monitor['id'] - self.log.debug('_monitor_create: monitor=%s', monitor_id) - - return monitor_id, self._feed_create(monitor) - - def _healthcheck_policy(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('policy', 'quorum') - - def _healthcheck_frequency(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('frequency', 60) - - def _healthcheck_rapid_recheck(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('rapid_recheck', False) - - def _healthcheck_connect_timeout(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('connect_timeout', 2) - - def _healthcheck_response_timeout(self, record): - return record._octodns.get('ns1', {}) \ - .get('healthcheck', {}) \ - .get('response_timeout', 10) - - def _monitor_gen(self, record, value): - host = record.fqdn[:-1] - _type = record._type - - if _type == 'CNAME': - # NS1 does not accept a host value with a trailing dot - value = value[:-1] - - ret = { - 'active': True, - 'config': { - 'connect_timeout': - # TCP monitors use milliseconds, so convert from - # seconds to milliseconds - self._healthcheck_connect_timeout(record) * 1000, - 'host': value, - 'port': record.healthcheck_port, - 'response_timeout': - # TCP monitors use milliseconds, so convert from - # seconds to milliseconds - self._healthcheck_response_timeout(record) * 1000, - 'ssl': record.healthcheck_protocol == 'HTTPS', - }, - 'job_type': 'tcp', - 'name': f'{host} - {_type} - {value}', - 'notes': self._encode_notes({ - 'host': host, - 'type': _type, - }), - 'policy': self._healthcheck_policy(record), - 'frequency': self._healthcheck_frequency(record), - 'rapid_recheck': self._healthcheck_rapid_recheck(record), - 'region_scope': 'fixed', - 'regions': self.monitor_regions, - } - - if _type == 'AAAA': - ret['config']['ipv6'] = True - - if record.healthcheck_protocol != 'TCP': - # IF it's HTTP we need to send the request string - path = record.healthcheck_path - host = record.healthcheck_host(value=value) - request = fr'GET 
{path} HTTP/1.0\r\nHost: {host}\r\n' \ - r'User-agent: NS1\r\n\r\n' - ret['config']['send'] = request - # We'll also expect a HTTP response - ret['rules'] = [{ - 'comparison': 'contains', - 'key': 'output', - 'value': '200 OK', - }] - - return ret - - def _monitor_is_match(self, expected, have): - # Make sure what we have matches what's in expected exactly. Anything - # else in have will be ignored. - for k, v in expected.items(): - if have.get(k, '--missing--') != v: - return False - - return True - - def _monitor_sync(self, record, value, existing): - self.log.debug('_monitor_sync: record=%s, value=%s', record.fqdn, - value) - expected = self._monitor_gen(record, value) - - if existing: - self.log.debug('_monitor_sync: existing=%s', existing['id']) - monitor_id = existing['id'] - - if not self._monitor_is_match(expected, existing): - self.log.debug('_monitor_sync: existing needs update') - # Update the monitor to match expected, everything else will be - # left alone and assumed correct - self._client.monitors_update(monitor_id, **expected) - - feed_id = self._client.feeds_for_monitors.get(monitor_id) - if feed_id is None: - self.log.warn('_monitor_sync: %s (%s) missing feed, creating', - existing['name'], monitor_id) - feed_id = self._feed_create(existing) - else: - self.log.debug('_monitor_sync: needs create') - # We don't have an existing monitor create it (and related bits) - monitor_id, feed_id = self._monitor_create(expected) - - return monitor_id, feed_id - - def _monitors_gc(self, record, active_monitor_ids=None): - self.log.debug('_monitors_gc: record=%s, active_monitor_ids=%s', - record.fqdn, active_monitor_ids) - - if active_monitor_ids is None: - active_monitor_ids = set() - - for monitor in self._monitors_for(record).values(): - monitor_id = monitor['id'] - if monitor_id in active_monitor_ids: - continue - - self.log.debug('_monitors_gc: deleting %s', monitor_id) - - feed_id = self._client.feeds_for_monitors.get(monitor_id) - if feed_id: - self._client.datafeed_delete(self._client.datasource_id, - feed_id) - - self._client.monitors_delete(monitor_id) - - notify_list_id = monitor['notify_list'] - for nl_name, nl in self._client.notifylists.items(): - if nl['id'] == notify_list_id: - # We've found the that might need deleting - if nl['name'] != self.SHARED_NOTIFYLIST_NAME: - # It's not shared so is safe to delete - self._client.notifylists_delete(notify_list_id) - break - - def _add_answers_for_pool(self, answers, default_answers, pool_name, - pool_label, pool_answers, pools, priority): - current_pool_name = pool_name - seen = set() - while current_pool_name and current_pool_name not in seen: - seen.add(current_pool_name) - pool = pools[current_pool_name] - for answer in pool_answers[current_pool_name]: - fallback = pool.data['fallback'] - if answer['feed_id']: - up = {'feed': answer['feed_id']} - else: - up = answer['status'] == 'up' - answer = { - 'answer': answer['answer'], - 'meta': { - 'priority': priority, - 'note': self._encode_notes({ - 'from': pool_label, - 'pool': current_pool_name, - 'fallback': fallback or '', - }), - 'up': up, - 'weight': answer['weight'], - }, - 'region': pool_label, # the one we're answering - } - answers.append(answer) - - current_pool_name = pool.data.get('fallback', None) - priority += 1 - - # Static/default - for answer in default_answers: - answer = { - 'answer': answer['answer'], - 'meta': { - 'priority': priority, - 'note': self._encode_notes({ - 'from': '--default--', - }), - 'up': True, - 'weight': 1, - }, - 'region': pool_label, # the 
one we're answering - } - answers.append(answer) - - def _generate_regions(self, record): - pools = record.dynamic.pools - has_country = False - has_region = False - regions = {} - - explicit_countries = dict() - for rule in record.dynamic.rules: - for geo in rule.data.get('geos', []): - if len(geo) == 5: - con, country = geo.split('-', 1) - explicit_countries.setdefault(con, set()).add(country) - - for i, rule in enumerate(record.dynamic.rules): - pool_name = rule.data['pool'] - - notes = { - 'rule-order': i, - } - - fallback = pools[pool_name].data.get('fallback', None) - if fallback: - notes['fallback'] = fallback - - country = set() - georegion = set() - us_state = set() - ca_province = set() - - for geo in rule.data.get('geos', []): - n = len(geo) - if n == 8: - # US state, e.g. NA-US-KY - # CA province, e.g. NA-CA-NL - us_state.add(geo[-2:]) if "NA-US" in geo \ - else ca_province.add(geo[-2:]) - # For filtering. State filtering is done by the country - # filter - has_country = True - elif n == 5: - # Country, e.g. EU-FR - country.add(geo[-2:]) - has_country = True - else: - # Continent, e.g. AS - if geo in self._CONTINENT_TO_REGIONS: - georegion.update(self._CONTINENT_TO_REGIONS[geo]) - has_region = True - else: - # No maps for geo in _CONTINENT_TO_REGIONS. - # Use the country list - self.log.debug('Converting geo {} to country list'. - format(geo)) - continent_countries = \ - self._CONTINENT_TO_LIST_OF_COUNTRIES[geo] - exclude = explicit_countries.get(geo, set()) - country.update(continent_countries - exclude) - notes.setdefault('continents', set()).add(geo) - has_country = True - - if 'continents' in notes: - notes['continents'] = ','.join(sorted(notes['continents'])) - - meta = { - 'note': self._encode_notes(notes), - } - - if georegion: - georegion_meta = dict(meta) - georegion_meta['georegion'] = sorted(georegion) - regions[f'{pool_name}__georegion'] = { - 'meta': georegion_meta, - } - - if country or us_state or ca_province: - # If there's country and/or states its a country pool, - # countries and states can coexist as they're handled by the - # same step in the filterchain (countries and georegions - # cannot as they're seperate stages and run the risk of - # eliminating all options) - country_state_meta = dict(meta) - if country: - country_state_meta['country'] = sorted(country) - if us_state: - country_state_meta['us_state'] = sorted(us_state) - if ca_province: - country_state_meta['ca_province'] = sorted(ca_province) - regions[f'{pool_name}__country'] = { - 'meta': country_state_meta, - } - elif not georegion: - # If there's no targeting it's a catchall - regions[f'{pool_name}__catchall'] = { - 'meta': meta, - } - - return has_country, has_region, regions - - def _generate_answers(self, record, regions): - pools = record.dynamic.pools - existing_monitors = self._monitors_for(record) - active_monitors = set() - - # Build a list of primary values for each pool, including their - # feed_id (monitor) - value_feed = dict() - pool_answers = defaultdict(list) - for pool_name, pool in sorted(pools.items()): - for value in pool.data['values']: - weight = value['weight'] - status = value['status'] - value = value['value'] - - feed_id = None - if status == 'obey': - # state is not forced, let's find a monitor - feed_id = value_feed.get(value) - # check for identical monitor and skip creating one if - # found - if not feed_id: - existing = existing_monitors.get(value) - monitor_id, feed_id = self._monitor_sync(record, value, - existing) - value_feed[value] = feed_id - 
active_monitors.add(monitor_id) - - pool_answers[pool_name].append({ - 'answer': [value], - 'weight': weight, - 'feed_id': feed_id, - 'status': status, - }) - - if record._type == 'CNAME': - default_values = [record.value] - else: - default_values = record.values - default_answers = [{ - 'answer': [v], - 'weight': 1, - } for v in default_values] - - # Build our list of answers - # The regions dictionary built above already has the required pool - # names. Iterate over them and add answers. - answers = [] - for pool_name in sorted(regions.keys()): - priority = 1 - - # Dynamic/health checked - pool_label = pool_name - # Remove the pool type from the end of the name - pool_name = self._parse_dynamic_pool_name(pool_name) - self._add_answers_for_pool(answers, default_answers, pool_name, - pool_label, pool_answers, pools, - priority) - - return active_monitors, answers - - def _params_for_dynamic(self, record): - # Convert rules to regions - has_country, has_region, regions = self._generate_regions(record) - - # Convert pools to answers - active_monitors, answers = self._generate_answers(record, regions) - - # Update filters as necessary - filters = self._get_updated_filter_chain(has_region, has_country) - - return { - 'answers': answers, - 'filters': filters, - 'regions': regions, - 'ttl': record.ttl, - }, active_monitors - - def _params_for_A(self, record): - if getattr(record, 'dynamic', False): - return self._params_for_dynamic(record) - elif hasattr(record, 'geo'): - return self._params_for_geo_A(record) - - return { - 'answers': record.values, - 'ttl': record.ttl, - }, None - - _params_for_AAAA = _params_for_A - _params_for_NS = _params_for_A - - def _params_for_SPF(self, record): - # NS1 seems to be the only provider that doesn't want things - # escaped in values so we have to strip them here and add - # them when going the other way - values = [v.replace('\\;', ';') for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - _params_for_TXT = _params_for_SPF - - def _params_for_CAA(self, record): - values = [(v.flags, v.tag, v.value) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_CNAME(self, record): - if getattr(record, 'dynamic', False): - return self._params_for_dynamic(record) - - return {'answers': [record.value], 'ttl': record.ttl}, None - - _params_for_ALIAS = _params_for_CNAME - - def _params_for_MX(self, record): - values = [(v.preference, v.exchange) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_NAPTR(self, record): - values = [(v.order, v.preference, v.flags, v.service, v.regexp, - v.replacement) for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_PTR(self, record): - return { - 'answers': record.values, - 'ttl': record.ttl, - }, None - - def _params_for_SRV(self, record): - values = [(v.priority, v.weight, v.port, v.target) - for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _params_for_URLFWD(self, record): - values = [(v.path, v.target, v.code, v.masking, v.query) - for v in record.values] - return {'answers': values, 'ttl': record.ttl}, None - - def _extra_changes(self, desired, changes, **kwargs): - self.log.debug('_extra_changes: desired=%s', desired.name) - changed = set([c.record for c in changes]) - extra = [] - for record in desired.records: - if record in changed or not getattr(record, 'dynamic', False): - # Already changed, or no dynamic , no need to check it - 
continue
-
-            # Filter normalization
-            # Check if filters for existing domains need an update
-            # Needs an explicit check since there might be no change in the
-            # config at all. Filters however might still need an update
-            domain = record.fqdn[:-1]
-            _type = record._type
-            record_filters = self.record_filters.get(domain, {}).get(_type, [])
-            if not self._valid_filter_config(record_filters):
-                # unrecognized set of filters, overwrite them by updating the
-                # record
-                self.log.info('_extra_changes: unrecognized filters in %s, '
-                              'will update record', domain)
-                extra.append(Update(record, record))
-                continue
-
-            for value, have in self._monitors_for(record).items():
-                expected = self._monitor_gen(record, value)
-                # TODO: find values which have missing monitors
-                if not self._monitor_is_match(expected, have):
-                    self.log.info('_extra_changes: monitor mis-match for %s',
-                                  expected['name'])
-                    extra.append(Update(record, record))
-                    break
-                if not have.get('notify_list'):
-                    self.log.info('_extra_changes: broken monitor no notify '
-                                  'list %s (%s)', have['name'], have['id'])
-                    extra.append(Update(record, record))
-                    break
-
-        return extra
-
-    def _apply_Create(self, ns1_zone, change):
-        new = change.new
-        zone = new.zone.name[:-1]
-        domain = new.fqdn[:-1]
-        _type = new._type
-        params, active_monitor_ids = getattr(self, f'_params_for_{_type}')(new)
-        self._client.records_create(zone, domain, _type, **params)
-        self._monitors_gc(new, active_monitor_ids)
-
-    def _apply_Update(self, ns1_zone, change):
-        new = change.new
-        zone = new.zone.name[:-1]
-        domain = new.fqdn[:-1]
-        _type = new._type
-        params, active_monitor_ids = getattr(self, f'_params_for_{_type}')(new)
-        self._client.records_update(zone, domain, _type, **params)
-        # If we're cleaning up we need to send in the old record since it'd
-        # have anything that needs cleaning up
-        self._monitors_gc(change.existing, active_monitor_ids)
-
-    def _apply_Delete(self, ns1_zone, change):
-        existing = change.existing
-        zone = existing.zone.name[:-1]
-        domain = existing.fqdn[:-1]
-        _type = existing._type
-        self._client.records_delete(zone, domain, _type)
-        self._monitors_gc(existing)
-
-    def _has_dynamic(self, changes):
-        for change in changes:
-            if getattr(change.record, 'dynamic', False):
-                return True
-
-        return False
-
-    def _apply(self, plan):
-        desired = plan.desired
-        changes = plan.changes
-        self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
-                       len(changes))
-
-        # Make sure that if we're going to make any dynamic changes that we
-        # have monitor_regions configured before touching anything so we can
-        # abort early and not half-apply
-        if self._has_dynamic(changes) and self.monitor_regions is None:
-            raise Ns1Exception('Monitored record, but monitor_regions not set')
-
-        domain_name = desired.name[:-1]
-        try:
-            ns1_zone = self._client.zones_retrieve(domain_name)
-        except ResourceException as e:
-            if e.message != self.ZONE_NOT_FOUND_MESSAGE:
-                raise
-            self.log.debug('_apply: no matching zone, creating')
-            ns1_zone = self._client.zones_create(domain_name)
-
-        for change in changes:
-            class_name = change.__class__.__name__
-            getattr(self, f'_apply_{class_name}')(ns1_zone, change)
+logger = getLogger('Ns1')
+try:
+    logger.warn('octodns_ns1 shimmed. Update your provider class to '
+                'octodns_ns1.Ns1Provider. '
+                'Shim will be removed in 1.0')
+    from octodns_ns1 import Ns1Provider
+    Ns1Provider  # pragma: no cover
+except ModuleNotFoundError:
+    logger.exception('Ns1Provider has been moved into a separate module, '
+                     'octodns_ns1 is now required. 
Provider class should '
+                     'be updated to octodns_ns1.Ns1Provider')
+    raise
diff --git a/octodns/provider/powerdns.py b/octodns/provider/powerdns.py
index ebcd3b6..097088a 100644
--- a/octodns/provider/powerdns.py
+++ b/octodns/provider/powerdns.py
@@ -7,7 +7,7 @@ from __future__ import absolute_import, division, print_function, \
 from logging import getLogger
-logger = getLogger('PowerDNS')
+logger = getLogger('PowerDns')
 try:
     logger.warn('octodns_powerdns shimmed. Update your provider class to '
                 'octodns_powerdns.PowerDnsProvider. '
diff --git a/tests/test_octodns_provider_ns1.py b/tests/test_octodns_provider_ns1.py
index 01a2ab1..3d85dd0 100644
--- a/tests/test_octodns_provider_ns1.py
+++ b/tests/test_octodns_provider_ns1.py
@@ -5,2859 +5,12 @@ from __future__ import absolute_import, division, print_function, \
     unicode_literals
-from collections import defaultdict
-from mock import call, patch
-from ns1.rest.errors import AuthException, RateLimitException, \
-    ResourceException
 from unittest import TestCase
-from octodns.record import Delete, Record, Update
-from octodns.provider.ns1 import Ns1Client, Ns1Exception, Ns1Provider
-from octodns.provider.plan import Plan
-from octodns.zone import Zone

 class TestNs1Provider(TestCase):
-    zone = Zone('unit.tests.', [])
-    expected = set()
-    expected.add(Record.new(zone, '', {
-        'ttl': 32,
-        'type': 'A',
-        'value': '1.2.3.4',
-        'meta': {},
-    }))
-    expected.add(Record.new(zone, 'foo', {
-        'ttl': 33,
-        'type': 'A',
-        'values': ['1.2.3.4', '1.2.3.5'],
-        'meta': {},
-    }))
-    expected.add(Record.new(zone, 'geo', {
-        'ttl': 34,
-        'type': 'A',
-        'values': ['101.102.103.104', '101.102.103.105'],
-        'geo': {'NA-US-NY': ['201.202.203.204']},
-        'meta': {},
-    }))
-    expected.add(Record.new(zone, 'cname', {
-        'ttl': 34,
-        'type': 'CNAME',
-        'value': 'foo.unit.tests.',
-    }))
-    expected.add(Record.new(zone, '', {
-        'ttl': 35,
-        'type': 'MX',
-        'values': [{
-            'preference': 10,
-            'exchange': 'mx1.unit.tests.',
-        }, {
-            'preference': 20,
-            'exchange': 'mx2.unit.tests.',
-        }]
-    }))
-    expected.add(Record.new(zone, 'naptr', {
-        'ttl': 36,
-        'type': 'NAPTR',
-        'values': [{
-            'flags': 'U',
-            'order': 100,
-            'preference': 100,
-            'regexp': '!^.*$!sip:info@bar.example.com!',
-            'replacement': '.',
-            'service': 'SIP+D2U',
-        }, {
-            'flags': 'S',
-            'order': 10,
-            'preference': 100,
-            'regexp': '!^.*$!sip:info@bar.example.com!',
-            'replacement': '.',
-            'service': 'SIP+D2U',
-        }]
-    }))
-    expected.add(Record.new(zone, '', {
-        'ttl': 37,
-        'type': 'NS',
-        'values': ['ns1.unit.tests.', 'ns2.unit.tests.'],
-    }))
-    expected.add(Record.new(zone, '_srv._tcp', {
-        'ttl': 38,
-        'type': 'SRV',
-        'values': [{
-            'priority': 10,
-            'weight': 20,
-            'port': 30,
-            'target': 'foo-1.unit.tests.',
-        }, {
-            'priority': 12,
-            'weight': 30,
-            'port': 30,
-            'target': 'foo-2.unit.tests.',
-        }]
-    }))
-    expected.add(Record.new(zone, 'sub', {
-        'ttl': 39,
-        'type': 'NS',
-        'values': ['ns3.unit.tests.', 'ns4.unit.tests.'],
-    }))
-    expected.add(Record.new(zone, '', {
-        'ttl': 40,
-        'type': 'CAA',
-        'value': {
-            'flags': 0,
-            'tag': 'issue',
-            'value': 'ca.unit.tests',
-        },
-    }))
-    expected.add(Record.new(zone, 'urlfwd', {
-        'ttl': 41,
-        'type': 'URLFWD',
-        'value': {
-            'path': '/',
-            'target': 'http://foo.unit.tests',
-            'code': 301,
-            'masking': 2,
-            'query': 0,
-        },
-    }))
-    expected.add(Record.new(zone, '1.2.3.4', {
-        'ttl': 42,
-        'type': 'PTR',
-        'values': ['one.one.one.one.', 'two.two.two.two.'],
-    }))
-    ns1_records = [{
-        'type': 'A',
-        'ttl': 32,
-        'short_answers': ['1.2.3.4'],
-        'domain': 'unit.tests.',
- }, { - 'type': 'A', - 'ttl': 33, - 'short_answers': ['1.2.3.4', '1.2.3.5'], - 'domain': 'foo.unit.tests.', - }, { - 'type': 'A', - 'ttl': 34, - 'short_answers': ['101.102.103.104', '101.102.103.105'], - 'domain': 'geo.unit.tests', - }, { - 'type': 'CNAME', - 'ttl': 34, - 'short_answers': ['foo.unit.tests'], - 'domain': 'cname.unit.tests.', - }, { - 'type': 'MX', - 'ttl': 35, - 'short_answers': ['10 mx1.unit.tests.', '20 mx2.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'NAPTR', - 'ttl': 36, - 'short_answers': [ - '10 100 S SIP+D2U !^.*$!sip:info@bar.example.com! .', - '100 100 U SIP+D2U !^.*$!sip:info@bar.example.com! .' - ], - 'domain': 'naptr.unit.tests.', - }, { - 'type': 'NS', - 'ttl': 37, - 'short_answers': ['ns1.unit.tests.', 'ns2.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'SRV', - 'ttl': 38, - 'short_answers': ['12 30 30 foo-2.unit.tests.', - '10 20 30 foo-1.unit.tests'], - 'domain': '_srv._tcp.unit.tests.', - }, { - 'type': 'NS', - 'ttl': 39, - 'short_answers': ['ns3.unit.tests.', 'ns4.unit.tests'], - 'domain': 'sub.unit.tests.', - }, { - 'type': 'CAA', - 'ttl': 40, - 'short_answers': ['0 issue ca.unit.tests'], - 'domain': 'unit.tests.', - }, { - 'type': 'URLFWD', - 'ttl': 41, - 'short_answers': ['/ http://foo.unit.tests 301 2 0'], - 'domain': 'urlfwd.unit.tests.', - }, { - 'type': 'PTR', - 'ttl': 42, - 'short_answers': ['one.one.one.one.', 'two.two.two.two.'], - 'domain': '1.2.3.4.unit.tests.', - }] - - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.retrieve') - def test_populate(self, zone_retrieve_mock, record_retrieve_mock): - provider = Ns1Provider('test', 'api-key') - - def reset(): - provider._client.reset_caches() - zone_retrieve_mock.reset_mock() - record_retrieve_mock.reset_mock() - - # Bad auth - reset() - zone_retrieve_mock.side_effect = AuthException('unauthorized') - zone = Zone('unit.tests.', []) - with self.assertRaises(AuthException) as ctx: - provider.populate(zone) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - - # General error - reset() - zone_retrieve_mock.side_effect = ResourceException('boom') - zone = Zone('unit.tests.', []) - with self.assertRaises(ResourceException) as ctx: - provider.populate(zone) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - - # Non-existent zone doesn't populate anything - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - zone = Zone('unit.tests.', []) - exists = provider.populate(zone) - self.assertEquals(set(), zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - self.assertFalse(exists) - - # Existing zone w/o records - reset() - ns1_zone = { - 'records': [{ - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - # Its tier 3 so we'll do a full lookup - record_retrieve_mock.side_effect = ns1_zone['records'] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(1, len(zone.records)) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - 
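These tests drive the provider entirely through patched `ns1.rest` endpoints, with queued `side_effect` lists deciding what each successive call returns or raises. A tiny self-contained illustration of that mock behavior, using the stdlib `unittest.mock` rather than the legacy `mock` package imported above:

```python
from unittest.mock import MagicMock

retrieve = MagicMock()
# a list side_effect queues one result per call; an exception instance
# in the queue is raised instead of returned
retrieve.side_effect = [{'records': []}, KeyError('zone not found')]

assert retrieve('unit.tests') == {'records': []}  # first queued result
try:
    retrieve('unit.tests')  # second call raises the queued exception
except KeyError:
    pass
```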
record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - # Existing zone w/records - reset() - ns1_zone = { - 'records': self.ns1_records + [{ - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - # Its tier 3 so we'll do a full lookup - record_retrieve_mock.side_effect = ns1_zone['records'] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(self.expected, zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - # Test skipping unsupported record type - reset() - ns1_zone = { - 'records': self.ns1_records + [{ - 'type': 'UNSUPPORTED', - 'ttl': 42, - 'short_answers': ['unsupported'], - 'domain': 'unsupported.unit.tests.', - }, { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - }], - } - zone_retrieve_mock.side_effect = [ns1_zone] - zone = Zone('unit.tests.', []) - provider.populate(zone) - self.assertEquals(self.expected, zone.records) - self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0]) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'geo.unit.tests', 'A')]) - - @patch('ns1.rest.records.Records.delete') - @patch('ns1.rest.records.Records.update') - @patch('ns1.rest.records.Records.create') - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.create') - @patch('ns1.rest.zones.Zones.retrieve') - def test_sync(self, zone_retrieve_mock, zone_create_mock, - record_retrieve_mock, record_create_mock, - record_update_mock, record_delete_mock): - provider = Ns1Provider('test', 'api-key') - - desired = Zone('unit.tests.', []) - for r in self.expected: - desired.add_record(r) - - plan = provider.plan(desired) - # everything except the root NS - expected_n = len(self.expected) - 1 - self.assertEquals(expected_n, len(plan.changes)) - self.assertTrue(plan.exists) - - def reset(): - provider._client.reset_caches() - record_retrieve_mock.reset_mock() - zone_create_mock.reset_mock() - zone_retrieve_mock.reset_mock() - - # Fails, general error - reset() - zone_retrieve_mock.side_effect = ResourceException('boom') - with self.assertRaises(ResourceException) as ctx: - provider.apply(plan) - self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception) - - # Fails, bad auth - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - zone_create_mock.side_effect = AuthException('unauthorized') - with self.assertRaises(AuthException) as ctx: - provider.apply(plan) - self.assertEquals(zone_create_mock.side_effect, ctx.exception) - - # non-existent zone, create - reset() - zone_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - - zone_create_mock.side_effect = 
['foo'] - # Test out the create rate-limit handling, then successes for the rest - record_create_mock.side_effect = [ - RateLimitException('boo', period=0), - ] + ([None] * len(self.expected)) - - got_n = provider.apply(plan) - self.assertEquals(expected_n, got_n) - - # Zone was created - zone_create_mock.assert_has_calls([call('unit.tests')]) - # Checking that we got some of the expected records too - record_create_mock.assert_has_calls([ - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}} - ], filters=[], ttl=32), - call('unit.tests', 'unit.tests', 'CAA', answers=[ - (0, 'issue', 'ca.unit.tests') - ], ttl=40), - call('unit.tests', 'unit.tests', 'MX', answers=[ - (10, 'mx1.unit.tests.'), (20, 'mx2.unit.tests.') - ], ttl=35), - call('unit.tests', '1.2.3.4.unit.tests', 'PTR', answers=[ - 'one.one.one.one.', 'two.two.two.two.', - ], ttl=42), - ]) - - # Update & delete - reset() - - ns1_zone = { - 'records': self.ns1_records + [{ - 'type': 'A', - 'ttl': 42, - 'short_answers': ['9.9.9.9'], - 'domain': 'delete-me.unit.tests.', - }, { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "short_answers": [ - '1.1.1.1', - '1.2.3.4', - '2.3.4.5', - '3.4.5.6', - '4.5.6.7', - ], - 'tier': 3, # This flags it as advacned, full load required - 'ttl': 34, - }], - } - ns1_zone['records'][0]['short_answers'][0] = '2.2.2.2' - - ns1_record = { - "domain": "geo.unit.tests", - "zone": "unit.tests", - "type": "A", - "answers": [ - {'answer': ['1.1.1.1'], 'meta': {}}, - {'answer': ['1.2.3.4'], - 'meta': {'ca_province': ['ON']}}, - {'answer': ['2.3.4.5'], 'meta': {'us_state': ['NY']}}, - {'answer': ['3.4.5.6'], 'meta': {'country': ['US']}}, - {'answer': ['4.5.6.7'], - 'meta': {'iso_region_code': ['NA-US-WA']}}, - ], - 'tier': 3, - 'ttl': 34, - } - - record_retrieve_mock.side_effect = [ns1_record, ns1_record] - zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone] - plan = provider.plan(desired) - self.assertEquals(3, len(plan.changes)) - # Shouldn't rely on order so just count classes - classes = defaultdict(lambda: 0) - for change in plan.changes: - classes[change.__class__] += 1 - self.assertEquals(1, classes[Delete]) - self.assertEquals(2, classes[Update]) - - record_update_mock.side_effect = [ - RateLimitException('one', period=0), - None, - None, - ] - record_delete_mock.side_effect = [ - RateLimitException('two', period=0), - None, - None, - ] - - record_retrieve_mock.side_effect = [ns1_record, ns1_record] - zone_retrieve_mock.side_effect = [ns1_zone, ns1_zone] - got_n = provider.apply(plan) - self.assertEquals(3, got_n) - - record_update_mock.assert_has_calls([ - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}}], - filters=[], - ttl=32), - call('unit.tests', 'unit.tests', 'A', answers=[ - {'answer': ['1.2.3.4'], 'meta': {}}], - filters=[], - ttl=32), - call('unit.tests', 'geo.unit.tests', 'A', answers=[ - {'answer': ['101.102.103.104'], 'meta': {}}, - {'answer': ['101.102.103.105'], 'meta': {}}, - { - 'answer': ['201.202.203.204'], - 'meta': {'iso_region_code': ['NA-US-NY']} - }], - filters=[ - {'filter': 'shuffle', 'config': {}}, - {'filter': 'geotarget_country', 'config': {}}, - {'filter': 'select_first_n', 'config': {'N': 1}}], - ttl=34) - ]) - - def test_escaping(self): - provider = Ns1Provider('test', 'api-key') - record = { - 'ttl': 31, - 'short_answers': ['foo; bar baz; blip'] - } - self.assertEquals(['foo\\; bar baz\\; blip'], - provider._data_for_SPF('SPF', record)['values']) - - record = { - 'ttl': 31, - 
'short_answers': ['no', 'foo; bar baz; blip', 'yes']
- }
- self.assertEquals(['no', 'foo\\; bar baz\\; blip', 'yes'],
- provider._data_for_TXT('TXT', record)['values'])
- 
- zone = Zone('unit.tests.', [])
- record = Record.new(zone, 'spf', {
- 'ttl': 34,
- 'type': 'SPF',
- 'value': 'foo\\; bar baz\\; blip'
- })
- params, _ = provider._params_for_SPF(record)
- self.assertEquals(['foo; bar baz; blip'], params['answers'])
- 
- record = Record.new(zone, 'txt', {
- 'ttl': 35,
- 'type': 'TXT',
- 'value': 'foo\\; bar baz\\; blip'
- })
- params, _ = provider._params_for_TXT(record)
- self.assertEquals(['foo; bar baz; blip'], params['answers'])
- 
- def test_data_for_CNAME(self):
- provider = Ns1Provider('test', 'api-key')
- 
- # answers from ns1
- a_record = {
- 'ttl': 31,
- 'type': 'CNAME',
- 'short_answers': ['foo.unit.tests.']
- }
- a_expected = {
- 'ttl': 31,
- 'type': 'CNAME',
- 'value': 'foo.unit.tests.'
- }
- self.assertEqual(a_expected,
- provider._data_for_CNAME(a_record['type'], a_record))
- 
- # no answers from ns1
- b_record = {
- 'ttl': 32,
- 'type': 'CNAME',
- 'short_answers': []
- }
- b_expected = {
- 'ttl': 32,
- 'type': 'CNAME',
- 'value': None
- }
- self.assertEqual(b_expected,
- provider._data_for_CNAME(b_record['type'], b_record))
- 
- 
- class TestNs1ProviderDynamic(TestCase):
- zone = Zone('unit.tests.', [])
- 
- def record(self):
- # return a new object each time so we can mess with it without causing
- # problems from test to test
- return Record.new(self.zone, '', {
- 'dynamic': {
- 'pools': {
- 'lhr': {
- 'fallback': 'iad',
- 'values': [{
- 'value': '3.4.5.6',
- }],
- },
- 'iad': {
- 'values': [{
- 'value': '1.2.3.4',
- }, {
- 'value': '2.3.4.5',
- }],
- },
- },
- 'rules': [{
- 'geos': [
- 'AF',
- 'EU-GB',
- 'NA-US-FL'
- ],
- 'pool': 'lhr',
- }, {
- 'geos': [
- 'AF-ZW',
- ],
- 'pool': 'iad',
- }, {
- 'pool': 'iad',
- }],
- },
- 'octodns': {
- 'healthcheck': {
- 'host': 'send.me',
- 'path': '/_ping',
- 'port': 80,
- 'protocol': 'HTTP',
- },
- 'ns1': {
- 'healthcheck': {
- 'connect_timeout': 5,
- 'response_timeout': 6,
- },
- },
- },
- 'ttl': 32,
- 'type': 'A',
- 'value': '1.2.3.4',
- 'meta': {},
- })
- 
- def aaaa_record(self):
- return Record.new(self.zone, '', {
- 'dynamic': {
- 'pools': {
- 'lhr': {
- 'fallback': 'iad',
- 'values': [{
- 'value': '::ffff:3.4.5.6',
- }],
- },
- 'iad': {
- 'values': [{
- 'value': '::ffff:1.2.3.4',
- }, {
- 'value': '::ffff:2.3.4.5',
- }],
- },
- },
- 'rules': [{
- 'geos': [
- 'AF',
- 'EU-GB',
- 'NA-US-FL'
- ],
- 'pool': 'lhr',
- }, {
- 'geos': [
- 'AF-ZW',
- ],
- 'pool': 'iad',
- }, {
- 'pool': 'iad',
- }],
- },
- 'octodns': {
- 'healthcheck': {
- 'host': 'send.me',
- 'path': '/_ping',
- 'port': 80,
- 'protocol': 'HTTP',
- }
- },
- 'ttl': 32,
- 'type': 'AAAA',
- 'value': '::ffff:1.2.3.4',
- 'meta': {},
- })
- 
- def cname_record(self):
- return Record.new(self.zone, 'foo', {
- 'dynamic': {
- 'pools': {
- 'iad': {
- 'values': [{
- 'value': 'iad.unit.tests.',
- }],
- },
- },
- 'rules': [{
- 'pool': 'iad',
- }],
- },
- 'octodns': {
- 'healthcheck': {
- 'host': 'send.me',
- 'path': '/_ping',
- 'port': 80,
- 'protocol': 'HTTP',
- }
- },
- 'ttl': 33,
- 'type': 'CNAME',
- 'value': 'value.unit.tests.',
- 'meta': {},
- })
- 
- def test_notes(self):
- provider = Ns1Provider('test', 'api-key')
- 
- self.assertEquals({}, provider._parse_notes(None))
- self.assertEquals({}, provider._parse_notes(''))
- self.assertEquals({}, provider._parse_notes('blah-blah-blah'))
- 
- # Round tripping
- data = {
- 'key': 'value',
- 'priority': '1',
- }
- notes = provider._encode_notes(data)
- self.assertEquals(data, provider._parse_notes(notes))
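test_notes above only pins down the round trip, so for context: monitor and region notes are a flat, space-separated list of key:value pairs. A minimal sketch of a compatible codec (illustrative, not the provider's exact implementation; values are assumed to be simple strings without embedded spaces):

def encode_notes(data):
    # deterministic ordering keeps the round trip stable
    return ' '.join(f'{k}:{v}' for k, v in sorted(data.items()))

def parse_notes(notes):
    data = {}
    for piece in (notes or '').split(' '):
        try:
            k, v = piece.split(':', 1)
        except ValueError:
            # non-conforming piece, e.g. 'blah-blah-blah', is ignored
            continue
        data[k] = v
    return data

assert parse_notes(encode_notes({'key': 'value', 'priority': '1'})) == \
    {'key': 'value', 'priority': '1'}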
- def test_monitors_for(self):
- provider = Ns1Provider('test', 'api-key')
- 
- # pre-populate the client's monitors cache
- monitor_one = {
- 'config': {
- 'host': '1.2.3.4',
- },
- 'notes': 'host:unit.tests type:A',
- }
- monitor_four = {
- 'config': {
- 'host': '2.3.4.5',
- },
- 'notes': 'host:unit.tests type:A',
- }
- monitor_five = {
- 'config': {
- 'host': 'iad.unit.tests',
- },
- 'notes': 'host:foo.unit.tests type:CNAME',
- }
- provider._client._monitors_cache = {
- 'one': monitor_one,
- 'two': {
- 'config': {
- 'host': '8.8.8.8',
- },
- 'notes': 'host:unit.tests type:AAAA',
- },
- 'three': {
- 'config': {
- 'host': '9.9.9.9',
- },
- 'notes': 'host:other.unit.tests type:A',
- },
- 'four': monitor_four,
- 'five': monitor_five,
- 'six': {
- 'config': {
- 'host': '10.10.10.10',
- },
- 'notes': 'non-conforming notes',
- },
- 'seven': {
- 'config': {
- 'host': '11.11.11.11',
- },
- 'notes': None,
- },
- }
- 
- # Would match, but won't get there b/c it's not dynamic
- record = Record.new(self.zone, '', {
- 'ttl': 32,
- 'type': 'A',
- 'value': '1.2.3.4',
- 'meta': {},
- })
- self.assertEquals({}, provider._monitors_for(record))
- 
- # Will match some records
- self.assertEquals({
- '1.2.3.4': monitor_one,
- '2.3.4.5': monitor_four,
- }, provider._monitors_for(self.record()))
- 
- # Check match for CNAME values
- self.assertEquals({
- 'iad.unit.tests.': monitor_five,
- }, provider._monitors_for(self.cname_record()))
- 
- def test_uuid(self):
- # Just a smoke test/for coverage
- provider = Ns1Provider('test', 'api-key')
- self.assertTrue(provider._uuid())
- 
- @patch('octodns.provider.ns1.Ns1Provider._uuid')
- @patch('ns1.rest.data.Feed.create')
- def test_feed_create(self, datafeed_create_mock, uuid_mock):
- provider = Ns1Provider('test', 'api-key')
- 
- # pre-fill caches to avoid extraneous calls (things we're testing
- # elsewhere)
- provider._client._datasource_id = 'foo'
- provider._client._feeds_for_monitors = {}
- 
- uuid_mock.reset_mock()
- datafeed_create_mock.reset_mock()
- uuid_mock.side_effect = ['xxxxxxxxxxxxxx']
- feed = {
- 'id': 'feed',
- }
- datafeed_create_mock.side_effect = [feed]
- monitor = {
- 'id': 'one',
- 'name': 'one name',
- 'config': {
- 'host': '1.2.3.4',
- },
- 'notes': 'host:unit.tests type:A',
- }
- self.assertEquals('feed', provider._feed_create(monitor))
- datafeed_create_mock.assert_has_calls([call('foo', 'one name - xxxxxx',
- {'jobid': 'one'})])
- 
- @patch('octodns.provider.ns1.Ns1Provider._feed_create')
- @patch('octodns.provider.ns1.Ns1Client.monitors_create')
- @patch('octodns.provider.ns1.Ns1Client.notifylists_create')
- def test_monitor_create(self, notifylists_create_mock,
- monitors_create_mock, feed_create_mock):
- provider = Ns1Provider('test', 'api-key')
- 
- # pre-fill caches to avoid extraneous calls (things we're testing
- # elsewhere)
- provider._client._datasource_id = 'foo'
- provider._client._feeds_for_monitors = {}
- 
- notifylists_create_mock.reset_mock()
- monitors_create_mock.reset_mock()
- feed_create_mock.reset_mock()
- notifylists_create_mock.side_effect = [{
- 'id': 'nl-id',
- }]
- monitors_create_mock.side_effect = [{
- 'id': 'mon-id',
- }]
- feed_create_mock.side_effect = ['feed-id']
- monitor = {
- 'name': 'test monitor',
- }
- provider._client._notifylists_cache = {}
- monitor_id, feed_id = provider._monitor_create(monitor)
- self.assertEquals('mon-id', monitor_id)
- self.assertEquals('feed-id', feed_id)
- monitors_create_mock.assert_has_calls([call(name='test monitor',
- notify_list='nl-id')])
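test_monitor_create above (and the shared-notifylist variant that follows) pin down an ordering: the notify list must exist before the monitor is created pointing at it, and the data feed is attached last so the monitor's status can drive answer `up` metadata. A rough sketch of that chain under the same assumptions as the mocks here, with `client` standing in for the Ns1Client wrapper and `feed_create` for Ns1Provider._feed_create:

def monitor_create(client, monitor, feed_create):
    # 1. notify list first; the monitor needs its id
    nl = client.notifylists_create(
        name=monitor['name'],
        notify_list=[{'config': {'sourceid': client.datasource_id},
                      'type': 'datafeed'}])
    monitor['notify_list'] = nl['id']
    # 2. the monitor itself
    monitor = client.monitors_create(**monitor)
    # 3. finally the feed that publishes the monitor's status
    feed_id = feed_create(monitor)
    return monitor['id'], feed_id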
- @patch('octodns.provider.ns1.Ns1Provider._feed_create')
- @patch('octodns.provider.ns1.Ns1Client.monitors_create')
- @patch('octodns.provider.ns1.Ns1Client._try')
- def test_monitor_create_shared_notifylist(self, try_mock,
- monitors_create_mock,
- feed_create_mock):
- provider = Ns1Provider('test', 'api-key', shared_notifylist=True)
- 
- # pre-fill caches to avoid extraneous calls (things we're testing
- # elsewhere)
- provider._client._datasource_id = 'foo'
- provider._client._feeds_for_monitors = {}
- 
- # First time we'll need to create the shared notify list
- provider._client._notifylists_cache = {}
- try_mock.reset_mock()
- monitors_create_mock.reset_mock()
- feed_create_mock.reset_mock()
- try_mock.side_effect = [{
- 'id': 'nl-id',
- 'name': provider.SHARED_NOTIFYLIST_NAME,
- }]
- monitors_create_mock.side_effect = [{
- 'id': 'mon-id',
- }]
- feed_create_mock.side_effect = ['feed-id']
- monitor = {
- 'name': 'test monitor',
- }
- monitor_id, feed_id = provider._monitor_create(monitor)
- self.assertEquals('mon-id', monitor_id)
- self.assertEquals('feed-id', feed_id)
- monitors_create_mock.assert_has_calls([call(name='test monitor',
- notify_list='nl-id')])
- try_mock.assert_called_once()
- # The shared notifylist should be cached now
- self.assertEquals([provider.SHARED_NOTIFYLIST_NAME],
- list(provider._client._notifylists_cache.keys()))
- 
- # Second time we'll use the cached version
- try_mock.reset_mock()
- monitors_create_mock.reset_mock()
- feed_create_mock.reset_mock()
- monitors_create_mock.side_effect = [{
- 'id': 'mon-id',
- }]
- feed_create_mock.side_effect = ['feed-id']
- monitor = {
- 'name': 'test monitor',
- }
- monitor_id, feed_id = provider._monitor_create(monitor)
- self.assertEquals('mon-id', monitor_id)
- self.assertEquals('feed-id', feed_id)
- monitors_create_mock.assert_has_calls([call(name='test monitor',
- notify_list='nl-id')])
- try_mock.assert_not_called()
- 
- def test_monitor_gen(self):
- provider = Ns1Provider('test', 'api-key')
- 
- value = '3.4.5.6'
- record = self.record()
- monitor = provider._monitor_gen(record, value)
- self.assertEquals(value, monitor['config']['host'])
- self.assertTrue('\\nHost: send.me\\r' in monitor['config']['send'])
- self.assertFalse(monitor['config']['ssl'])
- self.assertEquals('host:unit.tests type:A', monitor['notes'])
- 
- record._octodns['healthcheck']['host'] = None
- monitor = provider._monitor_gen(record, value)
- self.assertTrue(r'\nHost: 3.4.5.6\r' in monitor['config']['send'])
- 
- record._octodns['healthcheck']['protocol'] = 'HTTPS'
- monitor = provider._monitor_gen(record, value)
- self.assertTrue(monitor['config']['ssl'])
- 
- record._octodns['healthcheck']['protocol'] = 'TCP'
- monitor = provider._monitor_gen(record, value)
- # No http send done
- self.assertFalse('send' in monitor['config'])
- # No http response expected
- self.assertFalse('rules' in monitor)
- 
- record._octodns['ns1']['healthcheck']['policy'] = 'all'
- monitor = provider._monitor_gen(record, value)
- self.assertEquals('all', monitor['policy'])
- 
- record._octodns['ns1']['healthcheck']['frequency'] = 300
- monitor = provider._monitor_gen(record, value)
- self.assertEquals(300, monitor['frequency'])
- 
- record._octodns['ns1']['healthcheck']['rapid_recheck'] = True
- monitor = provider._monitor_gen(record, value)
- self.assertTrue(monitor['rapid_recheck'])
- 
- record._octodns['ns1']['healthcheck']['connect_timeout'] = 1
- monitor = provider._monitor_gen(record, value)
- self.assertEquals(1000, monitor['config']['connect_timeout'])
- 
- record._octodns['ns1']['healthcheck']['response_timeout'] = 2
- monitor = provider._monitor_gen(record, value)
- self.assertEquals(2000, monitor['config']['response_timeout'])
- 
- def test_monitor_gen_AAAA(self):
- provider = Ns1Provider('test', 'api-key')
- 
- value = '::ffff:3.4.5.6'
- record = self.aaaa_record()
- monitor = provider._monitor_gen(record, value)
- self.assertTrue(monitor['config']['ipv6'])
- 
- def test_monitor_gen_CNAME(self):
- provider = Ns1Provider('test', 'api-key')
- 
- value = 'iad.unit.tests.'
- record = self.cname_record()
- monitor = provider._monitor_gen(record, value)
- self.assertEquals(value[:-1], monitor['config']['host'])
- 
- def test_monitor_is_match(self):
- provider = Ns1Provider('test', 'api-key')
- 
- # Empty matches empty
- self.assertTrue(provider._monitor_is_match({}, {}))
- 
- # Anything matches empty
- self.assertTrue(provider._monitor_is_match({}, {
- 'anything': 'goes'
- }))
- 
- # Missing doesn't match
- self.assertFalse(provider._monitor_is_match({
- 'expect': 'this',
- }, {
- 'anything': 'goes'
- }))
- 
- # Identical matches
- self.assertTrue(provider._monitor_is_match({
- 'expect': 'this',
- }, {
- 'expect': 'this',
- }))
- 
- # Different values don't match
- self.assertFalse(provider._monitor_is_match({
- 'expect': 'this',
- }, {
- 'expect': 'that',
- }))
- 
- # Different sub-values don't match
- self.assertFalse(provider._monitor_is_match({
- 'expect': {
- 'this': 'to-be',
- },
- }, {
- 'expect': {
- 'this': 'something-else',
- },
- }))
- 
- @patch('octodns.provider.ns1.Ns1Provider._feed_create')
- @patch('octodns.provider.ns1.Ns1Client.monitors_update')
- @patch('octodns.provider.ns1.Ns1Provider._monitor_create')
- @patch('octodns.provider.ns1.Ns1Provider._monitor_gen')
- def test_monitor_sync(self, monitor_gen_mock, monitor_create_mock,
- monitors_update_mock, feed_create_mock):
- provider = Ns1Provider('test', 'api-key')
- 
- # pre-fill caches to avoid extraneous calls (things we're testing
- # elsewhere)
- provider._client._datasource_id = 'foo'
- provider._client._feeds_for_monitors = {
- 'mon-id': 'feed-id',
- }
- 
- def reset():
- feed_create_mock.reset_mock()
- monitor_create_mock.reset_mock()
- monitor_gen_mock.reset_mock()
- monitors_update_mock.reset_mock()
- 
- # No existing monitor
- reset()
- monitor_gen_mock.side_effect = [{'key': 'value'}]
- monitor_create_mock.side_effect = [('mon-id', 'feed-id')]
- value = '1.2.3.4'
- record = self.record()
- monitor_id, feed_id = provider._monitor_sync(record, value, None)
- self.assertEquals('mon-id', monitor_id)
- self.assertEquals('feed-id', feed_id)
- monitor_gen_mock.assert_has_calls([call(record, value)])
- monitor_create_mock.assert_has_calls([call({'key': 'value'})])
- monitors_update_mock.assert_not_called()
- feed_create_mock.assert_not_called()
- 
- # Existing monitor that doesn't need updates
- reset()
- monitor = {
- 'id': 'mon-id',
- 'key': 'value',
- 'name': 'monitor name',
- }
- monitor_gen_mock.side_effect = [monitor]
- monitor_id, feed_id = provider._monitor_sync(record, value,
- monitor)
- self.assertEquals('mon-id', monitor_id)
- self.assertEquals('feed-id', feed_id)
- monitor_gen_mock.assert_called_once()
- monitor_create_mock.assert_not_called()
- monitors_update_mock.assert_not_called()
- feed_create_mock.assert_not_called()
- 
- # Existing monitor that doesn't need updates, but is missing its feed
- reset()
- monitor = {
- 'id': 'mon-id2',
- 'key': 'value',
- 'name': 'monitor name',
- }
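test_monitor_is_match above pins down a one-way, recursive subset comparison: every key in the expected config must be present in the existing monitor with an equal (recursively matched) value, while extra keys on the existing side are ignored. An illustrative version of that behavior (not the provider's exact code):

def is_subset_match(expected, have):
    # `expected` drives the walk; anything extra in `have` is ignored
    for k, v in expected.items():
        other = have.get(k, '--missing--')
        if isinstance(v, dict) and isinstance(other, dict):
            if not is_subset_match(v, other):
                return False
        elif v != other:
            return False
    return True

assert is_subset_match({}, {'anything': 'goes'})
assert not is_subset_match({'expect': 'this'}, {'anything': 'goes'})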
monitor_gen_mock.side_effect = [monitor] - feed_create_mock.side_effect = ['feed-id2'] - monitor_id, feed_id = provider._monitor_sync(record, value, - monitor) - self.assertEquals('mon-id2', monitor_id) - self.assertEquals('feed-id2', feed_id) - monitor_gen_mock.assert_called_once() - monitor_create_mock.assert_not_called() - monitors_update_mock.assert_not_called() - feed_create_mock.assert_has_calls([call(monitor)]) - - # Existing monitor that needs updates - reset() - monitor = { - 'id': 'mon-id', - 'key': 'value', - 'name': 'monitor name', - } - gened = { - 'other': 'thing', - } - monitor_gen_mock.side_effect = [gened] - monitor_id, feed_id = provider._monitor_sync(record, value, - monitor) - self.assertEquals('mon-id', monitor_id) - self.assertEquals('feed-id', feed_id) - monitor_gen_mock.assert_called_once() - monitor_create_mock.assert_not_called() - monitors_update_mock.assert_has_calls([call('mon-id', other='thing')]) - feed_create_mock.assert_not_called() - - @patch('octodns.provider.ns1.Ns1Client.notifylists_delete') - @patch('octodns.provider.ns1.Ns1Client.monitors_delete') - @patch('octodns.provider.ns1.Ns1Client.datafeed_delete') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_monitors_gc(self, monitors_for_mock, datafeed_delete_mock, - monitors_delete_mock, notifylists_delete_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - def reset(): - datafeed_delete_mock.reset_mock() - monitors_delete_mock.reset_mock() - monitors_for_mock.reset_mock() - notifylists_delete_mock.reset_mock() - - # No active monitors and no existing, nothing will happen - reset() - monitors_for_mock.side_effect = [{}] - record = self.record() - provider._monitors_gc(record) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - # No active monitors and one existing, delete all the things - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - } - }] - provider._client._notifylists_cache = { - 'not shared': { - 'id': 'nl-id', - 'name': 'not shared', - } - } - provider._monitors_gc(record) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')]) - monitors_delete_mock.assert_has_calls([call('mon-id')]) - notifylists_delete_mock.assert_has_calls([call('nl-id')]) - - # Same existing, this time in active list, should be noop - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - } - }] - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - # Non-active monitor w/o a feed, and another monitor that's left alone - # b/c it's active - reset() - monitors_for_mock.side_effect = [{ - 'x': { - 'id': 'mon-id', - 'notify_list': 'nl-id', - }, - 'y': { - 'id': 'mon-id2', - 'notify_list': 'nl-id2', - }, - }] - provider._client._notifylists_cache = { - 'not shared': { - 'id': 'nl-id', - 'name': 'not shared', - }, - 'not shared 2': { - 'id': 'nl-id2', - 'name': 'not shared 2', - } - } - provider._monitors_gc(record, {'mon-id'}) - 
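The garbage-collection cases in test_monitors_gc reduce to one rule per existing monitor: if it isn't in the active set, delete its data feed (when one exists), delete the monitor, and delete its notify list only when that list is private, never the shared one. A condensed sketch of that decision, assuming the Ns1Client wrapper methods mocked in this test:

def gc_monitor(client, monitor, active_ids, shared_name):
    if monitor['id'] in active_ids:
        # still referenced by an answer; leave everything alone
        return
    feed_id = client.feeds_for_monitors.get(monitor['id'])
    if feed_id is not None:
        client.datafeed_delete(client.datasource_id, feed_id)
    client.monitors_delete(monitor['id'])
    for nl in client.notifylists.values():
        if nl['id'] != monitor['notify_list']:
            continue
        if nl['name'] != shared_name:
            client.notifylists_delete(nl['id'])
        break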
monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_has_calls([call('nl-id2')]) - - # Non-active monitor w/o a notifylist, generally shouldn't happen, but - # code should handle it just in case someone gets clicky in the UI - reset() - monitors_for_mock.side_effect = [{ - 'y': { - 'id': 'mon-id2', - 'notify_list': 'nl-id2', - }, - }] - provider._client._notifylists_cache = { - 'not shared a': { - 'id': 'nl-ida', - 'name': 'not shared a', - }, - 'not shared b': { - 'id': 'nl-idb', - 'name': 'not shared b', - } - } - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_not_called() - - # Non-active monitor with a shared notifylist, monitor deleted, but - # notifylist is left alone - reset() - provider.shared_notifylist = True - monitors_for_mock.side_effect = [{ - 'y': { - 'id': 'mon-id2', - 'notify_list': 'shared', - }, - }] - provider._client._notifylists_cache = { - 'shared': { - 'id': 'shared', - 'name': provider.SHARED_NOTIFYLIST_NAME, - }, - } - provider._monitors_gc(record, {'mon-id'}) - monitors_for_mock.assert_has_calls([call(record)]) - datafeed_delete_mock.assert_not_called() - monitors_delete_mock.assert_has_calls([call('mon-id2')]) - notifylists_delete_mock.assert_not_called() - - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_with_pool_status(self, monitors_for_mock): - provider = Ns1Provider('test', 'api-key') - monitors_for_mock.reset_mock() - monitors_for_mock.return_value = {} - record = Record.new(self.zone, '', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - 'status': 'up', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - params, active_monitors = provider._params_for_dynamic(record) - self.assertEqual(params['answers'][0]['meta']['up'], True) - self.assertEqual(len(active_monitors), 0) - - # check for down also - record.dynamic.pools['iad'].data['values'][0]['status'] = 'down' - params, active_monitors = provider._params_for_dynamic(record) - self.assertEqual(params['answers'][0]['meta']['up'], False) - self.assertEqual(len(active_monitors), 0) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_region_only(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule1 = record.data['dynamic']['rules'][1] - rule0['geos'] = ['AF', 'EU'] - rule1['geos'] = ['AS'] - ret, monitor_ids = provider._params_for_A(record) - self.assertEquals(10, len(ret['answers'])) - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_REGION) - self.assertEquals({ - 'iad__catchall': { - 'meta': { - 'note': 'rule-order:2' - } - }, - 'iad__georegion': { - 'meta': { - 'georegion': ['ASIAPAC'], - 'note': 'rule-order:1' - } - }, - 'lhr__georegion': { - 'meta': { - 'georegion': ['AFRICA', 'EUROPE'], - 'note': 'fallback:iad rule-order:0' - } - } - }, ret['regions']) - self.assertEquals({'mid-1', 'mid-2', 'mid-3'}, monitor_ids) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_state_only(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule1 = record.data['dynamic']['rules'][1] - rule0['geos'] = ['AF', 'EU'] - rule1['geos'] = ['NA-US-CA', 'NA-CA-NL'] - ret, _ = provider._params_for_A(record) - self.assertEquals(10, len(ret['answers'])) - exp = provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY - self.assertEquals(ret['filters'], exp) - self.assertEquals({ - 'iad__catchall': { - 'meta': { - 'note': 'rule-order:2' - } - }, - 'iad__country': { - 'meta': { - 'note': 'rule-order:1', - 'us_state': ['CA'], - 'ca_province': ['NL'] - } - }, - 'lhr__georegion': { - 'meta': { - 'georegion': ['AFRICA', 'EUROPE'], - 'note': 'fallback:iad rule-order:0' - } - } - }, ret['regions']) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_contient_and_countries(self, - monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
Mock their return values so that we don't
- # make NS1 API calls during tests
- provider._client.reset_caches()
- monitors_for_mock.reset_mock()
- monitor_sync_mock.reset_mock()
- monitors_for_mock.side_effect = [{
- '3.4.5.6': 'mid-3',
- }]
- monitor_sync_mock.side_effect = [
- ('mid-1', 'fid-1'),
- ('mid-2', 'fid-2'),
- ('mid-3', 'fid-3'),
- ]
- 
- record = self.record()
- rule0 = record.data['dynamic']['rules'][0]
- rule1 = record.data['dynamic']['rules'][1]
- rule0['geos'] = ['AF', 'EU', 'NA-US-CA']
- rule1['geos'] = ['AS', 'AS-IN']
- ret, _ = provider._params_for_A(record)
- 
- self.assertEquals(17, len(ret['answers']))
- # Deeply check the answers we have here
- # group the answers based on where they came from
- notes = defaultdict(list)
- for answer in ret['answers']:
- notes[answer['meta']['note']].append(answer)
- # Remove the meta and region part since it'll vary based on the
- # exact pool, that'll let us == them down below
- del answer['meta']
- del answer['region']
- 
- # Expected groups. iad has several occurrences in here: a country
- # entry and a georegion entry that were split out from targeting a
- # continent and a state, and finally a catchall. Those are examples
- # of the two ways pools get expanded.
- #
- # lhr splits in two, with a georegion and a country entry, and
- # includes a fallback.
- #
- # All values now include their own `pool:` name. iad is targeted by
- # both rule 1 and the catchall rule, an example of a repeated target
- # pool in the rules (only allowed when the 2nd use is a catchall.)
- self.assertEquals(['fallback: from:iad__catchall pool:iad',
- 'fallback: from:iad__country pool:iad',
- 'fallback: from:iad__georegion pool:iad',
- 'fallback: from:lhr__country pool:iad',
- 'fallback: from:lhr__georegion pool:iad',
- 'fallback:iad from:lhr__country pool:lhr',
- 'fallback:iad from:lhr__georegion pool:lhr',
- 'from:--default--'],
- sorted(notes.keys()))
- 
- # All the iad's should match (after meta and region were removed)
- self.assertEquals(notes['from:iad__catchall'],
- notes['from:iad__country'])
- self.assertEquals(notes['from:iad__catchall'],
- notes['from:iad__georegion'])
- 
- # The lhrs should match each other too
- self.assertEquals(notes['from:lhr__georegion'],
- notes['from:lhr__country'])
- 
- # We have both country and region filter chain entries
- exp = provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY
- self.assertEquals(ret['filters'], exp)
- 
- # and our region details match the expected behaviors/targeting
- self.assertEquals({
- 'iad__catchall': {
- 'meta': {
- 'note': 'rule-order:2'
- }
- },
- 'iad__country': {
- 'meta': {
- 'country': ['IN'],
- 'note': 'rule-order:1'
- }
- },
- 'iad__georegion': {
- 'meta': {
- 'georegion': ['ASIAPAC'],
- 'note': 'rule-order:1'
- }
- },
- 'lhr__country': {
- 'meta': {
- 'note': 'fallback:iad rule-order:0',
- 'us_state': ['CA']
- }
- },
- 'lhr__georegion': {
- 'meta': {
- 'georegion': ['AFRICA', 'EUROPE'],
- 'note': 'fallback:iad rule-order:0'
- }
- }
- }, ret['regions'])
- 
- @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
- @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
- def test_params_for_dynamic_oceania(self, monitors_for_mock,
- monitor_sync_mock):
- provider = Ns1Provider('test', 'api-key')
- 
- # pre-fill caches to avoid extraneous calls (things we're testing
- # elsewhere)
- provider._client._datasource_id = 'foo'
- provider._client._feeds_for_monitors = {
- 'mon-id': 'feed-id',
- }
- 
- # provider._params_for_A() calls provider._monitors_for() and
- # provider._monitor_sync(). 
Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - - # Set geos to 'OC' in rules[0] (pool - 'lhr') - # Check returned dict has list of countries under 'OC' - record = self.record() - rule0 = record.data['dynamic']['rules'][0] - rule0['geos'] = ['OC'] - ret, _ = provider._params_for_A(record) - - # Make sure the country list expanded into all the OC countries - got = set(ret['regions']['lhr__country']['meta']['country']) - self.assertEquals(got, - Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']) - - # When rules has 'OC', it is converted to list of countries in the - # params. Look if the returned filters is the filter chain with country - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_COUNTRY) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic(self, monitors_for_mock, monitors_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - monitors_for_mock.reset_mock() - monitors_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - '3.4.5.6': 'mid-3', - }] - monitors_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ('mid-2', 'fid-2'), - ('mid-3', 'fid-3'), - ] - # This indirectly calls into _params_for_dynamic and tests the - # handling to get there - record = self.record() - # copy an existing answer from a different pool to 'lhr' so - # in order to test answer repetition across pools (monitor reuse) - record.dynamic._data()['pools']['lhr']['values'].append( - record.dynamic._data()['pools']['iad']['values'][0]) - ret, _ = provider._params_for_A(record) - - # Given that record has both country and region in the rules, - # the returned filter chain should be one with region and country - self.assertEquals(ret['filters'], - provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY) - - monitors_for_mock.assert_has_calls([call(record)]) - monitors_sync_mock.assert_has_calls([ - call(record, '1.2.3.4', None), - call(record, '2.3.4.5', None), - call(record, '3.4.5.6', 'mid-3'), - ]) - - record = Record.new(self.zone, 'geo', { - 'ttl': 34, - 'type': 'A', - 'values': ['101.102.103.104', '101.102.103.105'], - 'geo': {'EU': ['201.202.203.204']}, - 'meta': {}, - }) - params, _ = provider._params_for_geo_A(record) - self.assertEquals([], params['filters']) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_params_for_dynamic_CNAME(self, monitors_for_mock, - monitor_sync_mock): - provider = Ns1Provider('test', 'api-key') - - # pre-fill caches to avoid extranious calls (things we're testing - # elsewhere) - provider._client._datasource_id = 'foo' - provider._client._feeds_for_monitors = { - 'mon-id': 'feed-id', - } - - # provider._params_for_A() calls provider._monitors_for() and - # provider._monitor_sync(). 
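The Oceania test above exists because NS1's georegion targeting has no region covering Oceania, so a rule targeting 'OC' has to be expanded into an explicit country list (and collapsed back when reading). A toy sketch of that expansion with a deliberately truncated mapping (the provider keeps the full _CONTINENT_TO_LIST_OF_COUNTRIES table):

CONTINENT_TO_COUNTRIES = {
    # truncated for illustration; the real table lists every OC country
    'OC': {'AU', 'FJ', 'NZ', 'PG', 'WS'},
}

def expand_continent(geo):
    # continent codes become an explicit country list; anything else
    # (countries, subdivisions) passes through untouched
    if geo in CONTINENT_TO_COUNTRIES:
        return sorted(CONTINENT_TO_COUNTRIES[geo])
    return [geo]

assert expand_continent('OC') == ['AU', 'FJ', 'NZ', 'PG', 'WS']
assert expand_continent('NA-US-CA') == ['NA-US-CA']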
Mock their return values so that we don't - # make NS1 API calls during tests - monitors_for_mock.reset_mock() - monitor_sync_mock.reset_mock() - monitors_for_mock.side_effect = [{ - 'iad.unit.tests.': 'mid-1', - }] - monitor_sync_mock.side_effect = [ - ('mid-1', 'fid-1'), - ] - - record = self.cname_record() - ret, _ = provider._params_for_CNAME(record) - - # Check if the default value was correctly read and populated - # All other dynamic record test cases are covered by dynamic_A tests - self.assertEquals(ret['answers'][-1]['answer'][0], 'value.unit.tests.') - - def test_data_for_dynamic(self): - provider = Ns1Provider('test', 'api-key') - - # empty record turns into empty data - ns1_record = { - 'answers': [], - 'domain': 'unit.tests', - 'filters': provider._BASIC_FILTER_CHAIN, - 'regions': {}, - 'ttl': 42, - } - data = provider._data_for_dynamic('A', ns1_record) - self.assertEquals({ - 'dynamic': { - 'pools': {}, - 'rules': [], - }, - 'ttl': 42, - 'type': 'A', - 'values': [], - }, data) - - # Test out a small, but realistic setup that covers all the options - # We have country and region in the test config - filters = provider._get_updated_filter_chain(True, True) - catchall_pool_name = 'iad__catchall' - ns1_record = { - 'answers': [{ - 'answer': ['3.4.5.6'], - 'meta': { - 'priority': 1, - 'note': 'from:lhr__country', - 'up': {}, - }, - 'region': 'lhr', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 2, - 'weight': 12, - 'note': 'from:iad', - 'up': {}, - }, - 'region': 'lhr', - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 3, - 'note': 'from:--default--', - }, - 'region': 'lhr', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'note': 'from:iad', - 'up': {}, - }, - 'region': 'iad', - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - }, - 'region': 'iad', - }, { - 'answer': ['2.3.4.5'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'note': f'from:{catchall_pool_name}', - 'up': {}, - }, - 'region': catchall_pool_name, - }, { - 'answer': ['1.2.3.4'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - }, - 'region': catchall_pool_name, - }], - 'domain': 'unit.tests', - 'filters': filters, - 'regions': { - # lhr will use the new-split style names (and that will require - # combining in the code to produce the expected answer - 'lhr__georegion': { - 'meta': { - 'note': 'rule-order:1 fallback:iad', - 'georegion': ['AFRICA'], - }, - }, - 'lhr__country': { - 'meta': { - 'note': 'rule-order:1 fallback:iad', - 'country': ['MX'], - 'us_state': ['OR'], - 'ca_province': ['NL'] - }, - }, - # iad will use the old style "plain" region naming. 
We won't
- # see mixed names like this in practice, but this should
- # exercise both paths
- 'iad': {
- 'meta': {
- 'note': 'rule-order:2',
- 'country': ['ZW'],
- },
- },
- catchall_pool_name: {
- 'meta': {
- 'note': 'rule-order:3',
- },
- }
- },
- 'tier': 3,
- 'ttl': 42,
- }
- data = provider._data_for_dynamic('A', ns1_record)
- self.assertEquals({
- 'dynamic': {
- 'pools': {
- 'iad': {
- 'fallback': None,
- 'values': [{
- 'value': '2.3.4.5',
- 'weight': 12,
- }],
- },
- 'lhr': {
- 'fallback': 'iad',
- 'values': [{
- 'weight': 1,
- 'value': '3.4.5.6',
- }],
- },
- },
- 'rules': [{
- '_order': '1',
- 'geos': [
- 'AF',
- 'NA-CA-NL',
- 'NA-MX',
- 'NA-US-OR'
- ],
- 'pool': 'lhr',
- }, {
- '_order': '2',
- 'geos': [
- 'AF-ZW',
- ],
- 'pool': 'iad',
- }, {
- '_order': '3',
- 'pool': 'iad',
- }],
- },
- 'ttl': 42,
- 'type': 'A',
- 'values': ['1.2.3.4'],
- }, data)
- 
- # Same answer if we go through _data_for_A which outsources the job to
- # _data_for_dynamic
- data2 = provider._data_for_A('A', ns1_record)
- self.assertEquals(data, data2)
- 
- # Same answer if we have an old-style catchall name
- old_style_catchall_pool_name = 'catchall__iad'
- ns1_record['answers'][-2]['region'] = old_style_catchall_pool_name
- ns1_record['answers'][-1]['region'] = old_style_catchall_pool_name
- ns1_record['regions'][old_style_catchall_pool_name] = \
- ns1_record['regions'][catchall_pool_name]
- del ns1_record['regions'][catchall_pool_name]
- data3 = provider._data_for_dynamic('A', ns1_record)
- self.assertEquals(data, data3)
- 
- # Oceania test cases
- # 1. Full list of countries should return 'OC' in geos
- oc_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']
- ns1_record['regions']['lhr__country']['meta']['country'] = \
- list(oc_countries)
- data3 = provider._data_for_A('A', ns1_record)
- self.assertTrue('OC' in data3['dynamic']['rules'][0]['geos'])
- 
- # 2. Partial list of countries should return just those
- partial_oc_cntry_list = list(oc_countries)[:5]
- ns1_record['regions']['lhr__country']['meta']['country'] = \
- partial_oc_cntry_list
- data4 = provider._data_for_A('A', ns1_record)
- for c in partial_oc_cntry_list:
- self.assertTrue(f'OC-{c}' in data4['dynamic']['rules'][0]['geos'])
- 
- # NA test cases
- # 1. Full list of countries should return 'NA' in geos
- na_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['NA']
- del ns1_record['regions']['lhr__country']['meta']['us_state']
- ns1_record['regions']['lhr__country']['meta']['country'] = \
- list(na_countries)
- data5 = provider._data_for_A('A', ns1_record)
- self.assertTrue('NA' in data5['dynamic']['rules'][0]['geos'])
- 
- # 2. 
Partial list of countries should return just those - partial_na_cntry_list = list(na_countries)[:5] + ['SX', 'UM'] - ns1_record['regions']['lhr__country']['meta']['country'] = \ - partial_na_cntry_list - data6 = provider._data_for_A('A', ns1_record) - for c in partial_na_cntry_list: - self.assertTrue(f'NA-{c}' in data6['dynamic']['rules'][0]['geos']) - - # Test out fallback only pools and new-style notes - ns1_record = { - 'answers': [{ - 'answer': ['1.1.1.1'], - 'meta': { - 'priority': 1, - 'note': 'from:one__country pool:one fallback:two', - 'up': True, - }, - 'region': 'one_country', - }, { - 'answer': ['2.2.2.2'], - 'meta': { - 'priority': 2, - 'note': 'from:one__country pool:two fallback:three', - 'up': {}, - }, - 'region': 'one_country', - }, { - 'answer': ['3.3.3.3'], - 'meta': { - 'priority': 3, - 'note': 'from:one__country pool:three fallback:', - 'up': False, - }, - 'region': 'one_country', - }, { - 'answer': ['5.5.5.5'], - 'meta': { - 'priority': 4, - 'note': 'from:--default--', - }, - 'region': 'one_country', - }, { - 'answer': ['4.4.4.4'], - 'meta': { - 'priority': 1, - 'note': 'from:four__country pool:four fallback:', - 'up': {}, - }, - 'region': 'four_country', - }, { - 'answer': ['5.5.5.5'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - }, - 'region': 'four_country', - }], - 'domain': 'unit.tests', - 'filters': filters, - 'regions': { - 'one__country': { - 'meta': { - 'note': 'rule-order:1 fallback:two', - 'country': ['CA'], - 'us_state': ['OR'], - }, - }, - 'four__country': { - 'meta': { - 'note': 'rule-order:2', - 'country': ['CA'], - 'us_state': ['OR'], - }, - }, - catchall_pool_name: { - 'meta': { - 'note': 'rule-order:3', - }, - } - }, - 'tier': 3, - 'ttl': 42, - } - data = provider._data_for_dynamic('A', ns1_record) - self.assertEquals({ - 'dynamic': { - 'pools': { - 'four': { - 'fallback': None, - 'values': [{'value': '4.4.4.4', 'weight': 1}] - }, - 'one': { - 'fallback': 'two', - 'values': [ - {'value': '1.1.1.1', 'weight': 1, 'status': 'up'}, - ], - }, - 'three': { - 'fallback': None, - 'values': [ - {'value': '3.3.3.3', 'weight': 1, 'status': 'down'} - ] - }, - 'two': { - 'fallback': 'three', - 'values': [{'value': '2.2.2.2', 'weight': 1}] - }, - }, - 'rules': [{ - '_order': '1', - 'geos': ['NA-CA', 'NA-US-OR'], - 'pool': 'one' - }, { - '_order': '2', - 'geos': ['NA-CA', 'NA-US-OR'], - 'pool': 'four' - }, { - '_order': '3', 'pool': 'iad'} - ] - }, - 'ttl': 42, - 'type': 'A', - 'values': ['5.5.5.5'] - }, data) - - def test_data_for_dynamic_CNAME(self): - provider = Ns1Provider('test', 'api-key') - - # Test out a small setup that just covers default value validation - # Everything else is same as dynamic A whose tests will cover all - # other options and test cases - # Not testing for geo/region specific cases - filters = provider._get_updated_filter_chain(False, False) - catchall_pool_name = 'iad__catchall' - ns1_record = { - 'answers': [{ - 'answer': ['iad.unit.tests.'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'note': f'pool:iad from:{catchall_pool_name}', - 'up': {}, - }, - 'region': catchall_pool_name, - }, { - 'answer': ['value.unit.tests.'], - 'meta': { - 'priority': 2, - 'note': 'from:--default--', - 'up': {}, - }, - 'region': catchall_pool_name, - }], - 'domain': 'foo.unit.tests', - 'filters': filters, - 'regions': { - catchall_pool_name: { - 'meta': { - 'note': 'rule-order:1', - }, - } - }, - 'tier': 3, - 'ttl': 43, - 'type': 'CNAME', - } - data = provider._data_for_CNAME('CNAME', ns1_record) - self.assertEquals({ - 'dynamic': { - 
'pools': { - 'iad': { - 'fallback': None, - 'values': [{ - 'value': 'iad.unit.tests.', - 'weight': 12, - }], - }, - }, - 'rules': [{ - '_order': '1', - 'pool': 'iad', - }], - }, - 'ttl': 43, - 'type': 'CNAME', - 'value': 'value.unit.tests.', - }, data) - - def test_data_for_invalid_dynamic_CNAME(self): - provider = Ns1Provider('test', 'api-key') - - # Potential setup created outside of octoDNS, so it could be missing - # notes and region names can be arbitrary - filters = provider._get_updated_filter_chain(False, False) - ns1_record = { - 'answers': [{ - 'answer': ['iad.unit.tests.'], - 'meta': { - 'priority': 1, - 'weight': 12, - 'up': {}, - }, - 'region': 'global', - }, { - 'answer': ['value.unit.tests.'], - 'meta': { - 'priority': 2, - 'up': {}, - }, - 'region': 'global', - }], - 'domain': 'foo.unit.tests', - 'filters': filters, - 'regions': { - 'global': {}, - }, - 'tier': 3, - 'ttl': 44, - 'type': 'CNAME', - } - data = provider._data_for_CNAME('CNAME', ns1_record) - self.assertEquals({ - 'ttl': 44, - 'type': 'CNAME', - 'value': None, - }, data) - - @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_dynamic_explicit_countries(self, monitors_for_mock, - monitors_sync_mock): - provider = Ns1Provider('test', 'api-key') - record_data = { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': 'iad.unit.tests.', - 'status': 'up', - }], - }, - 'lhr': { - 'values': [{ - 'value': 'lhr.unit.tests.', - 'status': 'up', - }] - } - }, - 'rules': [ - { - 'geos': ['NA-US'], - 'pool': 'iad', - }, - { - 'geos': ['NA'], - 'pool': 'lhr', - }, - ], - }, - 'ttl': 33, - 'type': 'CNAME', - 'value': 'value.unit.tests.', - } - record = Record.new(self.zone, 'foo', record_data) - - ns1_record, _ = provider._params_for_dynamic(record) - regions = [ - r for r in ns1_record['regions'].values() - if 'US' in r['meta']['country'] - ] - self.assertEquals(len(regions), 1) - - ns1_record['domain'] = record.fqdn[:-1] - data = provider._data_for_dynamic(record._type, ns1_record)['dynamic'] - self.assertEquals(data['rules'][0]['geos'], ['NA-US']) - self.assertEquals(data['rules'][1]['geos'], ['NA']) - - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.retrieve') - @patch('octodns.provider.ns1.Ns1Provider._monitors_for') - def test_extra_changes(self, monitors_for_mock, zones_retrieve_mock, - records_retrieve_mock): - provider = Ns1Provider('test', 'api-key') - - desired = Zone('unit.tests.', []) - - def reset(): - monitors_for_mock.reset_mock() - provider._client.reset_caches() - records_retrieve_mock.reset_mock() - zones_retrieve_mock.reset_mock() - - # Empty zone and no changes - reset() - - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Non-existent zone. 
No changes - reset() - zones_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - - # Simple record, ignored, filter update lookups ignored - reset() - zones_retrieve_mock.side_effect = \ - ResourceException('server error: zone not found') - - simple = Record.new(desired, '', { - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - desired.add_record(simple) - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Dynamic record, inspectable - dynamic = Record.new(desired, 'dyn', { - 'dynamic': { - 'pools': { - 'iad': { - 'values': [{ - 'value': '1.2.3.4', - }], - }, - }, - 'rules': [{ - 'pool': 'iad', - }], - }, - 'octodns': { - 'healthcheck': { - 'host': 'send.me', - 'path': '/_ping', - 'port': 80, - 'protocol': 'HTTP', - } - }, - 'ttl': 32, - 'type': 'A', - 'value': '1.2.3.4', - 'meta': {}, - }) - desired.add_record(dynamic) - - # untouched, but everything in sync so no change needed - reset() - # Generate what we expect to have - provider.record_filters[dynamic.fqdn[:-1]] = { - dynamic._type: provider._get_updated_filter_chain(False, False) - } - gend = provider._monitor_gen(dynamic, '1.2.3.4') - gend.update({ - 'id': 'mid', # need to add an id - 'notify_list': 'xyz', # need to add a notify list (for now) - }) - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - update = Update(dynamic, dynamic) - - # If we don't have a notify list we're broken and we'll expect to see - # an Update - reset() - del gend['notify_list'] - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertEquals(1, len(extra)) - extra = list(extra)[0] - self.assertIsInstance(extra, Update) - self.assertEquals(dynamic, extra.new) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - # Add notify_list back and change the healthcheck protocol, we'll still - # expect to see an update - reset() - gend['notify_list'] = 'xyz' - dynamic._octodns['healthcheck']['protocol'] = 'HTTPS' - del gend['notify_list'] - monitors_for_mock.side_effect = [{ - '1.2.3.4': gend, - }] - extra = provider._extra_changes(desired, []) - self.assertEquals(1, len(extra)) - extra = list(extra)[0] - self.assertIsInstance(extra, Update) - self.assertEquals(dynamic, extra.new) - monitors_for_mock.assert_has_calls([call(dynamic)]) - - # If it's in the changed list, it'll be ignored - reset() - extra = provider._extra_changes(desired, [update]) - self.assertFalse(extra) - monitors_for_mock.assert_not_called() - - # Test changes in filters - - # No change in filters - reset() - ns1_zone = { - 'records': [{ - "domain": "dyn.unit.tests", - "zone": "unit.tests", - "type": "A", - "tier": 3, - "filters": provider._BASIC_FILTER_CHAIN - }], - } - monitors_for_mock.side_effect = [{}] - zones_retrieve_mock.side_effect = [ns1_zone] - records_retrieve_mock.side_effect = ns1_zone['records'] - extra = provider._extra_changes(desired, []) - self.assertFalse(extra) - - # filters need an update - reset() - ns1_zone = { - 'records': [{ - "domain": "dyn.unit.tests", - "zone": "unit.tests", - "type": "A", - "tier": 3, - "filters": provider._BASIC_FILTER_CHAIN[:-1] - }], - } - monitors_for_mock.side_effect = [{}] - zones_retrieve_mock.side_effect = [ns1_zone] - records_retrieve_mock.side_effect 
= ns1_zone['records']
- ns1_record = ns1_zone['records'][0]
- provider.record_filters[ns1_record['domain']] = {
- ns1_record['type']: ns1_record['filters']
- }
- extra = provider._extra_changes(desired, [])
- self.assertTrue(extra)
- 
- # disabled=False in filters doesn't trigger an update
- reset()
- ns1_zone = {
- 'records': [{
- "domain": "dyn.unit.tests",
- "zone": "unit.tests",
- "type": "A",
- "tier": 3,
- "filters": provider._BASIC_FILTER_CHAIN
- }],
- }
- ns1_zone['records'][0]['filters'][0]['disabled'] = False
- monitors_for_mock.side_effect = [{}]
- zones_retrieve_mock.side_effect = [ns1_zone]
- records_retrieve_mock.side_effect = ns1_zone['records']
- ns1_record = ns1_zone['records'][0]
- provider.record_filters[ns1_record['domain']] = {
- ns1_record['type']: ns1_record['filters']
- }
- extra = provider._extra_changes(desired, [])
- self.assertFalse(extra)
- 
- # disabled=True in filters does trigger an update
- ns1_zone['records'][0]['filters'][0]['disabled'] = True
- extra = provider._extra_changes(desired, [])
- self.assertTrue(extra)
- 
- DESIRED = Zone('unit.tests.', [])
- 
- SIMPLE = Record.new(DESIRED, 'sim', {
- 'ttl': 33,
- 'type': 'A',
- 'value': '1.2.3.4',
- })
- 
- # Dynamic record, inspectable
- DYNAMIC = Record.new(DESIRED, 'dyn', {
- 'dynamic': {
- 'pools': {
- 'iad': {
- 'values': [{
- 'value': '1.2.3.4',
- }],
- },
- },
- 'rules': [{
- 'pool': 'iad',
- }],
- },
- 'octodns': {
- 'healthcheck': {
- 'host': 'send.me',
- 'path': '/_ping',
- 'port': 80,
- 'protocol': 'HTTP',
- }
- },
- 'ttl': 32,
- 'type': 'A',
- 'value': '1.2.3.4',
- 'meta': {},
- })
- 
- def test_has_dynamic(self):
- provider = Ns1Provider('test', 'api-key')
- 
- simple_update = Update(self.SIMPLE, self.SIMPLE)
- dynamic_update = Update(self.DYNAMIC, self.DYNAMIC)
- 
- self.assertFalse(provider._has_dynamic([simple_update]))
- self.assertTrue(provider._has_dynamic([dynamic_update]))
- self.assertTrue(provider._has_dynamic([simple_update, dynamic_update]))
- 
- @patch('octodns.provider.ns1.Ns1Client.zones_retrieve')
- @patch('octodns.provider.ns1.Ns1Provider._apply_Update')
- def test_apply_monitor_regions(self, apply_update_mock,
- zones_retrieve_mock):
- provider = Ns1Provider('test', 'api-key')
- 
- simple_update = Update(self.SIMPLE, self.SIMPLE)
- simple_plan = Plan(self.DESIRED, self.DESIRED, [simple_update], True)
- dynamic_update = Update(self.DYNAMIC, self.DYNAMIC)
- dynamic_plan = Plan(self.DESIRED, self.DESIRED, [dynamic_update],
- True)
- both_plan = Plan(self.DESIRED, self.DESIRED, [simple_update,
- dynamic_update], True)
- 
- # always return foo, we aren't testing this part here
- zones_retrieve_mock.side_effect = [
- 'foo',
- 'foo',
- 'foo',
- 'foo',
- ]
- 
- # Doesn't blow up, and calls apply once
- apply_update_mock.reset_mock()
- provider._apply(simple_plan)
- apply_update_mock.assert_has_calls([call('foo', simple_update)])
- 
- # Blows up and apply not called
- apply_update_mock.reset_mock()
- with self.assertRaises(Ns1Exception) as ctx:
- provider._apply(dynamic_plan)
- self.assertTrue('monitor_regions not set' in str(ctx.exception))
- apply_update_mock.assert_not_called()
- 
- # Blows up and apply not called even though there's a simple
- apply_update_mock.reset_mock()
- with self.assertRaises(Ns1Exception) as ctx:
- provider._apply(both_plan)
- self.assertTrue('monitor_regions not set' in str(ctx.exception))
- apply_update_mock.assert_not_called()
- 
- # with monitor_regions set
- provider.monitor_regions = ['lga']
- 
- apply_update_mock.reset_mock()
- provider._apply(both_plan)
- apply_update_mock.assert_has_calls([
- call('foo', dynamic_update),
- call('foo', simple_update),
- ])
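test_retry_behavior below exercises the client's rate-limit handling: calls are retried on RateLimitException up to retry_count attempts, sleeping for the period the API reports between tries, and the final failure is re-raised. A compact sketch of that loop (same shape as the behavior under test, assuming only the exception's `period` attribute):

from time import sleep

from ns1.rest.errors import RateLimitException

def try_with_retries(func, *args, retry_count=4, **kwargs):
    tries = retry_count
    while True:
        try:
            return func(*args, **kwargs)
        except RateLimitException as e:
            tries -= 1
            if tries <= 0:
                # out of attempts; surface the last failure
                raise
            # period is the token-replenishment interval NS1 reports
            sleep(e.period)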
- class TestNs1Client(TestCase):
- 
- @patch('ns1.rest.zones.Zones.retrieve')
- def test_retry_behavior(self, zone_retrieve_mock):
- client = Ns1Client('dummy-key')
- 
- # No retry required, just calls and is returned
- client.reset_caches()
- zone_retrieve_mock.reset_mock()
- zone_retrieve_mock.side_effect = ['foo']
- self.assertEquals('foo', client.zones_retrieve('unit.tests'))
- zone_retrieve_mock.assert_has_calls([call('unit.tests')])
- 
- # One retry required
- client.reset_caches()
- zone_retrieve_mock.reset_mock()
- zone_retrieve_mock.side_effect = [
- RateLimitException('boo', period=0),
- 'foo'
- ]
- self.assertEquals('foo', client.zones_retrieve('unit.tests'))
- zone_retrieve_mock.assert_has_calls([call('unit.tests')])
- 
- # Two retries required
- client.reset_caches()
- zone_retrieve_mock.reset_mock()
- zone_retrieve_mock.side_effect = [
- RateLimitException('boo', period=0),
- RateLimitException('boo', period=0),
- 'foo'
- ]
- self.assertEquals('foo', client.zones_retrieve('unit.tests'))
- zone_retrieve_mock.assert_has_calls([call('unit.tests')])
- 
- # Exhaust our retries
- client.reset_caches()
- zone_retrieve_mock.reset_mock()
- zone_retrieve_mock.side_effect = [
- RateLimitException('first', period=0),
- RateLimitException('boo', period=0),
- RateLimitException('boo', period=0),
- RateLimitException('last', period=0),
- ]
- with self.assertRaises(RateLimitException) as ctx:
- client.zones_retrieve('unit.tests')
- self.assertEquals('last', str(ctx.exception))
- 
- def test_client_config(self):
- with self.assertRaises(TypeError):
- Ns1Client()
- 
- client = Ns1Client('dummy-key')
- self.assertEquals(
- client._client.config.get('keys'),
- {'default': {'key': u'dummy-key', 'desc': 'imported API key'}})
- self.assertEquals(client._client.config.get('follow_pagination'), True)
- self.assertEquals(
- client._client.config.get('rate_limit_strategy'), None)
- self.assertEquals(client._client.config.get('parallelism'), None)
- 
- client = Ns1Client('dummy-key', parallelism=11)
- self.assertEquals(
- client._client.config.get('rate_limit_strategy'), 'concurrent')
- self.assertEquals(client._client.config.get('parallelism'), 11)
- 
- client = Ns1Client('dummy-key', client_config={
- 'endpoint': 'my.endpoint.com', 'follow_pagination': False})
- self.assertEquals(
- client._client.config.get('endpoint'), 'my.endpoint.com')
- self.assertEquals(
- client._client.config.get('follow_pagination'), False)
- 
- @patch('ns1.rest.data.Source.list')
- @patch('ns1.rest.data.Source.create')
- def test_datasource_id(self, datasource_create_mock, datasource_list_mock):
- client = Ns1Client('dummy-key')
- 
- # First invocation with an empty list creates the datasource
- datasource_list_mock.reset_mock()
- datasource_create_mock.reset_mock()
- datasource_list_mock.side_effect = [[]]
- datasource_create_mock.side_effect = [{
- 'id': 'foo',
- }]
- self.assertEquals('foo', client.datasource_id)
- name = 'octoDNS NS1 Data Source'
- source_type = 'nsone_monitoring'
- datasource_create_mock.assert_has_calls([call(name=name,
- sourcetype=source_type)])
- datasource_list_mock.assert_called_once()
- 
- # 2nd invocation is cached
- datasource_list_mock.reset_mock()
- datasource_create_mock.reset_mock()
- self.assertEquals('foo', client.datasource_id)
- datasource_create_mock.assert_not_called()
- datasource_list_mock.assert_not_called()
- 
- # Reset the client's cache
- client._datasource_id = None
- 
- # First 
invocation with a match in the list finds it and doesn't call - # create - datasource_list_mock.reset_mock() - datasource_create_mock.reset_mock() - datasource_list_mock.side_effect = [[{ - 'id': 'other', - 'name': 'not a match', - }, { - 'id': 'bar', - 'name': name, - }]] - self.assertEquals('bar', client.datasource_id) - datasource_create_mock.assert_not_called() - datasource_list_mock.assert_called_once() - - @patch('ns1.rest.data.Feed.delete') - @patch('ns1.rest.data.Feed.create') - @patch('ns1.rest.data.Feed.list') - def test_feeds_for_monitors(self, datafeed_list_mock, - datafeed_create_mock, - datafeed_delete_mock): - client = Ns1Client('dummy-key') - - # pre-cache datasource_id - client._datasource_id = 'foo' - - # Populate the cache and check the results - datafeed_list_mock.reset_mock() - datafeed_list_mock.side_effect = [[{ - 'config': { - 'jobid': 'the-job', - }, - 'id': 'the-feed', - }, { - 'config': { - 'jobid': 'the-other-job', - }, - 'id': 'the-other-feed', - }]] - expected = { - 'the-job': 'the-feed', - 'the-other-job': 'the-other-feed', - } - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_list_mock.assert_called_once() - - # 2nd call uses cache - datafeed_list_mock.reset_mock() - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_list_mock.assert_not_called() - - # create a feed and make sure it's in the cache/map - datafeed_create_mock.reset_mock() - datafeed_create_mock.side_effect = [{ - 'id': 'new-feed', - }] - client.datafeed_create(client.datasource_id, 'new-name', { - 'jobid': 'new-job', - }) - datafeed_create_mock.assert_has_calls([call('foo', 'new-name', { - 'jobid': 'new-job', - })]) - new_expected = expected.copy() - new_expected['new-job'] = 'new-feed' - self.assertEquals(new_expected, client.feeds_for_monitors) - datafeed_create_mock.assert_called_once() - - # Delete a feed and make sure it's out of the cache/map - datafeed_delete_mock.reset_mock() - client.datafeed_delete(client.datasource_id, 'new-feed') - self.assertEquals(expected, client.feeds_for_monitors) - datafeed_delete_mock.assert_called_once() - - @patch('ns1.rest.monitoring.Monitors.delete') - @patch('ns1.rest.monitoring.Monitors.update') - @patch('ns1.rest.monitoring.Monitors.create') - @patch('ns1.rest.monitoring.Monitors.list') - def test_monitors(self, monitors_list_mock, monitors_create_mock, - monitors_update_mock, monitors_delete_mock): - client = Ns1Client('dummy-key') - - one = { - 'id': 'one', - 'key': 'value', - } - two = { - 'id': 'two', - 'key': 'other-value', - } - - # Populate the cache and check the results - monitors_list_mock.reset_mock() - monitors_list_mock.side_effect = [[one, two]] - expected = { - 'one': one, - 'two': two, - } - self.assertEquals(expected, client.monitors) - monitors_list_mock.assert_called_once() - - # 2nd round pulls it from cache - monitors_list_mock.reset_mock() - self.assertEquals(expected, client.monitors) - monitors_list_mock.assert_not_called() - - # Create a monitor, make sure it's in the list - monitors_create_mock.reset_mock() - monitor = { - 'id': 'new-id', - 'key': 'new-value', - } - monitors_create_mock.side_effect = [monitor] - self.assertEquals(monitor, client.monitors_create(param='eter')) - monitors_create_mock.assert_has_calls([call({}, param='eter')]) - new_expected = expected.copy() - new_expected['new-id'] = monitor - self.assertEquals(new_expected, client.monitors) - - # Update a monitor, make sure it's updated in the cache - monitors_update_mock.reset_mock() - monitor = { - 'id': 'new-id', - 'key': 
'changed-value', - } - monitors_update_mock.side_effect = [monitor] - self.assertEquals(monitor, client.monitors_update('new-id', - key='changed-value')) - monitors_update_mock \ - .assert_has_calls([call('new-id', {}, key='changed-value')]) - new_expected['new-id'] = monitor - self.assertEquals(new_expected, client.monitors) - - # Delete a monitor, make sure it's out of the list - monitors_delete_mock.reset_mock() - monitors_delete_mock.side_effect = ['deleted'] - self.assertEquals('deleted', client.monitors_delete('new-id')) - monitors_delete_mock.assert_has_calls([call('new-id')]) - self.assertEquals(expected, client.monitors) - - @patch('ns1.rest.monitoring.NotifyLists.delete') - @patch('ns1.rest.monitoring.NotifyLists.create') - @patch('ns1.rest.monitoring.NotifyLists.list') - def test_notifylists(self, notifylists_list_mock, notifylists_create_mock, - notifylists_delete_mock): - client = Ns1Client('dummy-key') - - def reset(): - notifylists_create_mock.reset_mock() - notifylists_delete_mock.reset_mock() - notifylists_list_mock.reset_mock() - - reset() - notifylists_list_mock.side_effect = [{}] - expected = { - 'id': 'nl-id', - 'name': 'bar', - } - notifylists_create_mock.side_effect = [expected] - notify_list = [{ - 'config': { - 'sourceid': 'foo', - }, - 'type': 'datafeed', - }] - got = client.notifylists_create(name='some name', - notify_list=notify_list) - self.assertEquals(expected, got) - notifylists_list_mock.assert_called_once() - notifylists_create_mock.assert_has_calls([ - call({'name': 'some name', 'notify_list': notify_list}) - ]) - notifylists_delete_mock.assert_not_called() - - reset() - client.notifylists_delete('nlid') - notifylists_list_mock.assert_not_called() - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_has_calls([call('nlid')]) - - # Delete again, this time with a cache item that needs cleaned out and - # another that needs to be ignored - reset() - client._notifylists_cache = { - 'another': { - 'id': 'notid', - 'name': 'another', - }, - # This one comes 2nd on purpose - 'the-one': { - 'id': 'nlid', - 'name': 'the-one', - }, - } - client.notifylists_delete('nlid') - notifylists_list_mock.assert_not_called() - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_has_calls([call('nlid')]) - # Only another left - self.assertEquals(['another'], list(client._notifylists_cache.keys())) - - reset() - expected = ['one', 'two', 'three'] - notifylists_list_mock.side_effect = [expected] - nls = client.notifylists_list() - self.assertEquals(expected, nls) - notifylists_list_mock.assert_has_calls([call()]) - notifylists_create_mock.assert_not_called() - notifylists_delete_mock.assert_not_called() - - @patch('ns1.rest.records.Records.delete') - @patch('ns1.rest.records.Records.update') - @patch('ns1.rest.records.Records.create') - @patch('ns1.rest.records.Records.retrieve') - @patch('ns1.rest.zones.Zones.create') - @patch('ns1.rest.zones.Zones.delete') - @patch('ns1.rest.zones.Zones.retrieve') - def test_client_caching(self, zone_retrieve_mock, zone_delete_mock, - zone_create_mock, record_retrieve_mock, - record_create_mock, record_update_mock, - record_delete_mock): - client = Ns1Client('dummy-key') - - def reset(): - zone_retrieve_mock.reset_mock() - zone_delete_mock.reset_mock() - zone_create_mock.reset_mock() - record_retrieve_mock.reset_mock() - record_create_mock.reset_mock() - record_update_mock.reset_mock() - record_delete_mock.reset_mock() - # Testing caches so we don't reset those - - # Initial zone get fetches 
and caches - reset() - zone_retrieve_mock.side_effect = ['foo'] - self.assertEquals('foo', client.zones_retrieve('unit.tests')) - zone_retrieve_mock.assert_has_calls([call('unit.tests')]) - self.assertEquals({ - 'unit.tests': 'foo', - }, client._zones_cache) - - # Subsequent zone get does not fetch and returns from cache - reset() - self.assertEquals('foo', client.zones_retrieve('unit.tests')) - zone_retrieve_mock.assert_not_called() - - # Zone create stores in cache - reset() - zone_create_mock.side_effect = ['bar'] - self.assertEquals('bar', client.zones_create('sub.unit.tests')) - zone_create_mock.assert_has_calls([call('sub.unit.tests')]) - self.assertEquals({ - 'sub.unit.tests': 'bar', - 'unit.tests': 'foo', - }, client._zones_cache) - - # Initial record get fetches and caches - reset() - record_retrieve_mock.side_effect = ['baz'] - self.assertEquals('baz', client.records_retrieve('unit.tests', - 'a.unit.tests', 'A')) - record_retrieve_mock.assert_has_calls([call('unit.tests', - 'a.unit.tests', 'A')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - } - } - }, client._records_cache) - - # Subsequent record get does not fetch and returns from cache - reset() - self.assertEquals('baz', client.records_retrieve('unit.tests', - 'a.unit.tests', 'A')) - record_retrieve_mock.assert_not_called() - - # Record create stores in cache - reset() - record_create_mock.side_effect = ['boo'] - self.assertEquals('boo', client.records_create('unit.tests', - 'aaaa.unit.tests', - 'AAAA', key='val')) - record_create_mock.assert_has_calls([call('unit.tests', - 'aaaa.unit.tests', 'AAAA', - key='val')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - }, - 'aaaa.unit.tests': { - 'AAAA': 'boo' - }, - } - }, client._records_cache) - - # Record delete removes from cache and removes zone - reset() - record_delete_mock.side_effect = [{}] - self.assertEquals({}, client.records_delete('unit.tests', - 'aaaa.unit.tests', - 'AAAA')) - record_delete_mock.assert_has_calls([call('unit.tests', - 'aaaa.unit.tests', 'AAAA')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': { - 'A': 'baz' - }, - 'aaaa.unit.tests': {}, - } - }, client._records_cache) - self.assertEquals({ - 'sub.unit.tests': 'bar', - }, client._zones_cache) - - # Delete the other record, no zone this time, record should still go - # away - reset() - record_delete_mock.side_effect = [{}] - self.assertEquals({}, client.records_delete('unit.tests', - 'a.unit.tests', 'A')) - record_delete_mock.assert_has_calls([call('unit.tests', 'a.unit.tests', - 'A')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': {}, - 'aaaa.unit.tests': {}, - } - }, client._records_cache) - self.assertEquals({ - 'sub.unit.tests': 'bar', - }, client._zones_cache) - - # Record update removes zone and caches result - record_update_mock.side_effect = ['done'] - self.assertEquals('done', client.records_update('sub.unit.tests', - 'aaaa.sub.unit.tests', - 'AAAA', key='val')) - record_update_mock.assert_has_calls([call('sub.unit.tests', - 'aaaa.sub.unit.tests', - 'AAAA', key='val')]) - self.assertEquals({ - 'unit.tests': { - 'a.unit.tests': {}, - 'aaaa.unit.tests': {}, - }, - 'sub.unit.tests': { - 'aaaa.sub.unit.tests': { - 'AAAA': 'done', - }, - } - }, client._records_cache) - self.assertEquals({}, client._zones_cache) + def test_missing(self): + with self.assertRaises(ModuleNotFoundError): + from octodns.provider.ns1 import Ns1Provider + Ns1Provider
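+            # the bare reference keeps linters from flagging the import as
+            # unused; normally the import raises before this line runs
+            Ns1Provider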