
Update DSFMonitor labels to include type

The old fqdn-only labels would have prevented both A and AAAA records from existing on the same node.
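For context, a minimal sketch of the collision the new labels avoid. Only the '{}:{}'.format(fqdn, _type) label format comes from this commit; the helper names and sample fqdn are hypothetical:

monitors = {}

def old_label(fqdn, _type):
    return fqdn  # pre-change: the record type is ignored

def new_label(fqdn, _type):
    return '{}:{}'.format(fqdn, _type)  # format introduced by this commit

# With fqdn-only keys the AAAA monitor clobbers the A monitor.
for _type in ('A', 'AAAA'):
    monitors[old_label('www.unit.tests.', _type)] = _type
assert len(monitors) == 1

# With type-qualified labels both monitors coexist on the node.
monitors = {}
for _type in ('A', 'AAAA'):
    monitors[new_label('www.unit.tests.', _type)] = _type
assert len(monitors) == 2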
Author: Ross McFarland
Date:   2017-06-21 14:34:21 -07:00
parent 72eff706da
commit b6cd08e659
4 changed files with 99 additions and 23 deletions

octodns/provider/dyn.py

@@ -434,19 +434,20 @@ class DynProvider(BaseProvider):
             if record in changed or not getattr(record, 'geo', False):
                 # Already changed, or no geo, no need to check it
                 continue
+            label = '{}:{}'.format(record.fqdn, record._type)
             try:
-                monitor = self.traffic_director_monitors[record.fqdn]
+                monitor = self.traffic_director_monitors[label]
             except KeyError:
-                self.log.info('_extra_changes: health-check missing for %s:%s',
-                              record.fqdn, record._type)
+                self.log.info('_extra_changes: health-check missing for %s',
+                              label)
                 extra.append(Update(record, record))
                 continue
             # Heh, when pulled from the API host & path live under options, but
             # when you create with the constructor they're top-level :-(
             if monitor.host != record.healthcheck_host or \
                monitor.path != record.healthcheck_path:
-                self.log.info('_extra_changes: health-check mis-match for '
-                              '%s:%s', record.fqdn, record._type)
+                self.log.info('_extra_changes: health-check mis-match for %s',
+                              label)
                 extra.append(Update(record, record))
 
         return extra
@@ -532,6 +533,7 @@ class DynProvider(BaseProvider):
     @property
     def traffic_director_monitors(self):
         if self._traffic_director_monitors is None:
+            self.log.debug('traffic_director_monitors: loading')
             self._traffic_director_monitors = \
                 {m.label: m for m in get_all_dsf_monitors()}
         return self._traffic_director_monitors
@@ -539,26 +541,38 @@ class DynProvider(BaseProvider):
     def _traffic_director_monitor(self, record):
         fqdn = record.fqdn
+        label = '{}:{}'.format(fqdn, record._type)
         try:
-            monitor = self.traffic_director_monitors[fqdn]
+            try:
+                monitor = self.traffic_director_monitors[label]
+            except KeyError:
+                # UNTIL 1.0 We don't have one for the new label format, see if
+                # we still have one for the old and update it
+                monitor = self.traffic_director_monitors[fqdn]
+                self.log.info('_traffic_director_monitor: upgrading label '
+                              'to %s', label)
+                monitor.label = label
+                self.traffic_director_monitors[label] = \
+                    self.traffic_director_monitors[fqdn]
+                del self.traffic_director_monitors[fqdn]
             if monitor.host != record.healthcheck_host or \
                monitor.path != record.healthcheck_path:
                 self.log.info('_traffic_director_monitor: updating monitor '
-                              'for %s:%s', record.fqdn, record._type)
+                              'for %s', label)
                 monitor.update(record.healthcheck_host,
                                record.healthcheck_path)
             return monitor
         except KeyError:
             self.log.info('_traffic_director_monitor: creating monitor '
-                          'for %s:%s', record.fqdn, record._type)
-            monitor = DSFMonitor(fqdn, protocol='HTTPS', response_count=2,
+                          'for %s', label)
+            monitor = DSFMonitor(label, protocol='HTTPS', response_count=2,
                                  probe_interval=60, retries=2,
                                  port=self.MONITOR_PORT, active='Y',
                                  host=record.healthcheck_host,
                                  timeout=self.MONITOR_TIMEOUT,
                                  header=self.MONITOR_HEADER,
                                  path=record.healthcheck_path)
-            self._traffic_director_monitors[fqdn] = monitor
+            self._traffic_director_monitors[label] = monitor
             return monitor
 
     def _find_or_create_pool(self, td, pools, label, _type, values,

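The nested try/except added above doubles as a one-time migration: a cache miss on the new label falls back to the legacy fqdn key, rewrites the monitor's label, and re-keys the cached entry. A standalone sketch of that fallback, assuming a plain dict cache and a toy monitor value rather than real octodns/Dyn objects:

# Legacy cache entry keyed by fqdn only.
cache = {'unit.tests.': {'label': 'unit.tests.'}}

fqdn, _type = 'unit.tests.', 'A'
label = '{}:{}'.format(fqdn, _type)

try:
    monitor = cache[label]
except KeyError:
    # Fall back to the old key, upgrade the label in place, re-key the cache.
    monitor = cache[fqdn]
    monitor['label'] = label
    cache[label] = cache.pop(fqdn)

assert fqdn not in cache
assert cache[label]['label'] == 'unit.tests.:A'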
octodns/provider/route53.py

@@ -500,8 +500,8 @@ class Route53Provider(BaseProvider):
         # use
         expected_re = re.compile(r'^\d\d\d\d:{}:{}:'
                                  .format(record._type, record.name))
-        # Until the v1.0 release we'll clean out the previous version of
-        # Route53 health checks as best as we can.
+        # UNTIL 1.0: we'll clean out the previous version of Route53 health
+        # checks as best as we can.
         expected_legacy_host = record.fqdn[:-1]
         expected_legacy = '0000:{}:'.format(record._type)
         for id, health_check in self.health_checks.items():

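The health-check names above encode a four-digit version followed by the record type and name, while the legacy scheme being cleaned out used a bare '0000:{type}:' prefix. A quick illustration of how the two patterns separate; the sample check names are invented, only the prefixes come from the diff:

import re

_type, name = 'A', 'www'
expected_re = re.compile(r'^\d\d\d\d:{}:{}:'.format(_type, name))
expected_legacy = '0000:{}:'.format(_type)

assert expected_re.match('0001:A:www:some-id')          # current scheme
assert not expected_re.match('0000:A:old-check')        # not type+name qualified
assert '0000:A:old-check'.startswith(expected_legacy)   # legacy, cleaned out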
tests/test_octodns_provider_dyn.py

@@ -530,7 +530,7 @@ class TestDynProviderGeo(TestCase):
             'agent_scheme': 'geo',
             'dsf_monitor_id': monitor_id,
             'endpoints': [],
-            'label': 'unit.tests.',
+            'label': 'unit.tests.:A',
             'notifier': [],
             'expected': '',
             'header': 'User-Agent: Dyn Monitor',
@@ -543,6 +543,24 @@ class TestDynProviderGeo(TestCase):
             'response_count': '2',
             'retries': '2',
             'services': ['12311']
+        }, {
+            'active': 'Y',
+            'agent_scheme': 'geo',
+            'dsf_monitor_id': 'b52',
+            'endpoints': [],
+            'label': 'old-label.unit.tests.',
+            'notifier': [],
+            'expected': '',
+            'header': 'User-Agent: Dyn Monitor',
+            'host': 'old-label.unit.tests',
+            'path': '/_dns',
+            'port': '443',
+            'timeout': '10',
+            'probe_interval': '60',
+            'protocol': 'HTTPS',
+            'response_count': '2',
+            'retries': '2',
+            'services': ['12312']
         }],
         'job_id': 3376281406,
         'msgs': [{
@@ -647,7 +665,7 @@ class TestDynProviderGeo(TestCase):
                 'active': 'Y',
                 'dsf_monitor_id': geo_monitor_id,
                 'endpoints': [],
-                'label': 'geo.unit.tests.',
+                'label': 'geo.unit.tests.:A',
                 'notifier': '',
                 'expected': '',
                 'header': 'User-Agent: Dyn Monitor',
@@ -689,7 +707,7 @@ class TestDynProviderGeo(TestCase):
                 'retries': 2,
                 'protocol': 'HTTPS',
                 'response_count': 2,
-                'label': 'geo.unit.tests.',
+                'label': 'geo.unit.tests.:A',
                 'probe_interval': 60,
                 'active': 'Y',
                 'options': {
@@ -702,10 +720,10 @@ class TestDynProviderGeo(TestCase):
             })
         ])
         # created monitor is now cached
-        self.assertTrue('geo.unit.tests.' in
+        self.assertTrue('geo.unit.tests.:A' in
                         provider._traffic_director_monitors)
         # pre-existing one is there too
-        self.assertTrue('unit.tests.' in
+        self.assertTrue('unit.tests.:A' in
                         provider._traffic_director_monitors)
 
         # now ask for a monitor that does exist
@@ -738,7 +756,7 @@ class TestDynProviderGeo(TestCase):
                 'active': 'Y',
                 'dsf_monitor_id': self.monitor_id,
                 'endpoints': [],
-                'label': 'unit.tests.',
+                'label': 'unit.tests.:A',
                 'notifier': '',
                 'expected': '',
                 'header': 'User-Agent: Dyn Monitor',
@@ -773,14 +791,56 @@ class TestDynProviderGeo(TestCase):
             })
         ])
         # cached monitor should have been updated
-        self.assertTrue('unit.tests.' in
+        self.assertTrue('unit.tests.:A' in
                         provider._traffic_director_monitors)
-        monitor = provider._traffic_director_monitors['unit.tests.']
+        monitor = provider._traffic_director_monitors['unit.tests.:A']
         self.assertEquals('bleep.bloop', monitor.host)
         self.assertEquals('/_nope', monitor.path)
 
+        # test upgrading an old label
+        record = Record.new(existing, 'old-label', {
+            'ttl': 60,
+            'type': 'A',
+            'value': '1.2.3.4'
+        })
+        mock.reset_mock()
+        mock.side_effect = [{
+            'data': {
+                'active': 'Y',
+                'dsf_monitor_id': self.monitor_id,
+                'endpoints': [],
+                'label': 'old-label.unit.tests.:A',
+                'notifier': '',
+                'expected': '',
+                'header': 'User-Agent: Dyn Monitor',
+                'host': 'old-label.unit.tests',
+                'path': '/_dns',
+                'port': '443',
+                'timeout': '10',
+                'probe_interval': '60',
+                'protocol': 'HTTPS',
+                'response_count': '2',
+                'retries': '2'
+            },
+            'job_id': 3376259461,
+            'msgs': [{'ERR_CD': None,
+                      'INFO': 'add: Here is the new monitor',
+                      'LVL': 'INFO',
+                      'SOURCE': 'BLL'}],
+            'status': 'success'
+        }]
+        monitor = provider._traffic_director_monitor(record)
+        self.assertEquals(self.monitor_id, monitor.dsf_monitor_id)
+        # should have resulted in an update
+        mock.assert_has_calls([
+            call('/DSFMonitor/b52/', 'PUT', {
+                'label': 'old-label.unit.tests.:A'
+            })
+        ])
+        # cached monitor should have been updated
+        self.assertTrue('old-label.unit.tests.:A' in
+                        provider._traffic_director_monitors)
 
     @patch('dyn.core.SessionEngine.execute')
     def test_extra_changes(self, mock):
         provider = DynProvider('test', 'cust', 'user', 'pass', True)
@@ -818,6 +878,9 @@ class TestDynProviderGeo(TestCase):
         extra = provider._extra_changes(None, desired, [])
         self.assertEquals(0, len(extra))
 
+        # monitors should have been fetched now
+        mock.assert_called_once()
+
         # diff in healthcheck, gets extra
         desired = Zone('unit.tests.', [])
         record = Record.new(desired, '', {

tests/test_octodns_provider_powerdns.py

@@ -52,7 +52,6 @@ class TestPowerDnsProvider(TestCase):
         with self.assertRaises(Exception) as ctx:
             zone = Zone('unit.tests.', [])
             provider.populate(zone)
-        print(ctx.exception.message)
         self.assertTrue('unauthorized' in ctx.exception.message)
 
         # General error