
Merge pull request #974 from octodns/delayed-arpa

Delayed arpa
Ross McFarland, 2023-02-07 11:37:26 -08:00 (committed by GitHub)
8 changed files with 518 additions and 12 deletions


@@ -14,6 +14,8 @@
from octodns.record you'll need to update and pull them from their actual
home. Classes beginning with _ are not exported from octodns.record any
longer as they were considered private/protected.
* Beta support for auto-arpa has been added. See the
  [auto-arpa documentation](/docs/auto_arpa.md) for more information.
#### Stuff


@@ -255,6 +255,10 @@ Similar to providers, but can only serve to populate records into a zone, cannot
* Dnsimple uses the configured TTL when serving things through the ALIAS; there's also a secondary TXT record created alongside the ALIAS that octoDNS ignores
* octoDNS itself supports non-ASCII character sets, but in testing Cloudflare is the only provider where that is currently functional end-to-end. Others have failures either in the client libraries or API calls
## Automatic PTR generation
octoDNS supports automatically generating PTR records from the `A`/`AAAA` records it manages. For more information see the [auto-arpa documentation](/docs/auto_arpa.md).
## Compatibility and Compliance
### `lenient`

docs/auto_arpa.md

@@ -0,0 +1,91 @@
## Automatic PTR Generation With auto_arpa
octoDNS supports the automatic generation of `PTR` records for in-addr.arpa. and ip6.arpa. zones. To enable the functionality, add the `auto_arpa` key to the manager configuration.
```yaml
---
manager:
  auto_arpa: true
```
Alternatively, the value can be a dictionary of configuration options for the AutoArpa processor/provider.
```yaml
---
manager:
  auto_arpa:
    ttl: 1800
```
Once enabled, a singleton `AutoArpa` instance named `auto-arpa` is added to the pool of providers and configured to run as the very last global processor, so that it sees records exactly as they will be seen by targets. Additionally, all zones ending with `arpa.` are held back and processed only after every other zone has completed, so that every `A` and `AAAA` record has been seen before the `arpa.` zones are planned.
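Explicitly configured global processors still run first; `auto-arpa` is simply appended to the end of the global processor list. A minimal sketch, where `my-filter` is a hypothetical processor defined elsewhere in the config:
```yaml
---
manager:
  auto_arpa: true
  # my-filter (hypothetical, defined under the top-level processors section)
  # runs first; the auto-arpa singleton is appended after it automatically
  processors:
    - my-filter
```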
To add `PTR` records to a zone, include the `auto-arpa` source in that zone's list of sources.
```yaml
0.0.10.in-addr.arpa.:
  sources:
    - auto-arpa
  targets:
    - ...
```
The above will add `PTR` records for any `A` records previously seen with IP addresses 10.0.0.*.
### A Complete Example
#### config/octodns.yaml
```yaml
manager:
  auto_arpa: true

providers:
  config:
    class: octodns.provider.yaml.YamlProvider
    directory: tests/config
  powerdns:
    class: octodns_powerdns.PowerDnsProvider
    host: 10.0.0.53
    port: 8081
    api_key: env/POWERDNS_API_KEY

zones:
  exxampled.com.:
    sources:
      - config
    targets:
      - powerdns

  0.0.10.in-addr.arpa.:
    sources:
      - auto-arpa
    targets:
      - powerdns
```
#### config/exxampled.com.yaml
```yaml
? ''
: type: A
  values:
    - 10.0.0.101
    - 10.0.0.102

email:
  type: A
  value: 10.0.0.103

fileserver:
  type: A
  value: 10.0.0.103
```
#### Auto-generated PTRs
* 101.0.0.10: exxampled.com.
* 102.0.0.10: exxampled.com.
* 103.0.0.10: email.exxampled.com., fileserver.exxampled.com.
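For reference, if the `0.0.10.in-addr.arpa.` zone above were dumped back out through `YamlProvider`, the generated records would look roughly like the following sketch (assuming the default `3600` TTL, since the example does not configure one):
```yaml
---
'101':
  ttl: 3600
  type: PTR
  value: exxampled.com.
'102':
  ttl: 3600
  type: PTR
  value: exxampled.com.
'103':
  ttl: 3600
  type: PTR
  values:
    - email.exxampled.com.
    - fileserver.exxampled.com.
```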
### Notes
Automatic `PTR` generation requires a "complete" picture of the records and therefore cannot be done during partial syncs. Syncing `arpa.` zones will throw an error any time filtering of zones, targets, or sources is in effect.
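When driving octoDNS from Python, the restriction surfaces as a `ManagerException`. A minimal sketch (the config path is a placeholder):
```python
from octodns.manager import Manager, ManagerException

manager = Manager('config/octodns.yaml')  # a config with auto_arpa enabled

try:
    # a partial run that pulls in an arpa. zone is rejected
    manager.sync(
        dry_run=False,
        eligible_zones=['exxampled.com.', '0.0.10.in-addr.arpa.'],
    )
except ManagerException as e:
    # ARPA zones cannot be synced during partial runs when auto_arpa is enabled
    print(e)
```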


@@ -11,6 +11,7 @@ from sys import stdout
from . import __VERSION__
from .idna import IdnaDict, idna_decode, idna_encode
+from .processor.arpa import AutoArpa
from .provider.base import BaseProvider
from .provider.plan import Plan
from .provider.yaml import SplitYamlProvider, YamlProvider
@@ -95,10 +96,12 @@ class Manager(object):
        plan = p[1]
        return len(plan.changes[0].record.zone.name) if plan.changes else 0

-    def __init__(self, config_file, max_workers=None, include_meta=False):
+    def __init__(
+        self, config_file, max_workers=None, include_meta=False, auto_arpa=False
+    ):
        version = self._try_version('octodns', version=__VERSION__)
        self.log.info(
-            '__init__: config_file=%s (octoDNS %s)', config_file, version
+            '__init__: config_file=%s, (octoDNS %s)', config_file, version
        )

        self._configured_sub_zones = None
@@ -116,6 +119,8 @@
            manager_config, include_meta
        )

+        self.auto_arpa = self._config_auto_arpa(manager_config, auto_arpa)
+
        self.global_processors = manager_config.get('processors', [])
        self.log.info('__init__: global_processors=%s', self.global_processors)
@@ -125,6 +130,16 @@
        processors_config = self.config.get('processors', {})
        self.processors = self._config_processors(processors_config)

+        if self.auto_arpa:
+            self.log.info(
+                '__init__: adding auto-arpa to processors and providers, appending it to global_processors list'
+            )
+            kwargs = self.auto_arpa if isinstance(auto_arpa, dict) else {}
+            auto_arpa = AutoArpa('auto-arpa', **kwargs)
+            self.providers[auto_arpa.name] = auto_arpa
+            self.processors[auto_arpa.name] = auto_arpa
+            self.global_processors.append(auto_arpa.name)
+
        plan_outputs_config = manager_config.get(
            'plan_outputs',
            {
@@ -170,6 +185,11 @@
        self.log.info('_config_include_meta: include_meta=%s', include_meta)
        return include_meta

+    def _config_auto_arpa(self, manager_config, auto_arpa=False):
+        auto_arpa = auto_arpa or manager_config.get('auto_arpa', False)
+        self.log.info('_config_auto_arpa: auto_arpa=%s', auto_arpa)
+        return auto_arpa
+
    def _config_providers(self, providers_config):
        self.log.debug('_config_providers: configuring providers')
        providers = {}
@@ -381,7 +401,6 @@
        desired=None,
        lenient=False,
    ):
-        zone = self.get_zone(zone_name)
        self.log.debug(
            'sync: populating, zone=%s, lenient=%s',
@@ -471,7 +490,26 @@
        if eligible_zones:
            zones = IdnaDict({n: zones.get(n) for n in eligible_zones})

+        includes_arpa = any(e.endswith('arpa.') for e in zones.keys())
+        if self.auto_arpa and includes_arpa:
+            # it's not safe to mess with auto_arpa when we don't have a complete
+            # picture of records, so if any filtering is happening while arpa
+            # zones are in play we need to abort
+            if any(e.endswith('arpa.') for e in eligible_zones):
+                raise ManagerException(
+                    'ARPA zones cannot be synced during partial runs when auto_arpa is enabled'
+                )
+            if eligible_sources:
+                raise ManagerException(
+                    'eligible_sources is incompatible with auto_arpa'
+                )
+            if eligible_targets:
+                raise ManagerException(
+                    'eligible_targets is incompatible with auto_arpa'
+                )
+
        aliased_zones = {}
+        delayed_arpa = []
        futures = []
        for zone_name, config in zones.items():
            decoded_zone_name = idna_decode(zone_name)
@@ -568,16 +606,20 @@
                    f'Zone {decoded_zone_name}, unknown ' f'target: {target}'
                )

-            futures.append(
-                self._executor.submit(
-                    self._populate_and_plan,
-                    zone_name,
-                    processors,
-                    sources,
-                    targets,
-                    lenient=lenient,
-                )
-            )
+            kwargs = {
+                'zone_name': zone_name,
+                'processors': processors,
+                'sources': sources,
+                'targets': targets,
+                'lenient': lenient,
+            }
+            if self.auto_arpa and zone_name.endswith('arpa.'):
+                delayed_arpa.append(kwargs)
+            else:
+                futures.append(
+                    self._executor.submit(self._populate_and_plan, **kwargs)
+                )

        # Wait on all results and unpack/flatten the plans and store the
        # desired states in case we need them below
@@ -617,6 +659,20 @@
        # as these are aliased zones
        plans += [p for f in futures for p in f.result()[0]]

+        if delayed_arpa:
+            # if delaying arpa all of the non-arpa zones have been processed now
+            # so it's time to plan them
+            self.log.info(
+                'sync: processing %d delayed arpa zones', len(delayed_arpa)
+            )
+            # populate and plan them
+            futures = [
+                self._executor.submit(self._populate_and_plan, **kwargs)
+                for kwargs in delayed_arpa
+            ]
+            # wait on the results and unpack/flatten the plans
+            plans += [p for f in futures for p in f.result()[0]]
+
        # Best effort sort plans children first so that we create/update
        # children zones before parents which should allow us to more safely
        # extract things into sub-zones. Combining a child back into a parent

octodns/processor/arpa.py

@@ -0,0 +1,63 @@
#
#
#
from collections import defaultdict
from ipaddress import ip_address
from logging import getLogger

from ..record import Record
from .base import BaseProcessor


class AutoArpa(BaseProcessor):
    def __init__(self, name, ttl=3600):
        super().__init__(name)
        self.log = getLogger(f'AutoArpa[{name}]')
        self.ttl = ttl
        self._records = defaultdict(set)

    def process_source_zone(self, desired, sources):
        for record in desired.records:
            if record._type in ('A', 'AAAA'):
                ips = record.values
                if record.geo:
                    for geo in record.geo.values():
                        ips += geo.values
                if record.dynamic:
                    for pool in record.dynamic.pools.values():
                        for value in pool.data['values']:
                            ips.append(value['value'])
                for ip in ips:
                    ptr = ip_address(ip).reverse_pointer
                    self._records[f'{ptr}.'].add(record.fqdn)

        return desired

    def populate(self, zone, target=False, lenient=False):
        self.log.debug(
            'populate: name=%s, target=%s, lenient=%s',
            zone.name,
            target,
            lenient,
        )
        before = len(zone.records)

        zone_name = zone.name
        n = len(zone_name) + 1
        for arpa, fqdns in self._records.items():
            if arpa.endswith(zone_name):
                name = arpa[:-n]
                fqdns = sorted(fqdns)
                record = Record.new(
                    zone,
                    name,
                    {'ttl': self.ttl, 'type': 'PTR', 'values': fqdns},
                )
                zone.add_record(record)

        self.log.info(
            'populate: found %s records', len(zone.records) - before
        )
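A minimal usage sketch, mirroring the unit tests added later in this PR: feed an `A` record through the processor, then populate the matching reverse zone.
```python
from octodns.processor.arpa import AutoArpa
from octodns.record import Record
from octodns.zone import Zone

# the manager would normally call process_source_zone on every source zone
zone = Zone('unit.tests.', [])
zone.add_record(
    Record.new(zone, 'a', {'ttl': 32, 'type': 'A', 'value': '1.2.3.4'})
)
aa = AutoArpa('auto-arpa')
aa.process_source_zone(zone, [])

# later, populating the matching in-addr.arpa. zone yields the generated PTRs
arpa = Zone('3.2.1.in-addr.arpa.', [])
aa.populate(arpa)
(ptr,) = arpa.records
print(ptr.fqdn)  # 4.3.2.1.in-addr.arpa.
print(ptr.value)  # a.unit.tests.
```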


@@ -0,0 +1,33 @@
manager:
  max_workers: 2
  auto_arpa:
    ttl: 1800

providers:
  in:
    class: octodns.provider.yaml.YamlProvider
    directory: tests/config
    supports_root_ns: False
    strict_supports: False
  dump:
    class: octodns.provider.yaml.YamlProvider
    directory: env/YAML_TMP_DIR
    default_ttl: 999
    supports_root_ns: False
    strict_supports: False

zones:
  unit.tests.:
    sources:
      - in
    targets:
      - dump

  3.2.2.in-addr.arpa.:
    sources:
      - auto-arpa
    targets:
      - dump

  b.e.f.f.f.d.1.8.f.2.6.0.1.2.e.0.0.5.0.4.4.6.0.1.0.6.2.ip6.arpa.:
    sources:
      - auto-arpa
    targets:
      - dump


@@ -907,6 +907,60 @@ class TestManager(TestCase):
            str(ctx.exception),
        )

    def test_auto_arpa(self):
        manager = Manager(get_config_filename('simple-arpa.yaml'))

        with TemporaryDirectory() as tmpdir:
            environ['YAML_TMP_DIR'] = tmpdir.dirname

            # we can sync eligible_zones so long as they're not arpa
            tc = manager.sync(dry_run=False, eligible_zones=['unit.tests.'])
            self.assertEqual(22, tc)

            # can't do partial syncs that include arpa zones
            with self.assertRaises(ManagerException) as ctx:
                manager.sync(
                    dry_run=False,
                    eligible_zones=['unit.tests.', '3.2.2.in-addr.arpa.'],
                )
            self.assertEqual(
                'ARPA zones cannot be synced during partial runs when auto_arpa is enabled',
                str(ctx.exception),
            )

            # same for eligible_sources
            tc = manager.sync(
                dry_run=False,
                eligible_zones=['unit.tests.'],
                eligible_sources=['in'],
            )
            self.assertEqual(22, tc)

            # can't do partial syncs that include arpa zones
            with self.assertRaises(ManagerException) as ctx:
                manager.sync(dry_run=False, eligible_sources=['in'])
            self.assertEqual(
                'eligible_sources is incompatible with auto_arpa',
                str(ctx.exception),
            )

            # same for eligible_targets
            tc = manager.sync(
                dry_run=False,
                eligible_zones=['unit.tests.'],
                eligible_targets=['dump'],
            )
            self.assertEqual(22, tc)

            # can't do partial syncs that include arpa zones
            with self.assertRaises(ManagerException) as ctx:
                manager.sync(dry_run=False, eligible_targets=['dump'])
            self.assertEqual(
                'eligible_targets is incompatible with auto_arpa',
                str(ctx.exception),
            )

            # full sync with arpa is fine, 2 extra records from it
            tc = manager.sync(dry_run=False)
            self.assertEqual(26, tc)


class TestMainThreadExecutor(TestCase):
    def test_success(self):


@@ -0,0 +1,203 @@
#
#
#
from unittest import TestCase

from octodns.processor.arpa import AutoArpa
from octodns.record import Record
from octodns.zone import Zone


class TestAutoArpa(TestCase):
    def test_empty_zone(self):
        # empty zone no records
        zone = Zone('unit.tests.', [])

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        self.assertFalse(aa._records)

    def test_single_value_A(self):
        zone = Zone('unit.tests.', [])
        record = Record.new(
            zone, 'a', {'ttl': 32, 'type': 'A', 'value': '1.2.3.4'}
        )
        zone.add_record(record)

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        self.assertEqual(
            {'4.3.2.1.in-addr.arpa.': {'a.unit.tests.'}}, aa._records
        )

        # matching zone
        arpa = Zone('3.2.1.in-addr.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(1, len(arpa.records))
        (ptr,) = arpa.records
        self.assertEqual('4.3.2.1.in-addr.arpa.', ptr.fqdn)
        self.assertEqual(record.fqdn, ptr.value)
        self.assertEqual(3600, ptr.ttl)

        # other zone
        arpa = Zone('4.4.4.in-addr.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(0, len(arpa.records))

    def test_multi_value_A(self):
        zone = Zone('unit.tests.', [])
        record = Record.new(
            zone,
            'a',
            {'ttl': 32, 'type': 'A', 'values': ['1.2.3.4', '1.2.3.5']},
        )
        zone.add_record(record)

        aa = AutoArpa('auto-arpa', ttl=1600)
        aa.process_source_zone(zone, [])
        self.assertEqual(
            {
                '4.3.2.1.in-addr.arpa.': {'a.unit.tests.'},
                '5.3.2.1.in-addr.arpa.': {'a.unit.tests.'},
            },
            aa._records,
        )

        arpa = Zone('3.2.1.in-addr.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(2, len(arpa.records))
        ptr_1, ptr_2 = sorted(arpa.records)
        self.assertEqual('4.3.2.1.in-addr.arpa.', ptr_1.fqdn)
        self.assertEqual(record.fqdn, ptr_1.value)
        self.assertEqual('5.3.2.1.in-addr.arpa.', ptr_2.fqdn)
        self.assertEqual(record.fqdn, ptr_2.value)
        self.assertEqual(1600, ptr_2.ttl)

    def test_AAAA(self):
        zone = Zone('unit.tests.', [])
        record = Record.new(
            zone, 'aaaa', {'ttl': 32, 'type': 'AAAA', 'value': 'ff:0c::4:2'}
        )
        zone.add_record(record)

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        ip6_arpa = '2.0.0.0.4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.c.0.0.0.f.f.0.0.ip6.arpa.'
        self.assertEqual({ip6_arpa: {'aaaa.unit.tests.'}}, aa._records)

        # matching zone
        arpa = Zone('c.0.0.0.f.f.0.0.ip6.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(1, len(arpa.records))
        (ptr,) = arpa.records
        self.assertEqual(ip6_arpa, ptr.fqdn)
        self.assertEqual(record.fqdn, ptr.value)

        # other zone
        arpa = Zone('c.0.0.0.e.f.0.0.ip6.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(0, len(arpa.records))

    def test_geo(self):
        zone = Zone('unit.tests.', [])
        record = Record.new(
            zone,
            'geo',
            {
                'ttl': 32,
                'type': 'A',
                'values': ['1.2.3.4', '1.2.3.5'],
                'geo': {
                    'AF': ['1.1.1.1'],
                    'AS-JP': ['2.2.2.2', '3.3.3.3'],
                    'NA-US': ['4.4.4.4', '5.5.5.5'],
                },
            },
        )
        zone.add_record(record)

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        self.assertEqual(
            {
                '1.1.1.1.in-addr.arpa.': {'geo.unit.tests.'},
                '2.2.2.2.in-addr.arpa.': {'geo.unit.tests.'},
                '3.3.3.3.in-addr.arpa.': {'geo.unit.tests.'},
                '4.4.4.4.in-addr.arpa.': {'geo.unit.tests.'},
                '5.5.5.5.in-addr.arpa.': {'geo.unit.tests.'},
                '4.3.2.1.in-addr.arpa.': {'geo.unit.tests.'},
                '5.3.2.1.in-addr.arpa.': {'geo.unit.tests.'},
            },
            aa._records,
        )

    def test_dynamic(self):
        zone = Zone('unit.tests.', [])
        record = Record.new(
            zone,
            'dynamic',
            {
                'ttl': 32,
                'type': 'A',
                'values': ['1.2.3.4', '1.2.3.5'],
                'dynamic': {
                    'pools': {
                        'one': {'values': [{'weight': 1, 'value': '3.3.3.3'}]},
                        'two': {
                            # Testing out of order value sorting here
                            'values': [
                                {'value': '5.5.5.5'},
                                {'value': '4.4.4.4'},
                            ]
                        },
                        'three': {
                            'values': [
                                {'weight': 10, 'value': '4.4.4.4'},
                                {'weight': 12, 'value': '5.5.5.5'},
                            ]
                        },
                    },
                    'rules': [
                        {'geos': ['AF', 'EU'], 'pool': 'three'},
                        {'geos': ['NA-US-CA'], 'pool': 'two'},
                        {'pool': 'one'},
                    ],
                },
            },
        )
        zone.add_record(record)

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        self.assertEqual(
            {
                '3.3.3.3.in-addr.arpa.': {'dynamic.unit.tests.'},
                '4.4.4.4.in-addr.arpa.': {'dynamic.unit.tests.'},
                '5.5.5.5.in-addr.arpa.': {'dynamic.unit.tests.'},
                '4.3.2.1.in-addr.arpa.': {'dynamic.unit.tests.'},
                '5.3.2.1.in-addr.arpa.': {'dynamic.unit.tests.'},
            },
            aa._records,
        )

    def test_multiple_names(self):
        zone = Zone('unit.tests.', [])
        record1 = Record.new(
            zone, 'a1', {'ttl': 32, 'type': 'A', 'value': '1.2.3.4'}
        )
        zone.add_record(record1)
        record2 = Record.new(
            zone, 'a2', {'ttl': 32, 'type': 'A', 'value': '1.2.3.4'}
        )
        zone.add_record(record2)

        aa = AutoArpa('auto-arpa')
        aa.process_source_zone(zone, [])
        self.assertEqual(
            {'4.3.2.1.in-addr.arpa.': {'a1.unit.tests.', 'a2.unit.tests.'}},
            aa._records,
        )

        # matching zone
        arpa = Zone('3.2.1.in-addr.arpa.', [])
        aa.populate(arpa)
        self.assertEqual(1, len(arpa.records))
        (ptr,) = arpa.records
        self.assertEqual('4.3.2.1.in-addr.arpa.', ptr.fqdn)
        self.assertEqual([record1.fqdn, record2.fqdn], ptr.values)
        self.assertEqual(3600, ptr.ttl)