
Merge branch 'master' into tlim_cfaliases

Tom Limoncelli
2017-05-25 20:38:48 -04:00
61 changed files with 985 additions and 171 deletions


@ -0,0 +1,28 @@
package dnsresolver
// dnsCache implements a very simple DNS cache.
// It caches the entire answer (i.e. all TXT records); filtering
// out the non-SPF answers is done at a higher layer.
// At this time the only rtype is "TXT". Eventually we'll need
// to cache A/AAAA/CNAME records to do CNAME flattening.
type dnsCache map[string]map[string][]string // map[fqdn]map[rtype] -> answers
func (c dnsCache) get(label, rtype string) ([]string, bool) {
v1, ok := c[label]
if !ok {
return nil, false
}
v2, ok := v1[rtype]
if !ok {
return nil, false
}
return v2, true
}
func (c dnsCache) put(label, rtype string, answers []string) {
_, ok := c[label]
if !ok {
c[label] = make(map[string][]string)
}
c[label][rtype] = answers
}


@ -0,0 +1,31 @@
package dnsresolver
import "testing"
func TestDnsCache(t *testing.T) {
cache := &dnsCache{}
cache.put("one", "txt", []string{"a", "b", "c"})
cache.put("two", "txt", []string{"d", "e", "f"})
a, b := cache.get("one", "txt")
if !(b == true && len(a) == 3 && a[0] == "a" && a[1] == "b" && a[2] == "c") {
t.Errorf("one-txt didn't work")
}
a, b = cache.get("two", "txt")
if !(b == true && len(a) == 3 && a[0] == "d" && a[1] == "e" && a[2] == "f") {
t.Errorf("two-txt didn't work")
}
a, b = cache.get("three", "txt")
if !(b == false) {
t.Errorf("three-txt didn't work")
}
a, b = cache.get("two", "not")
if !(b == false) {
t.Errorf("two-not didn't work")
}
}


@ -0,0 +1,83 @@
package dnsresolver
import (
"encoding/json"
"io/ioutil"
"net"
"github.com/pkg/errors"
)
// This file includes all the DNS Resolvers used by package spf.
// DnsResolver looks up txt strings associated with a FQDN.
type DnsResolver interface {
GetTxt(string) ([]string, error) // Given a DNS label, return the values of its TXT records.
}
// The "Live DNS" Resolver:
type dnsLive struct {
filename string
cache dnsCache
}
func NewResolverLive(filename string) *dnsLive {
// Does live DNS lookups. Records them. Writes file on Close.
c := &dnsLive{filename: filename}
c.cache = dnsCache{}
return c
}
func (c *dnsLive) GetTxt(label string) ([]string, error) {
// Try the cache.
txts, ok := c.cache.get(label, "txt")
if ok {
return txts, nil
}
// Populate the cache:
t, err := net.LookupTXT(label)
if err == nil {
c.cache.put(label, "txt", t)
}
return t, err
}
func (c *dnsLive) Close() {
// Write out and close the file.
m, _ := json.MarshalIndent(c.cache, "", " ")
m = append(m, "\n"...)
ioutil.WriteFile(c.filename, m, 0666)
}
// The "Pre-Cached DNS" Resolver:
type dnsPreloaded struct {
cache dnsCache
}
func NewResolverPreloaded(filename string) (*dnsPreloaded, error) {
c := &dnsPreloaded{}
c.cache = dnsCache{}
j, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
err = json.Unmarshal(j, &(*c).cache)
return c, err
}
func (c *dnsPreloaded) DumpCache() dnsCache {
return c.cache
}
func (c *dnsPreloaded) GetTxt(label string) ([]string, error) {
// Try the cache.
txts, ok := c.cache.get(label, "txt")
if ok {
return txts, nil
}
return nil, errors.Errorf("No preloaded DNS entry for: %#v", label)
}
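
To make the record/replay intent above concrete, here is a minimal usage sketch (not part of this diff; the queried name is only an example): the live resolver records whatever it looks up and writes the cache as JSON on Close, in the same map[fqdn]map[rtype] shape as dnsCache, and the preloaded resolver then replays those answers from the file with no network access.

package main

import (
	"fmt"

	"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
)

func main() {
	// Record: do live lookups; Close writes the accumulated answers to the file.
	live := dnsresolver.NewResolverLive("testdata-dns1.json")
	if txts, err := live.GetTxt("_spf.google.com"); err == nil {
		fmt.Println(txts)
	}
	live.Close()

	// Replay: answer the same query from the saved file, no network needed.
	pre, err := dnsresolver.NewResolverPreloaded("testdata-dns1.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(pre.GetTxt("_spf.google.com"))
}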

pkg/js/helpers.js (Normal file, 336 lines)

@ -0,0 +1,336 @@
"use strict";
var conf = {
registrars: [],
dns_providers: [],
domains: []
};
var defaultArgs = [];
function initialize(){
conf = {
registrars: [],
dns_providers: [],
domains: []
};
defaultArgs = [];
}
function NewRegistrar(name,type,meta) {
if (!type) {
type = "MANUAL";
}
var reg = {name: name, type: type, meta: meta};
conf.registrars.push(reg);
return name;
}
function NewDnsProvider(name, type, meta) {
if ((typeof meta === 'object') && ('ip_conversions' in meta)) {
meta.ip_conversions = format_tt(meta.ip_conversions)
}
var dsp = {name: name, type: type, meta: meta};
conf.dns_providers.push(dsp);
return name;
}
function newDomain(name,registrar) {
return {name: name, registrar: registrar, meta:{}, records:[], dnsProviders: {}, defaultTTL: 0, nameservers:[]};
}
function processDargs(m, domain) {
// for each modifier, if it is a...
// function: call it with domain
// array: process recursively
// object: merge it into metadata
if (_.isFunction(m)) {
m(domain);
} else if (_.isArray(m)) {
for (var j in m) {
processDargs(m[j], domain)
}
} else if (_.isObject(m)) {
_.extend(domain.meta,m);
} else {
throw "WARNING: domain modifier type unsupported: "+ typeof m + " Domain: "+ domain.name;
}
}
// D(name,registrar): Create a DNS Domain. Use the parameters as records and mods.
function D(name,registrar) {
var domain = newDomain(name,registrar);
for (var i = 0; i< defaultArgs.length; i++){
processDargs(defaultArgs[i],domain)
}
for (var i = 2; i<arguments.length; i++) {
var m = arguments[i];
processDargs(m, domain)
}
conf.domains.push(domain)
}
// DEFAULTS provides a set of default arguments to apply to all future domains.
// Each call to DEFAULTS will clear any previous values set.
function DEFAULTS(){
defaultArgs = [];
for (var i = 0; i<arguments.length; i++) {
defaultArgs.push(arguments[i]);
}
}
// TTL(v): Set the TTL for a DNS record.
function TTL(v) {
return function(r) {
r.ttl = v;
}
}
// DefaultTTL(v): Set the default TTL for the domain.
function DefaultTTL(v) {
return function(d) {
d.defaultTTL = v;
}
}
// DnsProvider("providerName", 0)
// nsCount of 0 means don't use or register any nameservers.
// nsCount not provided means use all.
function DnsProvider(name, nsCount){
if(typeof nsCount === 'undefined'){
nsCount = -1;
}
return function(d) {
d.dnsProviders[name] = nsCount;
}
}
// A(name,ip, recordModifiers...)
function A(name, ip) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"A",name,ip,mods)
}
}
// AAAA(name,ip, recordModifiers...)
function AAAA(name, ip) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"AAAA",name,ip,mods)
}
}
// ALIAS(name,target, recordModifiers...)
function ALIAS(name, target) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"ALIAS",name,target,mods)
}
}
// CNAME(name,target, recordModifiers...)
function CNAME(name, target) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"CNAME",name,target,mods)
}
}
// TXT(name,target, recordModifiers...)
function TXT(name, target) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"TXT",name,target,mods)
}
}
// MX(name,priority,target, recordModifiers...)
function MX(name, priority, target) {
checkArgs([_.isString, _.isNumber, _.isString], arguments, "MX expects (name, priority, target)")
var mods = getModifiers(arguments,3)
return function(d) {
mods.push(priority);
addRecord(d, "MX", name, target, mods)
}
}
function checkArgs(checks, args, desc){
if (args.length < checks.length){
throw(desc)
}
for (var i = 0; i< checks.length; i++){
if (!checks[i](args[i])){
throw(desc+" - argument "+i+" is not correct type")
}
}
}
// NS(name,target, recordModifiers...)
function NS(name, target) {
var mods = getModifiers(arguments,2)
return function(d) {
addRecord(d,"NS",name,target,mods)
}
}
// NAMESERVER(name,target)
function NAMESERVER(name, target) {
return function(d) {
d.nameservers.push({name: name, target: target})
}
}
function format_tt(transform_table) {
// Turn [{low: 1, high: 2, newBase: 3}, {low: 4, high: 5, newIP: 6}]
// into "1 ~ 2 ~ 3 ~; 4 ~ 5 ~ ~ 6"
var lines = []
for (var i=0; i < transform_table.length; i++) {
var ip = transform_table[i];
var newIP = ip.newIP;
if (newIP){
if(_.isArray(newIP)){
newIP = _.map(newIP,function(i){return num2dot(i)}).join(",")
}else{
newIP = num2dot(newIP);
}
}
var newBase = ip.newBase;
if (newBase){
if(_.isArray(newBase)){
newBase = _.map(newBase,function(i){return num2dot(i)}).join(",")
}else{
newBase = num2dot(newBase);
}
}
var row = [
num2dot(ip.low),
num2dot(ip.high),
newBase,
newIP
]
lines.push(row.join(" ~ "))
}
return lines.join(" ; ")
}
// IMPORT_TRANSFORM(translation_table, domain)
function IMPORT_TRANSFORM(translation_table, domain,ttl) {
return function(d) {
var rec = addRecord(d, "IMPORT_TRANSFORM", "@", domain, [
{'transform_table': format_tt(translation_table)}])
if (ttl){
rec.ttl = ttl;
}
}
}
// PURGE()
function PURGE(d) {
d.KeepUnknown = false
}
// NO_PURGE()
function NO_PURGE(d) {
d.KeepUnknown = true
}
function getModifiers(args,start) {
var mods = [];
for (var i = start;i<args.length; i++) {
mods.push(args[i])
}
return mods;
}
function addRecord(d,type,name,target,mods) {
// if target is number, assume ip address. convert it.
if (_.isNumber(target)) {
target = num2dot(target);
}
var rec = {type: type, name: name, target: target, ttl:d.defaultTTL, priority: 0, meta:{}};
// for each modifier, decide based on type:
// - Function: call it with the record as the argument
// - Object: merge it into the metadata
// - Number: if this is an MX record, assume it is the priority
if (mods) {
for (var i = 0; i< mods.length; i++) {
var m = mods[i]
if (_.isFunction(m)) {
m(rec);
} else if (_.isObject(m)) {
//convert transforms to strings
if (m.transform && _.isArray(m.transform)){
m.transform = format_tt(m.transform)
}
_.extend(rec.meta,m);
} else if (_.isNumber(m) && type == "MX") {
rec.priority = m;
} else {
console.log("WARNING: Modifier type unsupported:", typeof m, "(Skipping!)");
}
}
}
d.records.push(rec);
return rec;
}
//ip conversion functions from http://stackoverflow.com/a/8105740/121660
// via http://javascript.about.com/library/blipconvert.htm
function IP(dot)
{
var d = dot.split('.');
return ((((((+d[0])*256)+(+d[1]))*256)+(+d[2]))*256)+(+d[3]);
}
function num2dot(num)
{
if(num === undefined){
return "";
}
if (_.isString(num)){
return num
}
var d = num%256;
for (var i = 3; i > 0; i--)
{
num = Math.floor(num/256);
d = num%256 + '.' + d;
}
return d;
}
// Cloudflare aliases:
// Meta settings for individual records.
var CF_PROXY_OFF = {'cloudflare_proxy': 'off'}; // Proxy disabled.
var CF_PROXY_ON = {'cloudflare_proxy': 'on'}; // Proxy enabled.
var CF_PROXY_FULL = {'cloudflare_proxy': 'full'}; // Proxy+Railgun enabled.
// Per-domain meta settings:
// Proxy default off for entire domain (the default):
var CF_PROXY_DEFAULT_OFF = {'cloudflare_proxy_default': 'off'};
// Proxy default on for entire domain:
var CF_PROXY_DEFAULT_ON = {'cloudflare_proxy_default': 'on'};
// CUSTOM, PROVIDER SPECIFIC RECORD TYPES
function CF_REDIRECT(src, dst) {
return function(d) {
if (src.indexOf(",") !== -1 || dst.indexOf(",") !== -1){
throw("redirect src and dst must not have commas")
}
addRecord(d,"CF_REDIRECT","@",src+","+dst)
}
}
function CF_TEMP_REDIRECT(src, dst) {
return function(d) {
if (src.indexOf(",") !== -1 || dst.indexOf(",") !== -1){
throw("redirect src and dst must not have commas")
}
addRecord(d,"CF_TEMP_REDIRECT","@",src+","+dst)
}
}

pkg/js/js.go (Normal file, 64 lines)

@ -0,0 +1,64 @@
package js
import (
"encoding/json"
"fmt"
"io/ioutil"
"github.com/StackExchange/dnscontrol/models"
"github.com/robertkrimen/otto"
//load underscore js into vm by default
_ "github.com/robertkrimen/otto/underscore"
)
//ExecuteJavascript accepts a javascript string and runs it, returning the resulting dnsConfig.
func ExecuteJavascript(script string, devMode bool) (*models.DNSConfig, error) {
vm := otto.New()
vm.Set("require", require)
helperJs := GetHelpers(devMode)
// run helper script to prime vm and initialize variables
if _, err := vm.Run(helperJs); err != nil {
return nil, err
}
// run user script
if _, err := vm.Run(script); err != nil {
return nil, err
}
// export conf as string and unmarshal
value, err := vm.Run(`JSON.stringify(conf)`)
if err != nil {
return nil, err
}
str, err := value.ToString()
if err != nil {
return nil, err
}
conf := &models.DNSConfig{}
if err = json.Unmarshal([]byte(str), conf); err != nil {
return nil, err
}
return conf, nil
}
func GetHelpers(devMode bool) string {
return _escFSMustString(devMode, "/helpers.js")
}
func require(call otto.FunctionCall) otto.Value {
file := call.Argument(0).String()
fmt.Printf("requiring: %s\n", file)
data, err := ioutil.ReadFile(file)
if err != nil {
panic(err)
}
_, err = call.Otto.Run(string(data))
if err != nil {
panic(err)
}
return otto.TrueValue()
}

pkg/js/js_test.go (Normal file, 86 lines)

@ -0,0 +1,86 @@
package js
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"testing"
"unicode"
"github.com/StackExchange/dnscontrol/models"
)
const (
testDir = "pkg/js/parse_tests"
errorDir = "pkg/js/error_tests"
)
func init() {
os.Chdir("../..") // go up two directories so helpers.js is in a consistent place.
}
func TestParsedFiles(t *testing.T) {
files, err := ioutil.ReadDir(testDir)
if err != nil {
t.Fatal(err)
}
for _, f := range files {
//run all js files that start with a number. Skip others.
if filepath.Ext(f.Name()) != ".js" || !unicode.IsNumber(rune(f.Name()[0])) {
continue
}
t.Run(f.Name(), func(t *testing.T) {
content, err := ioutil.ReadFile(filepath.Join(testDir, f.Name()))
if err != nil {
t.Fatal(err)
}
conf, err := ExecuteJavascript(string(content), true)
if err != nil {
t.Fatal(err)
}
actualJSON, err := json.MarshalIndent(conf, "", " ")
if err != nil {
t.Fatal(err)
}
expectedFile := filepath.Join(testDir, f.Name()[:len(f.Name())-3]+".json")
expectedData, err := ioutil.ReadFile(expectedFile)
if err != nil {
t.Fatal(err)
}
conf = &models.DNSConfig{}
//unmarshal and remarshal to not require manual formatting
err = json.Unmarshal(expectedData, conf)
if err != nil {
t.Fatal(err)
}
expectedJSON, err := json.MarshalIndent(conf, "", " ")
if err != nil {
t.Fatal(err)
}
if string(expectedJSON) != string(actualJSON) {
t.Error("Expected and actual json don't match")
t.Log("Expected:", string(expectedJSON))
t.Log("Actual:", string(actualJSON))
}
})
}
}
func TestErrors(t *testing.T) {
tests := []struct{ desc, text string }{
{"old dsp style", `D("foo.com","reg","dsp")`},
{"MX no priority", `D("foo.com","reg",MX("@","test."))`},
{"MX reversed", `D("foo.com","reg",MX("@","test.", 5))`},
{"CF_REDIRECT With comma", `D("foo.com","reg",CF_REDIRECT("foo.com,","baaa"))`},
{"CF_TEMP_REDIRECT With comma", `D("foo.com","reg",CF_TEMP_REDIRECT("foo.com","baa,a"))`},
}
for _, tst := range tests {
t.Run(tst.desc, func(t *testing.T) {
if _, err := ExecuteJavascript(tst.text, true); err == nil {
t.Fatal("Expected error but found none")
}
})
}
}


@ -0,0 +1,6 @@
var REG = NewRegistrar("Third-Party","NONE");
var CF = NewDnsProvider("Cloudflare", "CLOUDFLAREAPI")
D("foo.com",REG,DnsProvider(CF),
A("@","1.2.3.4")
);


@ -0,0 +1,30 @@
{
"registrars": [
{
"name": "Third-Party",
"type": "NONE"
}
],
"dns_providers": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"domains": [
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {
"Cloudflare":-1
},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4"
}
]
}
]
}


@ -0,0 +1,5 @@
var REG = NewRegistrar("Third-Party","NONE");
var CF = NewDnsProvider("Cloudflare", "CLOUDFLAREAPI")
D("foo.com",REG,DnsProvider(CF),
A("@","1.2.3.4",TTL(42))
);


@ -0,0 +1,31 @@
{
"registrars": [
{
"name": "Third-Party",
"type": "NONE"
}
],
"dns_providers": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"domains": [
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {
"Cloudflare":-1
},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4",
"ttl": 42
}
]
}
]
}


@ -0,0 +1,5 @@
var CLOUDFLARE = NewRegistrar("Cloudflare","CLOUDFLAREAPI");
D("foo.com",CLOUDFLARE,
A("@","1.2.3.4",{"cloudflare_proxy":"ON"})
);


@ -0,0 +1,26 @@
{
"registrars": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"dns_providers": [],
"domains": [
{
"name": "foo.com",
"registrar": "Cloudflare",
"dnsProviders": {},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4",
"meta": {
"cloudflare_proxy": "ON"
}
}
]
}
]
}


@ -0,0 +1,10 @@
var REG = NewRegistrar("Third-Party","NONE");
var CF = NewDnsProvider("Cloudflare", "CLOUDFLAREAPI")
var BASE = IP("1.2.3.4")
D("foo.com",REG,DnsProvider(CF,0),
A("@",BASE),
A("p1",BASE+1),
A("p255", BASE+255)
);


@ -0,0 +1,28 @@
{
"registrars": [
{
"name": "Third-Party",
"type": "NONE"
}
],
"dns_providers": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"domains": [
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {
"Cloudflare":0
},
"records": [
{ "type": "A","name": "@","target": "1.2.3.4"},
{ "type": "A","name": "p1","target": "1.2.3.5"},
{ "type": "A","name": "p255","target": "1.2.4.3"}
]
}
]
}


@ -0,0 +1,7 @@
var REG = NewRegistrar("Third-Party","NONE");
var CF = NewDnsProvider("Cloudflare", "CLOUDFLAREAPI")
D("foo.com",REG,DnsProvider(CF,2),
A("@","1.2.3.4")
);
D("foo.com",REG);


@ -0,0 +1,35 @@
{ "registrars": [
{
"name": "Third-Party",
"type": "NONE"
}
],
"dns_providers": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"domains": [
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {
"Cloudflare":2
},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4"
}
]
},
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {},
"records": []
}
]
}


@ -0,0 +1,12 @@
var REG = NewRegistrar("Third-Party","NONE");
var CF = NewDnsProvider("Cloudflare", "CLOUDFLAREAPI")
var TRANSFORM_INT = [
{low: "0.0.0.0", high: "1.1.1.1", newBase: "2.2.2.2" },
{low: "1.1.1.1", high: IP("2.2.2.2"), newBase: ["3.3.3.3","4.4.4.4",IP("5.5.5.5")]} ,
{low: "1.1.1.1", high: IP("2.2.2.2"), newIP: ["3.3.3.3","4.4.4.4",IP("5.5.5.5")]}
]
D("foo.com",REG,DnsProvider(CF),
A("@","1.2.3.4",{transform: TRANSFORM_INT})
);


@ -0,0 +1,34 @@
{
"registrars": [
{
"name": "Third-Party",
"type": "NONE"
}
],
"dns_providers": [
{
"name": "Cloudflare",
"type": "CLOUDFLAREAPI"
}
],
"domains": [
{
"name": "foo.com",
"registrar": "Third-Party",
"dnsProviders": {
"Cloudflare":-1
},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4",
"meta": {
"transform": "0.0.0.0 ~ 1.1.1.1 ~ 2.2.2.2 ~ ; 1.1.1.1 ~ 2.2.2.2 ~ 3.3.3.3,4.4.4.4,5.5.5.5 ~ ; 1.1.1.1 ~ 2.2.2.2 ~ ~ 3.3.3.3,4.4.4.4,5.5.5.5"
}
}
],
"keepunknown": false
}
]
}


@ -0,0 +1,4 @@
var TRANSFORM_INT = [
{low: "0.0.0.0", high: "1.1.1.1", newBase: "2.2.2.2" }
]
D("foo.com","reg",IMPORT_TRANSFORM(TRANSFORM_INT,"foo2.com",60))


@ -0,0 +1,23 @@
{
"registrars": [],
"dns_providers": [],
"domains": [
{
"name": "foo.com",
"registrar": "reg",
"dnsProviders": {},
"records": [
{
"type": "IMPORT_TRANSFORM",
"name": "@",
"target": "foo2.com",
"ttl": 60,
"meta": {
"transform_table": "0.0.0.0 ~ 1.1.1.1 ~ 2.2.2.2 ~ "
}
}
],
"keepunknown": false
}
]
}


@ -0,0 +1,2 @@
require("pkg/js/parse_tests/import.js")


@ -0,0 +1,19 @@
{
"registrars": [],
"dns_providers": [],
"domains": [
{
"name": "foo.com",
"registrar": "none",
"dnsProviders": {},
"records": [
{
"type": "A",
"name": "@",
"target": "1.2.3.4"
}
],
"keepunknown": false
}
]
}


@ -0,0 +1,3 @@
D("foo.com","none",
ALIAS("@","foo.com.")
);


@ -0,0 +1,19 @@
{
"registrars": [],
"dns_providers": [],
"domains": [
{
"name": "foo.com",
"registrar": "none",
"dnsProviders": {},
"records": [
{
"type": "ALIAS",
"name": "@",
"target": "foo.com."
}
],
"keepunknown": false
}
]
}


@ -0,0 +1,4 @@
D("foo.com","none",
CF_REDIRECT("test.foo.com","https://goo.com/$1"),
CF_TEMP_REDIRECT("test.foo.com","https://goo.com/$1")
);


@ -0,0 +1,24 @@
{
"registrars": [],
"dns_providers": [],
"domains": [
{
"name": "foo.com",
"registrar": "none",
"dnsProviders": {},
"records": [
{
"type": "CF_REDIRECT",
"name": "@",
"target": "test.foo.com,https://goo.com/$1"
},
{
"type": "CF_TEMP_REDIRECT",
"name": "@",
"target": "test.foo.com,https://goo.com/$1"
}
],
"keepunknown": false
}
]
}


@ -0,0 +1,3 @@
D("foo.com","none",
A("@","1.2.3.4")
);

pkg/js/static.go (Normal file, 248 lines)

@ -0,0 +1,248 @@
package js
import (
"bytes"
"compress/gzip"
"encoding/base64"
"io/ioutil"
"net/http"
"os"
"path"
"sync"
"time"
)
type _escLocalFS struct{}
var _escLocal _escLocalFS
type _escStaticFS struct{}
var _escStatic _escStaticFS
type _escDirectory struct {
fs http.FileSystem
name string
}
type _escFile struct {
compressed string
size int64
modtime int64
local string
isDir bool
once sync.Once
data []byte
name string
}
func (_escLocalFS) Open(name string) (http.File, error) {
f, present := _escData[path.Clean(name)]
if !present {
return nil, os.ErrNotExist
}
return os.Open(f.local)
}
func (_escStaticFS) prepare(name string) (*_escFile, error) {
f, present := _escData[path.Clean(name)]
if !present {
return nil, os.ErrNotExist
}
var err error
f.once.Do(func() {
f.name = path.Base(name)
if f.size == 0 {
return
}
var gr *gzip.Reader
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
gr, err = gzip.NewReader(b64)
if err != nil {
return
}
f.data, err = ioutil.ReadAll(gr)
})
if err != nil {
return nil, err
}
return f, nil
}
func (fs _escStaticFS) Open(name string) (http.File, error) {
f, err := fs.prepare(name)
if err != nil {
return nil, err
}
return f.File()
}
func (dir _escDirectory) Open(name string) (http.File, error) {
return dir.fs.Open(dir.name + name)
}
func (f *_escFile) File() (http.File, error) {
type httpFile struct {
*bytes.Reader
*_escFile
}
return &httpFile{
Reader: bytes.NewReader(f.data),
_escFile: f,
}, nil
}
func (f *_escFile) Close() error {
return nil
}
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, nil
}
func (f *_escFile) Stat() (os.FileInfo, error) {
return f, nil
}
func (f *_escFile) Name() string {
return f.name
}
func (f *_escFile) Size() int64 {
return f.size
}
func (f *_escFile) Mode() os.FileMode {
return 0
}
func (f *_escFile) ModTime() time.Time {
return time.Unix(f.modtime, 0)
}
func (f *_escFile) IsDir() bool {
return f.isDir
}
func (f *_escFile) Sys() interface{} {
return f
}
// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true,
// the filesystem's contents are instead used.
func _escFS(useLocal bool) http.FileSystem {
if useLocal {
return _escLocal
}
return _escStatic
}
// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir.
// If useLocal is true, the filesystem's contents are instead used.
func _escDir(useLocal bool, name string) http.FileSystem {
if useLocal {
return _escDirectory{fs: _escLocal, name: name}
}
return _escDirectory{fs: _escStatic, name: name}
}
// _escFSByte returns the named file from the embedded assets. If useLocal is
// true, the filesystem's contents are instead used.
func _escFSByte(useLocal bool, name string) ([]byte, error) {
if useLocal {
f, err := _escLocal.Open(name)
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
return b, err
}
f, err := _escStatic.prepare(name)
if err != nil {
return nil, err
}
return f.data, nil
}
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
func _escFSMustByte(useLocal bool, name string) []byte {
b, err := _escFSByte(useLocal, name)
if err != nil {
panic(err)
}
return b
}
// _escFSString is the string version of _escFSByte.
func _escFSString(useLocal bool, name string) (string, error) {
b, err := _escFSByte(useLocal, name)
return string(b), err
}
// _escFSMustString is the string version of _escFSMustByte.
func _escFSMustString(useLocal bool, name string) string {
return string(_escFSMustByte(useLocal, name))
}
var _escData = map[string]*_escFile{
"/helpers.js": {
local: "pkg/js/helpers.js",
size: 8855,
modtime: 0,
compressed: `
H4sIAAAAAAAA/9wZa2/byPG7fsUcgUZkRdOPXNKCOhVVbflg1JINWb76IAjCmlxJm/CF3aUcNyf/9mIf
JJekFDtA0w/NB0fcnffMzszOWjnDwDglAbf6nc4WUQjSZAUD+NoBAKB4TRiniDIf5gtXroUJW2Y03ZIQ
15bTGJFELnR2mlaIVyiP+JCuGQxgvuh3Oqs8CThJEyAJ4QRF5N/YdhSzGudD3L8hQVMK8b3rK+FaguwM
USb4aVqwshMUY5c/Z9iNMUeOFoeswBaLTime+ILBAKzxcHI/vLYUo538K3SneC2UEeR8kEQlii//uiCI
+/KvFlFo71Uae1nONjbFa6evPcFzmkhCLeEvEnarzWFXnBQPQwGwpQrpSm7AYDCAbvr4CQe868C7d2B3
SbYM0mSLKSNpwrpAEkXDMZwiFrw6IAxgldIY8SXn9p59p2GakGXfb5qa05V1Qpa9Zp0EP13IkFCGKe3r
lAEuEWuylEB+9VNL9XUntoOUhsyfL1wRibdVIIpdHWmz2bUPJ66kyDAVlvDni11duIymAWbsAtE1s2NX
B69p7ONjYVnAKNhAnIZkRTB1hS8JB8IAeZ5Xg9WUfQhQFAmgJ8I3mq4JiChFz34hgFApp4xscfRsQqng
EK6gayxZJjyVhggRRyWkOBtLj7BLzd2OawFTxI2t1euXOzvAEcMl/lAItQdZWMAWcfNJBmSbdt2O80+L
0pQ1wN0hxjdSzz2clx7+wnESatE9obobtzUwsfiGpk9g/Ws4nVxNfvW1JKX3VN7IE5ZnWUo5Dn2welCc
S+iBBSpg5brmq+K60mPX6Rwfw0Uzpn04pxhxDAguJneajgf3DAPfYMgQRTHmmDJArAhjQEkohGNeFZct
wlpBeXaVOoPDJ0sJWjqNwABO+kB+MZOwF+FkzTd9IL2eU1qv5kcDek4WruHQXZvBmWCA6DqPccLr1A3n
COgYBlACzsmiMuuB01jlLpWGVIHRCUiDaH+MLof317M70GmKAQKGOaSrQvWKM/AUUJZFz/JHFMEq5znF
Rf3yBL2ROPXyIPO0Iv5EogiCCCMKKHmGjOItSXMGWxTlmAmGpic1VlFi23Vwv69eNaXpS2kK06ZOUQuV
XWaza3vr+HCHuYzD2exaslRRquLQkFmB1/NzsWlTUwjqcR7BALZ1fhdlCq6xLXxQsJdr6ogYBjNxD8gQ
1gzhVRm/IYoSxqjNVlG/JijGlgsnDgiQhJ2neSLj5ARijBIGYZp0OYjmLKW6CGHlb6OgeCZykvIi7qgm
ItBRFJnatRoFje4UTULRIRRkZZOQJyFekQSH3eqsVhBwdGr2Pq9Zy6iYcyHDQuQSRavuxqESkWRFyR3r
FMo8z3MqpTQckMzMUyKlwQDWmJdoVYy6Z87rsqIwnEq+duhaQ8stpBGUnbqkw+GbhS1Bf7C8w+G3Rb6+
Gt7pXhfRNeavyV3Bg0L4kcILZlp6LV1DA6HC+WQ4Hn2HCgb8j1dBMvumCiIxPsy+Q/4S+sdLP3uYvSb7
+EEJk1GSUsKf36ZDgQUlWkOZYIODz6Kq2HPRmd1xSpK1C+L3JI8fRfdbrS/cqqC6YI0fAH/JcMAZHOJi
OW802fs3mEx2TbL4FXyMztC0pxDNcsF0ngsNk5YmqiwgfzGpIxMXCxY41WUUVV0U/KKQim8jSctm1Jao
Rore05vVCDTaMsnvJwUxJwvJWlR5p94sV7x6FhyVngGrR3qWuK2IEhWklOKAy4bXcoyW1oytyfdkpsn/
LC1Nvp2ThODD8ehuNP1tNDUVMIVtADSEfqV2mrVfxl39Ci1J+fr/3b7Yqm7pnKKEic8lR4+RHmuIlCT4
z+dR+uTDqQsbst74cOaKbv8fiGEf3i9cUNs/F9sf5PbVrQ8fFwtFRl4UrVN4gTN4gffw0oef4QU+wAvA
C3y0OspBEUmwakQ7ZlQOREzCL9AQcl8vKuEzGDRhy85eAEjpYAAk8+TPfnmK5Gct0o2bqNpsRHlBa+nF
KFMgbukv4nwtJhF5fBam3CbOzvE+pSSxLdeMd3Ft3E+4wFTc+60jYiglPFKqJT5qiomFb6gmt9vKaZql
euL7v6agJm6oKKU4rKS4Sg9grvdLnpkXpU+O214WAVmta+k7hoHlbzUalMGnx2zpk9YBXsByhBpCBq2q
AtT7fbCK+97V+PZmOlvOpsPJ3eXNdKwOVYSEpVQUVpfI8gi+HcnlPHpTYlDTxkBcbGtFp8nKcsH6u1WS
L82q/n3tNo5Q12/mC1NKZ7dwagVCSFt3OMWBvqBxHrV9rIx4ez/9dWQbBlILWsHQ+yfG2X3yOUmfEhjA
CkUMF8n2ZtlCLtcO4HOa41pGbNYG5jKO6L4qsveyLIH78r588KpctQlF4WzflgRMfTZoulKORVuVR7MQ
2Xalk76ssrpNQozlMRbJEYUhxYx5oEayHAj3ykRRdVa2rkWm7JpsdWQ1THvYLcLvqznFPVyaXBEPvnlx
rjo1OTTVo1Y9/d0/Aw1xQEIMj4jhENJEDZAL+CO4bExCmZqEiju/6iYAMflV9AMV6s3eqaeArU0+Jayy
nA9XlzB+qCgry0t3FIqVBjd914on1YzJiDkQTWDMsQTcnCxqe28bxkJsUxwYiRe+YyoKSv0imsq0IYda
THbmrI0gdfdKYHj3Doyhb7XRrEmlxAZu7b3BQG0j7lpL5UxXpKfWQPftUA1r6TMUy5eU6m3owdpjPUGz
iAvhxr2E21YI0oSlog1K13Y1Xx4fHCxbbjlXdsGy7z6TLCPJ+ifHaqqyt/6Gnh4RF09RQf2xheKgr1Ix
yaB67SmLFIMVTWPYcJ75x8eMo+BzusV0FaVPXpDGx+j4r6cnH/7y88nx6dnpx48nIqdvCSoQPqEtYgEl
GffQY5pziRORR4ro8/FjRDIdf96Gx0Z5vbXDlDsdY2ANAwhT7rEsItzuet26Frb81wvnJwvnz2cfPjo9
8XG6cIyvs9rX+4XTeGMq2pk8LhiTlfiS07NyeOaYD5uSt1V7NCwiSd1tJbU2SpLHjdQbquz8p7MPH/cU
qPeik/6bzCtHR+p8GCM8ISKMEd94qyhNqeB5LPSswsOgDj3oel3oQbhn3Bf2y7FMlObhKkIUA4oIYpj5
amCAuRyHc5EepJAkCcmWhDmKiscIT74an18ub6c3D78vby4vRVXpBiXJZUbTL89dH7rpatXd9aWMoosQ
yxASJlqTsElmcphKUhAxyOBkH5XL++vrg3RWeRQpSgWV3hSRaJ0nFTWxg+lR8R5kmsPvVDroKXW6Wqmy
l3BSvguAbQyyHb8uoJ71H7TaUuNV1tvDNWkzPcRmv1VrXIR1VVDc381uxi7cTm9+u7oYTeHudnR+dXl1
DtPR+c30Ama/347ujFnd5XI6uriajs5nNqOBCyF72yVZHCJGA48kIf5ys5KXEvhpMICjU/jjD0Fm39be
SYZFcUjksILRQD6ThYxDnDM1bN+gLYYgjWPEWoMMaI0DK30sVzThjAY9y7V6Qq+yHzbVn43Gt/93Nqgp
9Q1D/CcAAP//qJ7f15ciAAA=
`,
},
"/": {
isDir: true,
local: "pkg/js",
},
}


@ -0,0 +1,69 @@
//Package nameservers provides logic for dynamically finding nameservers for a domain, and configuring NS records for them.
package nameservers
import (
"fmt"
"strings"
"github.com/StackExchange/dnscontrol/models"
"github.com/StackExchange/dnscontrol/providers"
"github.com/miekg/dns/dnsutil"
"strconv"
)
//DetermineNameservers will find all nameservers we should use for a domain. It follows the following rules:
//1. All explicitly defined NAMESERVER records will be used.
//2. Each DSP declares how many nameservers to use. Default is all. 0 indicates to use none.
func DetermineNameservers(dc *models.DomainConfig, maxNS int, dsps map[string]providers.DNSServiceProvider) ([]*models.Nameserver, error) {
//always take the explicitly defined nameservers
ns := dc.Nameservers
for dsp, n := range dc.DNSProviders {
if n == 0 {
continue
}
fmt.Printf("----- Getting nameservers from: %s\n", dsp)
p, ok := dsps[dsp]
if !ok {
return nil, fmt.Errorf("DNS provider %s not declared", dsp)
}
nss, err := p.GetNameservers(dc.Name)
if err != nil {
return nil, err
}
take := len(nss)
if n > 0 && n < take {
take = n
}
for i := 0; i < take; i++ {
ns = append(ns, nss[i])
}
}
return ns, nil
}
//AddNSRecords creates NS records on a domain corresponding to the nameservers specified.
func AddNSRecords(dc *models.DomainConfig) {
ttl := uint32(300)
if ttls, ok := dc.Metadata["ns_ttl"]; ok {
t, err := strconv.ParseUint(ttls, 10, 32)
if err != nil {
fmt.Printf("WARNING: ns_ttl for %s (%s) is not a valid int", dc.Name, ttls)
} else {
ttl = uint32(t)
}
}
for _, ns := range dc.Nameservers {
rc := &models.RecordConfig{
Type: "NS",
Name: "@",
Target: ns.Name,
Metadata: map[string]string{},
TTL: ttl,
}
if !strings.HasSuffix(rc.Target, ".") {
rc.Target += "."
}
rc.NameFQDN = dnsutil.AddOrigin(rc.Name, dc.Name)
dc.Records = append(dc.Records, rc)
}
}
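
As a quick illustration of AddNSRecords, here is a sketch that is not part of this diff (it assumes the package lives at pkg/nameservers like its neighbors, and the domain and nameserver names are made up): explicit nameservers plus an optional "ns_ttl" metadata override become apex NS records with dot-terminated targets.

package main

import (
	"fmt"

	"github.com/StackExchange/dnscontrol/models"
	"github.com/StackExchange/dnscontrol/pkg/nameservers"
)

func main() {
	dc := &models.DomainConfig{
		Name:     "example.com",
		Metadata: map[string]string{"ns_ttl": "172800"}, // optional TTL override; default is 300
		Nameservers: []*models.Nameserver{
			{Name: "ns1.example.net"},
			{Name: "ns2.example.net"},
		},
	}
	nameservers.AddNSRecords(dc)
	for _, r := range dc.Records {
		// Prints two NS records at the apex, e.g. "NS example.com ns1.example.net. 172800".
		fmt.Println(r.Type, r.NameFQDN, r.Target, r.TTL)
	}
}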


@ -0,0 +1,42 @@
package normalize
import (
"github.com/StackExchange/dnscontrol/models"
"testing"
)
func TestImportTransform(t *testing.T) {
const transformDouble = "0.0.0.0~1.1.1.1~~9.0.0.0,10.0.0.0"
const transformSingle = "0.0.0.0~1.1.1.1~~8.0.0.0"
src := &models.DomainConfig{
Name: "stackexchange.com",
Records: []*models.RecordConfig{
{Type: "A", Name: "*", NameFQDN: "*.stackexchange.com", Target: "0.0.2.2"},
{Type: "A", Name: "www", NameFQDN: "", Target: "0.0.1.1"},
},
}
dst := &models.DomainConfig{
Name: "internal",
Records: []*models.RecordConfig{
{Type: "A", Name: "*.stackexchange.com", NameFQDN: "*.stackexchange.com.internal", Target: "0.0.3.3", Metadata: map[string]string{"transform_table": transformSingle}},
{Type: "IMPORT_TRANSFORM", Name: "@", Target: "stackexchange.com", Metadata: map[string]string{"transform_table": transformDouble}},
},
}
cfg := &models.DNSConfig{
Domains: []*models.DomainConfig{src, dst},
}
if errs := NormalizeAndValidateConfig(cfg); len(errs) != 0 {
for _, err := range errs {
t.Error(err)
}
t.FailNow()
}
d := cfg.FindDomain("internal")
if len(d.Records) != 3 {
for _, r := range d.Records {
t.Error(r)
}
t.Fatalf("Expected 3 records in internal, but got %d", len(d.Records))
}
}

pkg/normalize/validate.go (Normal file, 392 lines)

@ -0,0 +1,392 @@
package normalize
import (
"fmt"
"net"
"strings"
"github.com/StackExchange/dnscontrol/models"
"github.com/StackExchange/dnscontrol/pkg/transform"
"github.com/StackExchange/dnscontrol/providers"
"github.com/miekg/dns"
"github.com/miekg/dns/dnsutil"
)
// checkIPv4 returns an error if the label is not a valid IPv4 address.
func checkIPv4(label string) error {
if net.ParseIP(label).To4() == nil {
return fmt.Errorf("WARNING: target (%v) is not an IPv4 address", label)
}
return nil
}
// checkIPv6 returns an error if the label is not a valid IPv6 address.
func checkIPv6(label string) error {
if net.ParseIP(label).To16() == nil {
return fmt.Errorf("WARNING: target (%v) is not an IPv6 address", label)
}
return nil
}
// make sure target is valid reference for cnames, mx, etc.
func checkTarget(target string) error {
if target == "@" {
return nil
}
if len(target) < 1 {
return fmt.Errorf("empty target")
}
// If it contains a ".", it must end in a ".".
if strings.ContainsRune(target, '.') && target[len(target)-1] != '.' {
return fmt.Errorf("target (%v) must end with a (.) [Required if target is not single label]", target)
}
return nil
}
// validateRecordTypes checks rec.Type against the list of valid types. In validTypes, true means a real DNS record type; false means a pseudo-type used internally.
func validateRecordTypes(rec *models.RecordConfig, domain string, pTypes []string) error {
var validTypes = map[string]bool{
"A": true,
"AAAA": true,
"CNAME": true,
"IMPORT_TRANSFORM": false,
"MX": true,
"TXT": true,
"NS": true,
"ALIAS": false,
}
_, ok := validTypes[rec.Type]
if !ok {
cType := providers.GetCustomRecordType(rec.Type)
if cType == nil {
return fmt.Errorf("Unsupported record type (%v) domain=%v name=%v", rec.Type, domain, rec.Name)
}
for _, providerType := range pTypes {
if providerType != cType.Provider {
return fmt.Errorf("Custom record type %s is not compatible with provider type %s", rec.Type, providerType)
}
}
//it is ok. Let's replace the type with the real type and add metadata to say we checked it
rec.Metadata["orig_custom_type"] = rec.Type
if cType.RealType != "" {
rec.Type = cType.RealType
}
}
return nil
}
// Underscores in names are often used erroneously. They are valid for DNS records, but invalid for URLs.
// Here we list common records expected to have underscores. Anything else containing an underscore will print a warning.
var expectedUnderscores = []string{"_domainkey", "_dmarc"}
func checkLabel(label string, rType string, domain string) error {
if label == "@" {
return nil
}
if len(label) < 1 {
return fmt.Errorf("empty %s label in %s", rType, domain)
}
if label[len(label)-1] == '.' {
return fmt.Errorf("label %s.%s ends with a (.)", label, domain)
}
//underscores are warnings
if strings.ContainsRune(label, '_') {
//unless it is in our exclusion list
ok := false
for _, ex := range expectedUnderscores {
if strings.Contains(label, ex) {
ok = true
break
}
}
if !ok {
return Warning{fmt.Errorf("label %s.%s contains an underscore", label, domain)}
}
}
return nil
}
// checkTargets returns true if rec.Target is valid for the rec.Type.
func checkTargets(rec *models.RecordConfig, domain string) (errs []error) {
label := rec.Name
target := rec.Target
check := func(e error) {
if e != nil {
err := fmt.Errorf("In %s %s.%s: %s", rec.Type, rec.Name, domain, e.Error())
if _, ok := e.(Warning); ok {
err = Warning{err}
}
errs = append(errs, err)
}
}
switch rec.Type {
case "A":
check(checkIPv4(target))
case "AAAA":
check(checkIPv6(target))
case "CNAME":
check(checkTarget(target))
if label == "@" {
check(fmt.Errorf("cannot create CNAME record for bare domain"))
}
case "MX":
check(checkTarget(target))
case "NS":
check(checkTarget(target))
if label == "@" {
check(fmt.Errorf("cannot create NS record for bare domain. Use NAMESERVER instead"))
}
case "ALIAS":
check(checkTarget(target))
case "TXT", "IMPORT_TRANSFORM":
default:
if rec.Metadata["orig_custom_type"] != "" {
//it is a valid custom type. We perform no validation on target
return
}
errs = append(errs, fmt.Errorf("Unimplemented record type (%v) domain=%v name=%v",
rec.Type, domain, rec.Name))
}
return
}
func transformCNAME(target, oldDomain, newDomain string) string {
// Canonicalize. If it isn't a FQDN, add the newDomain.
result := dnsutil.AddOrigin(target, oldDomain)
if dns.IsFqdn(result) {
result = result[:len(result)-1]
}
return dnsutil.AddOrigin(result, newDomain) + "."
}
// import_transform imports the records of one zone into another, modifying records along the way.
func importTransform(srcDomain, dstDomain *models.DomainConfig, transforms []transform.IpConversion, ttl uint32) error {
// Read srcDomain.Records, transform, and append to dstDomain.Records:
// 1. Skip any that aren't A or CNAMEs.
// 2. Append destDomainname to the end of the label.
// 3. For CNAMEs, append destDomainname to the end of the target.
// 4. For A records, change the target as described by the transforms.
for _, rec := range srcDomain.Records {
if dstDomain.HasRecordTypeName(rec.Type, rec.NameFQDN) {
continue
}
newRec := func() *models.RecordConfig {
rec2, _ := rec.Copy()
rec2.Name = rec2.NameFQDN
rec2.NameFQDN = dnsutil.AddOrigin(rec2.Name, dstDomain.Name)
if ttl != 0 {
rec2.TTL = ttl
}
return rec2
}
switch rec.Type {
case "A":
trs, err := transform.TransformIPToList(net.ParseIP(rec.Target), transforms)
if err != nil {
return fmt.Errorf("import_transform: TransformIP(%v, %v) returned err=%s", rec.Target, transforms, err)
}
for _, tr := range trs {
r := newRec()
r.Target = tr.String()
dstDomain.Records = append(dstDomain.Records, r)
}
case "CNAME":
r := newRec()
r.Target = transformCNAME(r.Target, srcDomain.Name, dstDomain.Name)
dstDomain.Records = append(dstDomain.Records, r)
case "MX", "NS", "TXT":
// Not imported.
continue
default:
return fmt.Errorf("import_transform: Unimplemented record type %v (%v)",
rec.Type, rec.Name)
}
}
return nil
}
// deleteImportTransformRecords deletes any IMPORT_TRANSFORM records from a domain.
func deleteImportTransformRecords(domain *models.DomainConfig) {
for i := len(domain.Records) - 1; i >= 0; i-- {
rec := domain.Records[i]
if rec.Type == "IMPORT_TRANSFORM" {
domain.Records = append(domain.Records[:i], domain.Records[i+1:]...)
}
}
}
// Warning is a wrapper around error that can be used to indicate it should not
// stop execution, but is still likely a problem.
type Warning struct {
error
}
func NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {
ptypeMap := map[string]string{}
for _, p := range config.DNSProviders {
ptypeMap[p.Name] = p.Type
}
for _, domain := range config.Domains {
pTypes := []string{}
for p := range domain.DNSProviders {
pType, ok := ptypeMap[p]
if !ok {
errs = append(errs, fmt.Errorf("%s uses undefined DNS provider %s", domain.Name, p))
} else {
pTypes = append(pTypes, pType)
}
}
// Normalize Nameservers.
for _, ns := range domain.Nameservers {
ns.Name = dnsutil.AddOrigin(ns.Name, domain.Name)
ns.Name = strings.TrimRight(ns.Name, ".")
}
// Normalize Records.
for _, rec := range domain.Records {
if rec.TTL == 0 {
rec.TTL = models.DefaultTTL
}
// Validate the unmodified inputs:
if err := validateRecordTypes(rec, domain.Name, pTypes); err != nil {
errs = append(errs, err)
}
if err := checkLabel(rec.Name, rec.Type, domain.Name); err != nil {
errs = append(errs, err)
}
if errs2 := checkTargets(rec, domain.Name); errs2 != nil {
errs = append(errs, errs2...)
}
// Canonicalize Targets.
if rec.Type == "CNAME" || rec.Type == "MX" || rec.Type == "NS" {
rec.Target = dnsutil.AddOrigin(rec.Target, domain.Name+".")
}
// Populate FQDN:
rec.NameFQDN = dnsutil.AddOrigin(rec.Name, domain.Name)
}
}
// Process any pseudo-records:
for _, domain := range config.Domains {
for _, rec := range domain.Records {
if rec.Type == "IMPORT_TRANSFORM" {
table, err := transform.DecodeTransformTable(rec.Metadata["transform_table"])
if err != nil {
errs = append(errs, err)
continue
}
err = importTransform(config.FindDomain(rec.Target), domain, table, rec.TTL)
if err != nil {
errs = append(errs, err)
}
}
}
}
// Clean up:
for _, domain := range config.Domains {
deleteImportTransformRecords(domain)
}
// Run record transforms
for _, domain := range config.Domains {
if err := applyRecordTransforms(domain); err != nil {
errs = append(errs, err)
}
}
//Check that CNAMEs don't co-exist with any other records of the same name
for _, d := range config.Domains {
errs = append(errs, checkCNAMEs(d)...)
}
//Check that if any aliases are used in a domain, every provider for that domain supports them
for _, d := range config.Domains {
err := checkALIASes(d, config.DNSProviders)
if err != nil {
errs = append(errs, err)
}
}
return errs
}
func checkCNAMEs(dc *models.DomainConfig) (errs []error) {
cnames := map[string]bool{}
for _, r := range dc.Records {
if r.Type == "CNAME" {
if cnames[r.Name] {
errs = append(errs, fmt.Errorf("Cannot have multiple CNAMEs with same name: %s", r.NameFQDN))
}
cnames[r.Name] = true
}
}
for _, r := range dc.Records {
if cnames[r.Name] && r.Type != "CNAME" {
errs = append(errs, fmt.Errorf("Cannot have CNAME and %s record with same name: %s", r.Type, r.NameFQDN))
}
}
return
}
func checkALIASes(dc *models.DomainConfig, pList []*models.DNSProviderConfig) error {
hasAlias := false
for _, r := range dc.Records {
if r.Type == "ALIAS" {
hasAlias = true
break
}
}
if !hasAlias {
return nil
}
for pName := range dc.DNSProviders {
for _, p := range pList {
if p.Name == pName {
if !providers.ProviderHasCabability(p.Type, providers.CanUseAlias) {
return fmt.Errorf("Domain %s uses ALIAS records, but DNS provider type %s does not support them", dc.Name, p.Type)
}
break
}
}
}
return nil
}
func applyRecordTransforms(domain *models.DomainConfig) error {
for _, rec := range domain.Records {
if rec.Type != "A" {
continue
}
tt, ok := rec.Metadata["transform"]
if !ok {
continue
}
table, err := transform.DecodeTransformTable(tt)
if err != nil {
return err
}
ip := net.ParseIP(rec.Target) //ip already validated above
newIPs, err := transform.TransformIPToList(net.ParseIP(rec.Target), table)
if err != nil {
return err
}
for i, newIP := range newIPs {
if i == 0 && !newIP.Equal(ip) {
rec.Target = newIP.String() //replace target of first record if different
} else if i > 0 {
// any additional ips need identical records with the alternate ip added to the domain
copy, err := rec.Copy()
if err != nil {
return err
}
copy.Target = newIP.String()
domain.Records = append(domain.Records, copy)
}
}
}
return nil
}
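
Since NormalizeAndValidateConfig mixes soft Warnings and hard errors in one slice, a caller can tell them apart with a type assertion. A minimal sketch, not part of this diff (the underscored label is only there to trigger a Warning):

package main

import (
	"fmt"

	"github.com/StackExchange/dnscontrol/models"
	"github.com/StackExchange/dnscontrol/pkg/normalize"
)

func main() {
	cfg := &models.DNSConfig{
		Domains: []*models.DomainConfig{{
			Name: "example.com",
			Records: []*models.RecordConfig{
				{Type: "A", Name: "foo_bar", Target: "1.2.3.4"}, // underscore label -> Warning
			},
		}},
	}
	for _, err := range normalize.NormalizeAndValidateConfig(cfg) {
		if _, ok := err.(normalize.Warning); ok {
			fmt.Println("WARNING:", err)
			continue
		}
		fmt.Println("ERROR:", err)
	}
}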


@ -0,0 +1,171 @@
package normalize
import (
"testing"
"fmt"
"github.com/StackExchange/dnscontrol/models"
)
func TestCheckLabel(t *testing.T) {
var tests = []struct {
experiment string
isError bool
}{
{"@", false},
{"foo", false},
{"foo.bar", false},
{"foo.", true},
{"foo.bar.", true},
{"foo_bar", true},
{"_domainkey", false},
}
for _, test := range tests {
err := checkLabel(test.experiment, "A", "foo.com")
checkError(t, err, test.isError, test.experiment)
}
}
func checkError(t *testing.T, err error, shouldError bool, experiment string) {
if err != nil && !shouldError {
t.Errorf("%v: Error (%v)\n", experiment, err)
}
if err == nil && shouldError {
t.Errorf("%v: Expected error but got none \n", experiment)
}
}
func Test_assert_valid_ipv4(t *testing.T) {
var tests = []struct {
experiment string
isError bool
}{
{"1.2.3.4", false},
{"1.2.3.4/10", true},
{"1.2.3", true},
{"foo", true},
}
for _, test := range tests {
err := checkIPv4(test.experiment)
checkError(t, err, test.isError, test.experiment)
}
}
func Test_assert_valid_target(t *testing.T) {
var tests = []struct {
experiment string
isError bool
}{
{"@", false},
{"foo", false},
{"foo.bar.", false},
{"foo.", false},
{"foo.bar", true},
}
for _, test := range tests {
err := checkTarget(test.experiment)
checkError(t, err, test.isError, test.experiment)
}
}
func Test_transform_cname(t *testing.T) {
var tests = []struct {
experiment string
expected string
}{
{"@", "old.com.new.com."},
{"foo", "foo.old.com.new.com."},
{"foo.bar", "foo.bar.old.com.new.com."},
{"foo.bar.", "foo.bar.new.com."},
{"chat.stackexchange.com.", "chat.stackexchange.com.new.com."},
}
for _, test := range tests {
actual := transformCNAME(test.experiment, "old.com", "new.com")
if test.expected != actual {
t.Errorf("%v: expected (%v) got (%v)\n", test.experiment, test.expected, actual)
}
}
}
func TestNSAtRoot(t *testing.T) {
//do not allow ns records for @
rec := &models.RecordConfig{Name: "test", Type: "NS", Target: "ns1.name.com."}
errs := checkTargets(rec, "foo.com")
if len(errs) > 0 {
t.Error("Expect no error with ns record on subdomain")
}
rec.Name = "@"
errs = checkTargets(rec, "foo.com")
if len(errs) != 1 {
t.Error("Expect error with ns record on @")
}
}
func TestTransforms(t *testing.T) {
var tests = []struct {
givenIP string
expectedRecords []string
}{
{"0.0.5.5", []string{"2.0.5.5"}},
{"3.0.5.5", []string{"5.5.5.5"}},
{"7.0.5.5", []string{"9.9.9.9", "10.10.10.10"}},
}
const transform = "0.0.0.0~1.0.0.0~2.0.0.0~; 3.0.0.0~4.0.0.0~~5.5.5.5; 7.0.0.0~8.0.0.0~~9.9.9.9,10.10.10.10"
for i, test := range tests {
dc := &models.DomainConfig{
Records: []*models.RecordConfig{
{Type: "A", Target: test.givenIP, Metadata: map[string]string{"transform": transform}},
},
}
err := applyRecordTransforms(dc)
if err != nil {
t.Errorf("error on test %d: %s", i, err)
continue
}
if len(dc.Records) != len(test.expectedRecords) {
t.Errorf("test %d: expect %d records but found %d", i, len(test.expectedRecords), len(dc.Records))
continue
}
for r, rec := range dc.Records {
if rec.Target != test.expectedRecords[r] {
t.Errorf("test %d at index %d: records don't match. Expect %s but found %s.", i, r, test.expectedRecords[r], rec.Target)
continue
}
}
}
}
func TestCNAMEMutex(t *testing.T) {
var recA = &models.RecordConfig{Type: "CNAME", Name: "foo", NameFQDN: "foo.example.com", Target: "example.com."}
tests := []struct {
rType string
name string
fail bool
}{
{"A", "foo", true},
{"A", "foo2", false},
{"CNAME", "foo", true},
{"CNAME", "foo2", false},
}
for _, tst := range tests {
t.Run(fmt.Sprintf("%s %s", tst.rType, tst.name), func(t *testing.T) {
var recB = &models.RecordConfig{Type: tst.rType, Name: tst.name, NameFQDN: tst.name + ".example.com", Target: "example2.com."}
dc := &models.DomainConfig{
Name: "example.com",
Records: []*models.RecordConfig{recA, recB},
}
errs := checkCNAMEs(dc)
if errs != nil && !tst.fail {
t.Error("Got error but expected none")
}
if errs == nil && tst.fail {
t.Error("Expected error but got none")
}
})
}
}

pkg/spflib/parse.go (Normal file, 107 lines)

@ -0,0 +1,107 @@
package spflib
import (
"fmt"
"strings"
"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
)
type SPFRecord struct {
Lookups int
Parts []*SPFPart
}
type SPFPart struct {
Text string
Lookups int
IncludeRecord *SPFRecord
}
func Lookup(target string, dnsres dnsresolver.DnsResolver) (string, error) {
txts, err := dnsres.GetTxt(target)
if err != nil {
return "", err
}
var result []string
for _, txt := range txts {
if strings.HasPrefix(txt, "v=spf1 ") {
result = append(result, txt)
}
}
if len(result) == 0 {
return "", fmt.Errorf("%s has no spf TXT records", target)
}
if len(result) != 1 {
return "", fmt.Errorf("%s has multiple spf TXT records", target)
}
return result[0], nil
}
var qualifiers = map[byte]bool{
'?': true,
'~': true,
'-': true,
'+': true,
}
func Parse(text string, dnsres dnsresolver.DnsResolver) (*SPFRecord, error) {
if !strings.HasPrefix(text, "v=spf1 ") {
return nil, fmt.Errorf("Not an spf record")
}
parts := strings.Split(text, " ")
rec := &SPFRecord{}
for _, part := range parts[1:] {
p := &SPFPart{Text: part}
if qualifiers[part[0]] {
part = part[1:]
}
rec.Parts = append(rec.Parts, p)
if part == "all" {
//all. nothing else matters.
break
} else if strings.HasPrefix(part, "a") || strings.HasPrefix(part, "mx") {
rec.Lookups++
p.Lookups = 1
} else if strings.HasPrefix(part, "ip4:") || strings.HasPrefix(part, "ip6:") {
//ip address, 0 lookups
continue
} else if strings.HasPrefix(part, "include:") {
rec.Lookups++
includeTarget := strings.TrimPrefix(part, "include:")
subRecord, err := Lookup(includeTarget, dnsres)
if err != nil {
return nil, err
}
p.IncludeRecord, err = Parse(subRecord, dnsres)
if err != nil {
return nil, fmt.Errorf("In included spf: %s", err)
}
rec.Lookups += p.IncludeRecord.Lookups
p.Lookups = p.IncludeRecord.Lookups + 1
} else {
return nil, fmt.Errorf("Unsupported spf part %s", part)
}
}
return rec, nil
}
// DumpSPF outputs an SPFRecord and related data for debugging purposes.
func DumpSPF(rec *SPFRecord, indent string) {
fmt.Printf("%sTotal Lookups: %d\n", indent, rec.Lookups)
fmt.Print(indent + "v=spf1")
for _, p := range rec.Parts {
fmt.Print(" " + p.Text)
}
fmt.Println()
indent += "\t"
for _, p := range rec.Parts {
if p.Lookups > 0 {
fmt.Println(indent + p.Text)
}
if p.IncludeRecord != nil {
DumpSPF(p.IncludeRecord, indent+"\t")
}
}
}
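
To show how the Lookups counters are meant to be used, here is a minimal sketch (not part of this diff) that parses an SPF record against the preloaded resolver fixture further down and checks it against SPF's limit of 10 DNS-querying mechanisms:

package main

import (
	"fmt"

	"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
	"github.com/StackExchange/dnscontrol/pkg/spflib"
)

func main() {
	res, err := dnsresolver.NewResolverPreloaded("testdata-dns1.json")
	if err != nil {
		panic(err)
	}
	rec, err := spflib.Parse("v=spf1 include:_spf.google.com ~all", res)
	if err != nil {
		panic(err)
	}
	spflib.DumpSPF(rec, "")
	if rec.Lookups > 10 {
		fmt.Println("SPF record exceeds the 10-lookup limit")
	}
}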

pkg/spflib/parse_test.go (Normal file, 30 lines)

@ -0,0 +1,30 @@
package spflib
import (
"strings"
"testing"
"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
)
func TestParse(t *testing.T) {
dnsres, err := dnsresolver.NewResolverPreloaded("testdata-dns1.json")
if err != nil {
t.Fatal(err)
}
rec, err := Parse(strings.Join([]string{"v=spf1",
"ip4:198.252.206.0/24",
"ip4:192.111.0.0/24",
"include:_spf.google.com",
"include:mailgun.org",
"include:spf-basic.fogcreek.com",
"include:mail.zendesk.com",
"include:servers.mcsv.net",
"include:sendgrid.net",
"include:spf.mtasv.net",
"~all"}, " "), dnsres)
if err != nil {
t.Fatal(err)
}
DumpSPF(rec, "")
}


@ -0,0 +1,64 @@
{
"_netblocks.google.com": {
"txt": [
"v=spf1 ip4:64.18.0.0/20 ip4:64.233.160.0/19 ip4:66.102.0.0/20 ip4:66.249.80.0/20 ip4:72.14.192.0/18 ip4:74.125.0.0/16 ip4:108.177.8.0/21 ip4:173.194.0.0/16 ip4:207.126.144.0/20 ip4:209.85.128.0/17 ip4:216.58.192.0/19 ip4:216.239.32.0/19 ~all"
]
},
"_netblocks2.google.com": {
"txt": [
"v=spf1 ip6:2001:4860:4000::/36 ip6:2404:6800:4000::/36 ip6:2607:f8b0:4000::/36 ip6:2800:3f0:4000::/36 ip6:2a00:1450:4000::/36 ip6:2c0f:fb50:4000::/36 ~all"
]
},
"_netblocks3.google.com": {
"txt": [
"v=spf1 ip4:172.217.0.0/19 ip4:108.177.96.0/19 ~all"
]
},
"_spf.google.com": {
"txt": [
"v=spf1 include:_netblocks.google.com include:_netblocks2.google.com include:_netblocks3.google.com ~all"
]
},
"mail.zendesk.com": {
"txt": [
"v=spf1 ip4:192.161.144.0/20 ip4:185.12.80.0/22 ip4:96.46.150.192/27 ip4:174.137.46.0/24 ip4:188.172.128.0/20 ip4:216.198.0.0/18 ~all"
]
},
"mailgun.org": {
"txt": [
"google-site-verification=FIGVOKZm6lQFDBJaiC2DdwvBy8TInunoGCt-1gnL4PA",
"v=spf1 include:spf1.mailgun.org include:spf2.mailgun.org ~all"
]
},
"sendgrid.net": {
"txt": [
"google-site-verification=NxyooVvVaIgddVa23KTlOEuVPuhffcDqJFV8RzWrAys",
"v=spf1 ip4:167.89.0.0/17 ip4:208.117.48.0/20 ip4:50.31.32.0/19 ip4:198.37.144.0/20 ip4:198.21.0.0/21 ip4:192.254.112.0/20 ip4:168.245.0.0/17 ~all"
]
},
"servers.mcsv.net": {
"txt": [
"v=spf1 ip4:205.201.128.0/20 ip4:198.2.128.0/18 ip4:148.105.8.0/21 ?all"
]
},
"spf-basic.fogcreek.com": {
"txt": [
"v=spf1 ip4:64.34.80.172 -all"
]
},
"spf.mtasv.net": {
"txt": [
"v=spf1 ip4:50.31.156.96/27 ip4:104.245.209.192/26 ~all"
]
},
"spf1.mailgun.org": {
"txt": [
"v=spf1 ip4:173.193.210.32/27 ip4:50.23.218.192/27 ip4:174.37.226.64/27 ip4:208.43.239.136/30 ip4:184.173.105.0/24 ip4:184.173.153.0/24 ip4:104.130.122.0/23 ip4:146.20.112.0/26 ~all"
]
},
"spf2.mailgun.org": {
"txt": [
"v=spf1 ip4:209.61.151.0/24 ip4:166.78.68.0/22 ip4:198.61.254.0/23 ip4:192.237.158.0/23 ip4:23.253.182.0/23 ip4:104.130.96.0/28 ip4:146.20.113.0/24 ip4:146.20.191.0/24 ~all"
]
}
}

pkg/transform/transform.go (Normal file, 129 lines)

@ -0,0 +1,129 @@
package transform
import (
"fmt"
"net"
"strings"
)
type IpConversion struct {
Low, High net.IP
NewBases []net.IP
NewIPs []net.IP
}
func ipToUint(i net.IP) (uint32, error) {
parts := i.To4()
if parts == nil || len(parts) != 4 {
return 0, fmt.Errorf("%s is not an ipv4 address", i.String())
}
r := uint32(parts[0])<<24 | uint32(parts[1])<<16 | uint32(parts[2])<<8 | uint32(parts[3])
return r, nil
}
func UintToIP(u uint32) net.IP {
return net.IPv4(
byte((u>>24)&255),
byte((u>>16)&255),
byte((u>>8)&255),
byte((u)&255))
}
// DecodeTransformTable turns a string-encoded table into a list of conversions.
func DecodeTransformTable(transforms string) ([]IpConversion, error) {
result := []IpConversion{}
rows := strings.Split(transforms, ";")
for ri, row := range rows {
items := strings.Split(row, "~")
if len(items) != 4 {
return nil, fmt.Errorf("transform_table rows should have 4 elements. (%v) found in row (%v) of %#v\n", len(items), ri, transforms)
}
for i, item := range items {
items[i] = strings.TrimSpace(item)
}
con := IpConversion{
Low: net.ParseIP(items[0]),
High: net.ParseIP(items[1]),
}
parseList := func(s string) ([]net.IP, error) {
ips := []net.IP{}
for _, ip := range strings.Split(s, ",") {
if ip == "" {
continue
}
addr := net.ParseIP(ip)
if addr == nil {
return nil, fmt.Errorf("%s is not a valid ip address", ip)
}
ips = append(ips, addr)
}
return ips, nil
}
var err error
if con.NewBases, err = parseList(items[2]); err != nil {
return nil, err
}
if con.NewIPs, err = parseList(items[3]); err != nil {
return nil, err
}
low, _ := ipToUint(con.Low)
high, _ := ipToUint(con.High)
if low > high {
return nil, fmt.Errorf("transform_table Low should be less than High. row (%v) %v>%v (%v)\n", ri, con.Low, con.High, transforms)
}
if len(con.NewBases) > 0 && len(con.NewIPs) > 0 {
return nil, fmt.Errorf("transform_table_rows should only specify one of NewBases or NewIPs, Not both")
}
result = append(result, con)
}
return result, nil
}
// TransformIP transforms a single ip address. If the transform results in multiple new targets, an error will be returned.
func TransformIP(address net.IP, transforms []IpConversion) (net.IP, error) {
ips, err := TransformIPToList(address, transforms)
if err != nil {
return nil, err
}
if len(ips) != 1 {
return nil, fmt.Errorf("Expect exactly one ip for TransformIP result. Got: %s", ips)
}
return ips[0], err
}
// TransformIPToList manipulates an net.IP based on a list of IpConversions. It can potentially expand one ip address into multiple addresses.
func TransformIPToList(address net.IP, transforms []IpConversion) ([]net.IP, error) {
thisIP, err := ipToUint(address)
if err != nil {
return nil, err
}
for _, conv := range transforms {
min, err := ipToUint(conv.Low)
if err != nil {
return nil, err
}
max, err := ipToUint(conv.High)
if err != nil {
return nil, err
}
if (thisIP >= min) && (thisIP <= max) {
if len(conv.NewIPs) > 0 {
return conv.NewIPs, nil
}
list := []net.IP{}
for _, nb := range conv.NewBases {
newbase, err := ipToUint(nb)
if err != nil {
return nil, err
}
list = append(list, UintToIP(newbase+(thisIP-min)))
}
return list, nil
}
}
return []net.IP{address}, nil
}


@ -0,0 +1,224 @@
package transform
import (
"net"
"strings"
"testing"
)
func TestIPToUint(t *testing.T) {
ip := net.ParseIP("1.2.3.4")
u, err := ipToUint(ip)
if err != nil {
t.Fatal(err)
}
if u != 16909060 {
t.Fatalf("IP to uint conversion failed. Should be 16909060. Got %d", u)
}
ip2 := UintToIP(u)
if !ip.Equal(ip2) {
t.Fatalf("IPs should be equal. %s is not %s", ip2, ip)
}
}
func Test_DecodeTransformTable_failures(t *testing.T) {
result, err := DecodeTransformTable("1.2.3.4 ~ 3.4.5.6")
if result != nil {
t.Errorf("expected nil, got (%v)\n", result)
}
if err == nil {
t.Error("expect error, got none")
}
}
func test_ip(t *testing.T, test string, expected string, actual net.IP) {
if !net.ParseIP(expected).Equal(actual) {
t.Errorf("Test %v: expected (%v), got (%v)\n", test, expected, actual)
}
}
func Test_DecodeTransformTable_0(t *testing.T) {
result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ 3.4.5.6 ~ ")
if err != nil {
t.Fatal(err)
}
if len(result) != 1 {
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result))
}
test_ip(t, "low", "1.2.3.4", result[0].Low)
test_ip(t, "high", "2.3.4.5", result[0].High)
test_ip(t, "newBase", "3.4.5.6", result[0].NewBases[0])
//test_ip(t, "newIP", "", result[0].NewIPs)
}
func Test_DecodeTransformTable_1(t *testing.T) {
result, err := DecodeTransformTable("1.2.3.4~2.3.4.5~3.4.5.6 ~;8.7.6.5 ~ 9.8.7.6 ~ 7.6.5.4 ~ ")
if err != nil {
t.Fatal(err)
}
if len(result) != 2 {
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 2, len(result))
}
test_ip(t, "Low[0]", "1.2.3.4", result[0].Low)
test_ip(t, "High[0]", "2.3.4.5", result[0].High)
test_ip(t, "NewBase[0]", "3.4.5.6", result[0].NewBases[0])
//test_ip(t, "newIP[0]", "", result[0].NewIP)
test_ip(t, "Low[1]", "8.7.6.5", result[1].Low)
test_ip(t, "High[1]", "9.8.7.6", result[1].High)
test_ip(t, "NewBase[1]", "7.6.5.4", result[1].NewBases[0])
//test_ip(t, "newIP[1]", "", result[0].NewIP)
}
func Test_DecodeTransformTable_NewIP(t *testing.T) {
result, err := DecodeTransformTable("1.2.3.4 ~ 2.3.4.5 ~ ~ 3.4.5.6 ")
if err != nil {
t.Fatal(err)
}
if len(result) != 1 {
t.Errorf("Test %v: expected col length (%v), got (%v)\n", 1, 1, len(result))
}
test_ip(t, "low", "1.2.3.4", result[0].Low)
test_ip(t, "high", "2.3.4.5", result[0].High)
test_ip(t, "newIP", "3.4.5.6", result[0].NewIPs[0])
}
func Test_DecodeTransformTable_order(t *testing.T) {
raw := "9.8.7.6 ~ 8.7.6.5 ~ 7.6.5.4 ~"
result, err := DecodeTransformTable(raw)
if result != nil {
t.Errorf("Invalid range not detected: (%v)\n", raw)
}
if err == nil {
t.Error("expect error, got none")
}
}
func Test_DecodeTransformTable_Base_and_IP(t *testing.T) {
raw := "1.1.1.1~ 8.7.6.5 ~ 7.6.5.4 ~ 4.4.4.4"
result, err := DecodeTransformTable(raw)
if result != nil {
t.Errorf("NewBase and NewIP should not both be specified: (%v)\n", raw)
}
if err == nil {
t.Error("expect error, got none")
}
}
func Test_TransformIP(t *testing.T) {
var transforms1 = []IpConversion{{
Low: net.ParseIP("11.11.11.0"),
High: net.ParseIP("11.11.11.20"),
NewBases: []net.IP{net.ParseIP("99.99.99.0")},
}, {
Low: net.ParseIP("22.22.22.0"),
High: net.ParseIP("22.22.22.40"),
NewBases: []net.IP{net.ParseIP("99.99.99.100")},
}, {
Low: net.ParseIP("33.33.33.20"),
High: net.ParseIP("33.33.35.40"),
NewBases: []net.IP{net.ParseIP("100.100.100.0")},
}, {
Low: net.ParseIP("44.44.44.20"),
High: net.ParseIP("44.44.44.40"),
NewBases: []net.IP{net.ParseIP("100.100.100.40")},
}, {
Low: net.ParseIP("55.0.0.0"),
High: net.ParseIP("55.255.0.0"),
NewBases: []net.IP{net.ParseIP("66.0.0.0"), net.ParseIP("77.0.0.0")},
}}
//NO TRANSFORMS ON 99.x.x.x PLZ
var tests = []struct {
experiment string
expected string
}{
{"11.11.11.0", "99.99.99.0"},
{"11.11.11.1", "99.99.99.1"},
{"11.11.11.11", "99.99.99.11"},
{"11.11.11.19", "99.99.99.19"},
{"11.11.11.20", "99.99.99.20"},
{"11.11.11.21", "11.11.11.21"},
{"22.22.22.22", "99.99.99.122"},
{"22.22.22.255", "22.22.22.255"},
{"33.33.33.0", "33.33.33.0"},
{"33.33.33.19", "33.33.33.19"},
{"33.33.33.20", "100.100.100.0"},
{"33.33.33.21", "100.100.100.1"},
{"33.33.33.33", "100.100.100.13"},
{"33.33.35.39", "100.100.102.19"},
{"33.33.35.40", "100.100.102.20"},
{"33.33.35.41", "33.33.35.41"},
{"44.44.44.24", "100.100.100.44"},
{"44.44.44.44", "44.44.44.44"},
{"55.0.42.42", "66.0.42.42,77.0.42.42"},
{"99.0.0.42", "99.0.0.42"},
}
for _, test := range tests {
experiment := net.ParseIP(test.experiment)
actual, err := TransformIPToList(experiment, transforms1)
if err != nil {
t.Errorf("%v: got an err: %v\n", experiment, err)
}
list := []string{}
for _, ip := range actual {
list = append(list, ip.String())
}
act := strings.Join(list, ",")
if test.expected != act {
t.Errorf("%v: expected (%v) got (%v)\n", experiment, test.expected, act)
}
}
}
func Test_TransformIP_NewIP(t *testing.T) {
var transforms1 = []IpConversion{{
Low: net.ParseIP("11.11.11.0"),
High: net.ParseIP("11.11.11.20"),
NewIPs: []net.IP{net.ParseIP("1.1.1.1")},
}, {
Low: net.ParseIP("22.22.22.0"),
High: net.ParseIP("22.22.22.40"),
NewIPs: []net.IP{net.ParseIP("2.2.2.2")},
}, {
Low: net.ParseIP("33.33.33.20"),
High: net.ParseIP("33.33.35.40"),
NewIPs: []net.IP{net.ParseIP("3.3.3.3")},
},
}
var tests = []struct {
experiment string
expected string
}{
{"11.11.11.0", "1.1.1.1"},
{"11.11.11.1", "1.1.1.1"},
{"11.11.11.11", "1.1.1.1"},
{"11.11.11.19", "1.1.1.1"},
{"11.11.11.20", "1.1.1.1"},
{"11.11.11.21", "11.11.11.21"},
{"22.22.22.22", "2.2.2.2"},
{"22.22.22.255", "22.22.22.255"},
{"33.33.33.0", "33.33.33.0"},
{"33.33.33.19", "33.33.33.19"},
{"33.33.33.20", "3.3.3.3"},
{"33.33.33.21", "3.3.3.3"},
{"33.33.33.33", "3.3.3.3"},
{"33.33.35.39", "3.3.3.3"},
{"33.33.35.40", "3.3.3.3"},
{"33.33.35.41", "33.33.35.41"},
}
for _, test := range tests {
experiment := net.ParseIP(test.experiment)
expected := net.ParseIP(test.expected)
actual, err := TransformIP(experiment, transforms1)
if err != nil {
t.Errorf("%v: got an err: %v\n", experiment, err)
}
if !expected.Equal(actual) {
t.Errorf("%v: expected (%v) got (%v)\n", experiment, expected, actual)
}
}
}