1
0
mirror of https://github.com/StackExchange/dnscontrol.git synced 2024-05-11 05:55:12 +00:00

Add SPF flattening feature. (#126)

This commit is contained in:
Craig Peterson
2017-09-29 15:30:36 -04:00
committed by Tom Limoncelli
parent 707f7e5d99
commit 823e8bb1a3
20 changed files with 104153 additions and 343 deletions

View File

@@ -1,66 +0,0 @@
package main
import (
"fmt"
"strings"
"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
"github.com/StackExchange/dnscontrol/pkg/spflib"
)
// main is a manual smoke test for the dnsresolver and spflib packages:
// it exercises the live resolver (which records answers to a JSON file),
// the preloaded resolver (which replays them), SPF parsing, and SPF tree
// dumping. It panics on any error; this is demo code, not a production
// entry point.
func main() {
// Live resolver: performs real DNS lookups and records them.
h := dnsresolver.NewResolverLive("spf-store.json")
fmt.Println(h.GetTxt("_spf.google.com"))
fmt.Println(h.GetTxt("spf-basic.fogcreek.com"))
// Close writes the recorded answers out to spf-store.json.
h.Close()
// Preloaded resolver: replays the captured answers without touching
// the network.
i, err := dnsresolver.NewResolverPreloaded("spf-store.json")
if err != nil {
panic(err)
}
fmt.Println(i.GetTxt("_spf.google.com"))
fmt.Println(i.GetTxt("spf-basic.fogcreek.com"))
// Not in the store; expected to print a resolution error.
fmt.Println(i.GetTxt("wontbefound"))
fmt.Println()
fmt.Println("---------------------")
fmt.Println()
//res := dnsresolver.NewResolverLive("spf-store2.json")
res, err := dnsresolver.NewResolverPreloaded("spf-store2.json")
if err != nil {
panic(err)
}
// Parse a hand-built SPF record, resolving include: targets via res.
rec, err := spflib.Parse(strings.Join([]string{"v=spf1",
"ip4:198.252.206.0/24",
"ip4:192.111.0.0/24",
"include:_spf.google.com",
"include:mailgun.org",
"include:spf-basic.fogcreek.com",
"include:mail.zendesk.com",
"include:servers.mcsv.net",
"include:sendgrid.net",
"include:spf.mtasv.net",
"~all"}, " "), res)
if err != nil {
panic(err)
}
spflib.DumpSPF(rec, "")
fmt.Println()
fmt.Println("---------------------")
fmt.Println()
// Look up a real domain's SPF record, then parse and dump it.
var spf string
spf, err = spflib.Lookup("whatexit.org", res)
if err != nil {
panic(err)
}
rec, err = spflib.Parse(spf, res)
if err != nil {
panic(err)
}
spflib.DumpSPF(rec, "")
//res.Close()
}

View File

@@ -1,12 +0,0 @@
{
"_spf.google.com": {
"txt": [
"v=spf1 include:_netblocks.google.com include:_netblocks2.google.com include:_netblocks3.google.com ~all"
]
},
"spf-basic.fogcreek.com": {
"txt": [
"v=spf1 ip4:64.34.80.172 -all"
]
}
}

View File

@@ -1,69 +0,0 @@
{
"_netblocks.google.com": {
"txt": [
"v=spf1 ip4:64.18.0.0/20 ip4:64.233.160.0/19 ip4:66.102.0.0/20 ip4:66.249.80.0/20 ip4:72.14.192.0/18 ip4:74.125.0.0/16 ip4:108.177.8.0/21 ip4:173.194.0.0/16 ip4:207.126.144.0/20 ip4:209.85.128.0/17 ip4:216.58.192.0/19 ip4:216.239.32.0/19 ~all"
]
},
"_netblocks2.google.com": {
"txt": [
"v=spf1 ip6:2001:4860:4000::/36 ip6:2404:6800:4000::/36 ip6:2607:f8b0:4000::/36 ip6:2800:3f0:4000::/36 ip6:2a00:1450:4000::/36 ip6:2c0f:fb50:4000::/36 ~all"
]
},
"_netblocks3.google.com": {
"txt": [
"v=spf1 ip4:172.217.0.0/19 ip4:108.177.96.0/19 ~all"
]
},
"_spf.google.com": {
"txt": [
"v=spf1 include:_netblocks.google.com include:_netblocks2.google.com include:_netblocks3.google.com ~all"
]
},
"mail.zendesk.com": {
"txt": [
"v=spf1 ip4:192.161.144.0/20 ip4:185.12.80.0/22 ip4:96.46.150.192/27 ip4:174.137.46.0/24 ip4:188.172.128.0/20 ip4:216.198.0.0/18 ~all"
]
},
"mailgun.org": {
"txt": [
"google-site-verification=FIGVOKZm6lQFDBJaiC2DdwvBy8TInunoGCt-1gnL4PA",
"v=spf1 include:spf1.mailgun.org include:spf2.mailgun.org ~all"
]
},
"sendgrid.net": {
"txt": [
"v=spf1 ip4:167.89.0.0/17 ip4:208.117.48.0/20 ip4:50.31.32.0/19 ip4:198.37.144.0/20 ip4:198.21.0.0/21 ip4:192.254.112.0/20 ip4:168.245.0.0/17 ~all",
"google-site-verification=NxyooVvVaIgddVa23KTlOEuVPuhffcDqJFV8RzWrAys"
]
},
"servers.mcsv.net": {
"txt": [
"v=spf1 ip4:205.201.128.0/20 ip4:198.2.128.0/18 ip4:148.105.8.0/21 ?all"
]
},
"spf-basic.fogcreek.com": {
"txt": [
"v=spf1 ip4:64.34.80.172 -all"
]
},
"spf.mtasv.net": {
"txt": [
"v=spf1 ip4:50.31.156.96/27 ip4:104.245.209.192/26 ~all"
]
},
"spf1.mailgun.org": {
"txt": [
"v=spf1 ip4:173.193.210.32/27 ip4:50.23.218.192/27 ip4:174.37.226.64/27 ip4:208.43.239.136/30 ip4:184.173.105.0/24 ip4:184.173.153.0/24 ip4:104.130.122.0/23 ip4:146.20.112.0/26 ~all"
]
},
"spf2.mailgun.org": {
"txt": [
"v=spf1 ip4:209.61.151.0/24 ip4:166.78.68.0/22 ip4:198.61.254.0/23 ip4:192.237.158.0/23 ip4:23.253.182.0/23 ip4:104.130.96.0/28 ip4:146.20.113.0/24 ip4:146.20.191.0/24 ~all"
]
},
"whatexit.org": {
"txt": [
"v=spf1 ip6:2607:f2f8:a9c0::3 ip4:174.136.107.195 include:servers.mcsv.net include:_spf.google.com mx:evite.com -all"
]
}
}

103444
docs/flattener/flattener.js Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

62
docs/flattener/index.html Normal file
View File

@@ -0,0 +1,62 @@
<html>
<head>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
<script src="flattener.js"></script>
<style>
ul {
margin: 0px 0px 0px 20px;
list-style: none;
line-height: 2em;
font-family: Arial;
}
ul li {
font-size: 16px;
position: relative;
}
ul li:before {
position: absolute;
left: -15px;
top: 0px;
content: '';
display: block;
border-left: 1px solid #ddd;
height: 1em;
border-bottom: 1px solid #ddd;
width: 10px;
}
ul li:after {
position: absolute;
left: -15px;
bottom: -7px;
content: '';
display: block;
border-left: 1px solid #ddd;
height: 100%;
}
ul li.root {
margin: 0px 0px 0px -20px;
}
ul li.root:before {
display: none;
}
ul li.root:after {
display: none;
}
ul li:last-child:after {
display: none;
}
</style>
</head>
<body>
<div class='container' style='padding-bottom:50px'>
<!-- flattener.js reads #domain and fills #results / #flattened. -->
<!-- Fixed: <input> is a void element, so the invalid </input> close tag
     was removed; the missing </html> close tag was added. -->
<input type="text" id="domain" value="stackoverflow.com">
<button id="lookup_btn">Lookup</button>
<div id="results"></div>
<div id="flattened"></div>
</div>
</body>
</html>

154
docs/flattener/js.go Normal file
View File

@@ -0,0 +1,154 @@
// +build js
package main
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/StackExchange/dnscontrol/pkg/spflib"
"github.com/gopherjs/jquery"
)
// gResolver resolves TXT records in the browser by querying the Google
// public DNS JSON API (dns.google.com) instead of doing real DNS, which
// is unavailable to in-page code.
type gResolver struct{}

// gResp mirrors the subset of the dns.google.com JSON response we read.
type gResp struct {
	Status int
	Answer []struct {
		Data string `json:"data"`
	}
}

// GetTxt returns the TXT record data strings for fqdn, with the
// surrounding quotes stripped from each answer.
func (g gResolver) GetTxt(fqdn string) ([]string, error) {
	resp, err := http.Get("https://dns.google.com/resolve?type=txt&name=" + fqdn)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// A transport-level success can still be an HTTP error; don't try to
	// decode an error page as JSON. (This status was previously unchecked.)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("dns.google.com returned status %d for %s", resp.StatusCode, fqdn)
	}
	dat := &gResp{}
	if err = json.NewDecoder(resp.Body).Decode(dat); err != nil {
		return nil, err
	}
	list := []string{}
	for _, a := range dat.Answer {
		list = append(list, strings.Trim(a.Data, "\""))
	}
	return list, nil
}
// jq is the jquery entry point, used as shorthand throughout this file.
var jq = jquery.NewJQuery

// parsed is the most recently parsed SPF record and domain the name it
// was looked up for. Both are set by the Lookup click handler and read
// by renderResults.
var parsed *spflib.SPFRecord
var domain string

// main wires up the flattener page: clicking Lookup resolves and parses
// the SPF record for #domain, renders the include tree into #results,
// and re-renders the flattened output whenever a checkbox changes.
func main() {
jq(func() {
jq("#lookup_btn").On(jquery.CLICK, func(e jquery.Event) {
// Run in a goroutine so the blocking HTTP lookups happen outside
// the event callback.
go func() {
domain = jq("#domain").Val()
rec, err := spflib.Lookup(domain, gResolver{})
if err != nil {
panic(err)
}
parsed, err = spflib.Parse(rec, gResolver{})
if err != nil {
// todo: show a better error
panic(err)
}
jq("#results").SetHtml(buildHTML(parsed, domain))
// Re-render the flattened output when any include checkbox toggles.
jq(".cb").On(jquery.CHANGE, func(e jquery.Event) {
updateDisabledChecks()
renderResults()
})
updateDisabledChecks()
renderResults()
}()
})
})
}
// updateDisabledChecks re-enables every checkbox, then disables every
// checkbox nested under an unchecked one: a child include can only be
// selected if its parent include is also selected.
func updateDisabledChecks() {
jq("input:checkbox").Each(func(i int, el interface{}) {
fmt.Println(jq(el).Prop("disabled"))
jq(el).SetProp("disabled", false)
})
jq("input:checkbox:not(:checked)").Each(func(i int, el interface{}) {
fmt.Println(jq(el).Attr("id"))
// Skip past the <label> to the nested <ul> emitted by genPart and
// disable every checkbox inside it.
jq(el).Next().Next().Find("input:checkbox").Each(func(i int, el interface{}) {
fmt.Println("^^", jq(el).Attr("id"))
jq(el).SetProp("disabled", true)
})
})
}
// renderResults renders the "Fully" flattened SPF record and, if any
// include checkboxes are checked, a "Selectively" flattened variant,
// into #flattened. Records too long for one TXT string also get their
// split (chained) form rendered.
func renderResults() {
content := ""
// addFlattened renders one flattening pass: the single-record form
// plus, if it overflows, the split multi-record chain.
addFlattened := func(mode string, filter string) {
flat := parsed.Flatten(filter)
lookups := 0
if filter != "*" {
// Each flattened include removes one lookup from the original total.
lookups = parsed.Lookups() - len(strings.Split(filter, ","))
}
content += fmt.Sprintf(`
<h3> %s flattened (length %d, %d lookups)</h3><code>%s</code>
`, mode, len(flat.TXT()), lookups, flat.TXT())
// Chain records are named _spf1.<domain>, _spf2.<domain>, ...
split := flat.TXTSplit("_spf%d." + domain)
if len(split) > 1 {
// Each chained record costs one extra include: lookup.
lookups += len(split) - 1
content += fmt.Sprintf("<h3>%s flattened split (%d lookups)</h3>", mode, lookups)
for k, v := range split {
content += fmt.Sprintf("<h4>%s</h4><code>%s</code>", k, v)
}
}
}
addFlattened("Fully", "*")
// look for selected divs
filters := []string{}
jq("input:checked").Each(func(i int, el interface{}) {
filters = append(filters, jq(el).Attr("id"))
})
if len(filters) > 0 {
addFlattened("Selectively", strings.Join(filters, ","))
}
jq("#flattened").SetHtml(content)
}
// buildHTML renders the page header (domain name plus total lookup
// count) followed by the full include tree for rec.
func buildHTML(rec *spflib.SPFRecord, domain string) string {
	header := fmt.Sprintf("<h1>%s</h1><h2>%d lookups</h2>", domain, rec.Lookups())
	return header + genRoot(rec)
}
// genRoot emits the top-level <ul> for an SPF record: the full record
// text as the root list item, followed by one entry per part.
// html based on https://codepen.io/khoama/pen/hpljA
func genRoot(rec *spflib.SPFRecord) string {
	out := fmt.Sprintf("\n<ul>\n<li class='root'>%s</li>\n", rec.TXT())
	for i := range rec.Parts {
		out += genPart(rec.Parts[i])
	}
	return out + "</ul>"
}
// genPart renders one SPF part as a list item. Non-lookup parts become
// plain text; include: parts get a checkbox (id/name = the included
// domain) plus a nested list of the included record's parts.
func genPart(rec *spflib.SPFPart) string {
	if !rec.IsLookup {
		return fmt.Sprintf(`<li>%s</li>`, rec.Text)
	}
	dom := rec.IncludeDomain
	out := fmt.Sprintf(`<li>
<input type="checkbox" class='cb' id="%s" name="%s" />
<label for="%s">%s(%d lookups)</label>`, dom, dom, dom, rec.Text, rec.IncludeRecord.Lookups()+1)
	out += "<ul>"
	for i := range rec.IncludeRecord.Parts {
		out += genPart(rec.IncludeRecord.Parts[i])
	}
	return out + "</ul>" + "</li>"
}

View File

@@ -1,4 +1,4 @@
$TTL 300
@ IN SOA DEFAULT_NOT_SET. DEFAULT_NOT_SET. 2017072603 3600 600 604800 1440
@ IN SOA DEFAULT_NOT_SET. DEFAULT_NOT_SET. 2017091830 3600 600 604800 1440
IN NS ns1.otherdomain.tld.
IN NS ns2.otherdomain.tld.

View File

@@ -0,0 +1 @@
// Declare the reverse zone for 1.2.0.0/16 (i.e. 2.1.in-addr.arpa) with
// registrar "none" and no records.
D(REVERSE("1.2.0.0/16"),"none");

View File

@@ -0,0 +1,13 @@
{
"registrars": [],
"dns_providers": [],
"domains": [
{
"name": "2.1.in-addr.arpa",
"registrar": "none",
"dnsProviders": {},
"records": [],
"keepunknown": false
}
]
}

View File

@@ -1,28 +0,0 @@
package dnsresolver
// dnsCache implements a very simple in-memory DNS answer cache, keyed by
// FQDN and then by record type. It stores the entire answer set (i.e.
// all TXT records); filtering out the non-SPF answers is done at a
// higher layer. At this time the only rtype used is "txt". Eventually
// A/AAAA/CNAME records would need caching to do CNAME flattening.
type dnsCache map[string]map[string][]string // map[fqdn]map[rtype] -> answers

// get returns the cached answers for (label, rtype) and whether an entry
// was present.
func (c dnsCache) get(label, rtype string) ([]string, bool) {
	byType, found := c[label]
	if !found {
		return nil, false
	}
	answers, found := byType[rtype]
	if !found {
		return nil, false
	}
	return answers, true
}

// put stores the answer set for (label, rtype), creating the per-label
// map on first use.
func (c dnsCache) put(label, rtype string, answers []string) {
	if _, found := c[label]; !found {
		c[label] = make(map[string][]string)
	}
	c[label][rtype] = answers
}

View File

@@ -1,31 +0,0 @@
package dnsresolver
import "testing"
// TestDnsCache verifies dnsCache round-trips answers via put/get and
// reports misses for unknown labels and unknown rtypes.
func TestDnsCache(t *testing.T) {
	cache := dnsCache{}
	cache.put("one", "txt", []string{"a", "b", "c"})
	cache.put("two", "txt", []string{"d", "e", "f"})
	a, b := cache.get("one", "txt")
	if !(b == true && len(a) == 3 && a[0] == "a" && a[1] == "b" && a[2] == "c") {
		t.Errorf("one-txt didn't work")
	}
	a, b = cache.get("two", "txt")
	if !(b == true && len(a) == 3 && a[0] == "d" && a[1] == "e" && a[2] == "f") {
		// Fixed copy-paste bug: this message previously said "one-txt".
		t.Errorf("two-txt didn't work")
	}
	if _, ok := cache.get("three", "txt"); ok {
		t.Errorf("three-txt didn't work")
	}
	if _, ok := cache.get("two", "not"); ok {
		t.Errorf("two-not didn't work")
	}
}

View File

@@ -1,83 +0,0 @@
package dnsresolver
import (
"encoding/json"
"io/ioutil"
"net"
"github.com/pkg/errors"
)
// This file includes all the DNS Resolvers used by package spf.
// DnsResolver looks up txt strings associated with a FQDN.
type DnsResolver interface {
GetTxt(string) ([]string, error) // Given a DNS label, return the TXT values records.
}
// The "Live DNS" Resolver:
// dnsLive performs real DNS lookups, caching every answer in memory so
// the whole session can be written to a JSON file by Close.
type dnsLive struct {
filename string
cache dnsCache
}
// NewResolverLive returns a resolver that does live DNS lookups, records
// them, and writes them to filename when Close is called.
func NewResolverLive(filename string) *dnsLive {
// Does live DNS lookups. Records them. Writes file on Close.
c := &dnsLive{filename: filename}
c.cache = dnsCache{}
return c
}
// GetTxt returns the TXT records for label, consulting the in-memory
// cache first and falling back to a live net.LookupTXT. Only successful
// lookups are cached.
func (c *dnsLive) GetTxt(label string) ([]string, error) {
// Try the cache.
txts, ok := c.cache.get(label, "txt")
if ok {
return txts, nil
}
// Populate the cache:
t, err := net.LookupTXT(label)
if err == nil {
c.cache.put(label, "txt", t)
}
return t, err
}
// Close writes the accumulated cache to c.filename as indented JSON.
// NOTE(review): both the MarshalIndent and WriteFile errors are
// discarded, so a failed save is silent.
func (c *dnsLive) Close() {
// Write out and close the file.
m, _ := json.MarshalIndent(c.cache, "", " ")
m = append(m, "\n"...)
ioutil.WriteFile(c.filename, m, 0666)
}
// The "Pre-Cached DNS" Resolver:
// dnsPreloaded answers only from a cache file previously written by
// (*dnsLive).Close; it never touches the network.
type dnsPreloaded struct {
cache dnsCache
}
// NewResolverPreloaded loads a JSON cache file (as written by
// (*dnsLive).Close) and returns a resolver that answers from it.
func NewResolverPreloaded(filename string) (*dnsPreloaded, error) {
c := &dnsPreloaded{}
c.cache = dnsCache{}
j, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
err = json.Unmarshal(j, &(*c).cache)
return c, err
}
// DumpCache exposes the underlying cache map.
func (c *dnsPreloaded) DumpCache() dnsCache {
return c.cache
}
// GetTxt returns the cached TXT records for label, or an error if the
// label is not present in the preloaded file.
func (c *dnsPreloaded) GetTxt(label string) ([]string, error) {
// Try the cache.
txts, ok := c.cache.get(label, "txt")
if ok {
return txts, nil
}
return nil, errors.Errorf("No preloaded DNS entry for: %#v", label)
}

79
pkg/normalize/flatten.go Normal file
View File

@@ -0,0 +1,79 @@
package normalize
import (
"fmt"
"strings"
"github.com/miekg/dns/dnsutil"
"github.com/StackExchange/dnscontrol/models"
"github.com/StackExchange/dnscontrol/pkg/spflib"
)
// flattenSPFs flattens and/or splits any apex SPF TXT records that
// request it via the "flatten"/"split" record metadata, for every domain
// in cfg. It returns accumulated errors; Warning values in the result
// are treated as non-fatal by callers. (The previous comment here,
// describing a hasSpfRecords predicate, was stale.)
func flattenSPFs(cfg *models.DNSConfig) []error {
var cache spflib.CachingResolver
var errs []error
var err error
for _, domain := range cfg.Domains {
apexTXTs := domain.Records.Grouped()[models.RecordKey{Type: "TXT", Name: "@"}]
// flatten all spf records that have the "flatten" metadata
for _, txt := range apexTXTs {
var rec *spflib.SPFRecord
if txt.Metadata["flatten"] != "" || txt.Metadata["split"] != "" {
// The resolver cache is created lazily, only if some record
// actually needs SPF processing.
if cache == nil {
cache, err = spflib.NewCache("spfcache.json")
if err != nil {
return []error{err}
}
}
rec, err = spflib.Parse(txt.Target, cache)
if err != nil {
errs = append(errs, err)
continue
}
}
if flatten, ok := txt.Metadata["flatten"]; ok && strings.HasPrefix(txt.Target, "v=spf1") {
rec = rec.Flatten(flatten)
txt.Target = rec.TXT()
}
// now split if needed
if split, ok := txt.Metadata["split"]; ok {
if !strings.Contains(split, "%d") {
errs = append(errs, Warning{fmt.Errorf("Split format `%s` in `%s` is not proper format (should have %%d in it)", split, txt.NameFQDN)})
continue
}
recs := rec.TXTSplit(split + "." + domain.Name)
for k, v := range recs {
if k == "@" {
// "@" holds the root record; overwrite the original TXT.
txt.Target = v
} else {
// Chained records become new TXT records in the domain.
cp, _ := txt.Copy()
cp.Target = v
cp.NameFQDN = k
cp.Name = dnsutil.TrimDomainName(k, domain.Name)
domain.Records = append(domain.Records, cp)
}
}
}
}
}
if cache == nil {
return errs
}
// check if cache is stale
for _, e := range cache.ResolveErrors() {
errs = append(errs, Warning{fmt.Errorf("problem resolving SPF record: %s", e)})
}
if len(cache.ResolveErrors()) == 0 {
changed := cache.ChangedRecords()
if len(changed) > 0 {
if err := cache.Save("spfcache.updated.json"); err != nil {
errs = append(errs, err)
} else {
// Fixed: the message previously named "dnscache.updated.json",
// but the file written above is spfcache.updated.json.
errs = append(errs, Warning{fmt.Errorf("%d spf record lookups are out of date with cache (%s). Wrote changes to spfcache.updated.json. Please rename and commit", len(changed), strings.Join(changed, ","))})
}
}
}
return errs
}

View File

@@ -314,7 +314,12 @@ func NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {
}
}
// Process any pseudo-records:
// SPF flattening
if ers := flattenSPFs(config); len(ers) > 0 {
errs = append(errs, ers...)
}
// Process IMPORT_TRANSFORM
for _, domain := range config.Domains {
for _, rec := range domain.Records {
if rec.Type == "IMPORT_TRANSFORM" {
@@ -334,7 +339,6 @@ func NormalizeAndValidateConfig(config *models.DNSConfig) (errs []error) {
for _, domain := range config.Domains {
deleteImportTransformRecords(domain)
}
// Run record transforms
for _, domain := range config.Domains {
if err := applyRecordTransforms(domain); err != nil {

102
pkg/spflib/flatten.go Normal file
View File

@@ -0,0 +1,102 @@
package spflib
import (
"fmt"
"strings"
)
// TXT renders the record back into a single SPF TXT string, e.g.
// "v=spf1 ip4:1.2.3.0/24 ~all".
func (s *SPFRecord) TXT() string {
	var b strings.Builder
	b.WriteString("v=spf1")
	for _, part := range s.Parts {
		b.WriteString(" ")
		b.WriteString(part.Text)
	}
	return b.String()
}
// maxLen is the maximum length of a single TXT string; records longer
// than this are split into a chain of records.
const maxLen = 255

//TXTSplit returns a set of txt records to use for SPF.
//pattern given is used to name all chained spf records.
//pattern should include %d, which will be replaced by a counter.
//should result in fqdn after replacement
//returned map will have keys with fqdn of resulting records.
//root record will be under key "@"
func (s *SPFRecord) TXTSplit(pattern string) map[string]string {
m := map[string]string{}
s.split("@", pattern, 1, m)
return m
}

// split recursively fills m: it emits as many of s's parts as fit at
// thisfqdn (with a trailing "include:" pointing at the next record in
// the chain, plus the record's final term), then recurses with the
// leftover parts under the next generated FQDN.
func (s *SPFRecord) split(thisfqdn string, pattern string, nextIdx int, m map[string]string) {
base := s.TXT()
// simple case. it fits
if len(base) <= maxLen {
m[thisfqdn] = base
return
}
// we need to trim.
// take parts while we fit
nextFQDN := fmt.Sprintf(pattern, nextIdx)
// The last part (expected to be the "all" terminator) is repeated on
// this record's tail; the leftovers carry it too.
lastPart := s.Parts[len(s.Parts)-1]
tail := " include:" + nextFQDN + " " + lastPart.Text
thisText := "v=spf1"
newRec := &SPFRecord{}
over := false
addedCount := 0
for _, part := range s.Parts {
if !over {
// +1 accounts for the separating space before part.Text.
if len(thisText)+1+len(part.Text)+len(tail) <= maxLen {
thisText += " " + part.Text
addedCount++
} else {
over = true
if addedCount == 0 {
//the first part is too big to include. We kinda have to give up here.
m[thisfqdn] = base
return
}
}
}
if over {
// Everything that didn't fit is carried into the next record.
newRec.Parts = append(newRec.Parts, part)
}
}
m[thisfqdn] = thisText + tail
newRec.split(nextFQDN, pattern, nextIdx+1, m)
}
// Flatten returns a copy of s in which every include: part selected by
// spec (see matchesFlatSpec; "*" selects all) is replaced inline by the
// parts of its included record, recursively. The included record's final
// "all" term is dropped; all other parts are copied through unchanged.
func (s *SPFRecord) Flatten(spec string) *SPFRecord {
	out := &SPFRecord{}
	for _, part := range s.Parts {
		// Non-includes, and includes outside the spec, pass through as-is.
		if part.IncludeRecord == nil || !matchesFlatSpec(spec, part.IncludeDomain) {
			out.Parts = append(out.Parts, part)
			continue
		}
		// Flatten the child recursively and splice in its parts, skipping
		// its trailing "all" term.
		child := part.IncludeRecord.Flatten(spec)
		out.Parts = append(out.Parts, child.Parts[:len(child.Parts)-1]...)
	}
	return out
}
// matchesFlatSpec reports whether fqdn is selected by spec: either the
// wildcard "*", or a comma-separated list containing fqdn exactly.
func matchesFlatSpec(spec, fqdn string) bool {
	if spec == "*" {
		return true
	}
	names := strings.Split(spec, ",")
	for i := range names {
		if names[i] == fqdn {
			return true
		}
	}
	return false
}

106
pkg/spflib/flatten_test.go Normal file
View File

@@ -0,0 +1,106 @@
package spflib
import (
"strings"
"testing"
)
// TestFlatten parses a large real-world-style SPF record using canned
// DNS data (testdata-dns1.json), flattens the mailgun.org include, and
// logs the record tree before and after for manual inspection.
func TestFlatten(t *testing.T) {
res, err := NewCache("testdata-dns1.json")
if err != nil {
t.Fatal(err)
}
rec, err := Parse(strings.Join([]string{"v=spf1",
"ip4:198.252.206.0/24",
"ip4:192.111.0.0/24",
"include:_spf.google.com",
"include:mailgun.org",
"include:spf-basic.fogcreek.com",
"include:mail.zendesk.com",
"include:servers.mcsv.net",
"include:sendgrid.net",
"include:spf.mtasv.net",
"~all"}, " "), res)
if err != nil {
t.Fatal(err)
}
t.Log(rec.Print())
rec = rec.Flatten("mailgun.org")
t.Log(rec.Print())
}
// each test is array of strings.
// first item is unsplit input
// next is @ spf record
// after that is alternating record fqdn and value
// i.e. tst[0] = case name, tst[1] = input record, tst[2] = expected "@"
// record, then (tst[3], tst[4]) = first chained fqdn/value, and so on.
var splitTests = [][]string{
{
"simple",
"v=spf1 -all",
"v=spf1 -all",
},
{
"longsimple",
"v=spf1 include:a01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
"v=spf1 include:a01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
},
{
"long simple multipart",
"v=spf1 include:a.com include:b.com include:12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
"v=spf1 include:a.com include:b.com include:12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
},
{
"overflow",
"v=spf1 include:a.com include:b.com include:X12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
"v=spf1 include:a.com include:b.com include:_spf1.stackex.com -all",
"_spf1.stackex.com",
"v=spf1 include:X12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
},
{
"overflow all sign carries",
"v=spf1 include:a.com include:b.com include:X12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com ~all",
"v=spf1 include:a.com include:b.com include:_spf1.stackex.com ~all",
"_spf1.stackex.com",
"v=spf1 include:X12345678901234567890123456789000000000000000123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com ~all",
},
{
"really big",
"v=spf1 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178" +
" ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178" +
" ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 -all",
"v=spf1 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 include:_spf1.stackex.com -all",
"_spf1.stackex.com",
"v=spf1 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 include:_spf2.stackex.com -all",
"_spf2.stackex.com",
"v=spf1 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 include:_spf3.stackex.com -all",
"_spf3.stackex.com",
"v=spf1 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 ip4:200.192.169.178 -all",
},
{
"too long to split",
"v=spf1 include:a0123456789012345678901234567890123456789012345sssss6789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
"v=spf1 include:a0123456789012345678901234567890123456789012345sssss6789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789.com -all",
},
}
// TestSplit runs each splitTests case through TXTSplit with the pattern
// "_spf%d.stackex.com" and verifies the root ("@") record plus every
// chained record matches expectations.
func TestSplit(t *testing.T) {
for _, tst := range splitTests {
t.Run(tst[0], func(t *testing.T) {
// nil resolver: these inputs contain no include: parts that need
// resolving beyond their literal text.
rec, err := Parse(tst[1], nil)
if err != nil {
t.Fatal(err)
}
res := rec.TXTSplit("_spf%d.stackex.com")
if res["@"] != tst[2] {
t.Fatalf("Root record wrong. \nExp %s\ngot %s", tst[2], res["@"])
}
// Remaining entries are (fqdn, expected-value) pairs.
for i := 3; i < len(tst); i += 2 {
fqdn := tst[i]
exp := tst[i+1]
if res[fqdn] != exp {
t.Fatalf("Record %s.\nExp %s\ngot %s", fqdn, exp, res[fqdn])
}
}
})
}
}

View File

@@ -4,38 +4,33 @@ import (
"fmt"
"strings"
"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
"bytes"
"io"
)
type SPFRecord struct {
Lookups int
Parts []*SPFPart
Parts []*SPFPart
}
// Lookups returns the total number of DNS lookups this record requires:
// one per lookup-type part, plus (recursively) the lookups of every
// included record.
func (s *SPFRecord) Lookups() int {
count := 0
for _, p := range s.Parts {
if p.IsLookup {
count++
}
if p.IncludeRecord != nil {
count += p.IncludeRecord.Lookups()
}
}
return count
}
type SPFPart struct {
Text string
Lookups int
IsLookup bool
IncludeRecord *SPFRecord
}
func Lookup(target string, dnsres dnsresolver.DnsResolver) (string, error) {
txts, err := dnsres.GetTxt(target)
if err != nil {
return "", err
}
var result []string
for _, txt := range txts {
if strings.HasPrefix(txt, "v=spf1 ") {
result = append(result, txt)
}
}
if len(result) == 0 {
return "", fmt.Errorf("%s has no spf TXT records", target)
}
if len(result) != 1 {
return "", fmt.Errorf("%s has multiple spf TXT records", target)
}
return result[0], nil
IncludeDomain string
}
var qualifiers = map[byte]bool{
@@ -45,7 +40,7 @@ var qualifiers = map[byte]bool{
'+': true,
}
func Parse(text string, dnsres dnsresolver.DnsResolver) (*SPFRecord, error) {
func Parse(text string, dnsres Resolver) (*SPFRecord, error) {
if !strings.HasPrefix(text, "v=spf1 ") {
return nil, fmt.Errorf("Not an spf record")
}
@@ -61,24 +56,23 @@ func Parse(text string, dnsres dnsresolver.DnsResolver) (*SPFRecord, error) {
//all. nothing else matters.
break
} else if strings.HasPrefix(part, "a") || strings.HasPrefix(part, "mx") {
rec.Lookups++
p.Lookups = 1
p.IsLookup = true
} else if strings.HasPrefix(part, "ip4:") || strings.HasPrefix(part, "ip6:") {
//ip address, 0 lookups
continue
} else if strings.HasPrefix(part, "include:") {
rec.Lookups++
includeTarget := strings.TrimPrefix(part, "include:")
subRecord, err := Lookup(includeTarget, dnsres)
if err != nil {
return nil, err
p.IsLookup = true
p.IncludeDomain = strings.TrimPrefix(part, "include:")
if dnsres != nil {
subRecord, err := dnsres.GetSPF(p.IncludeDomain)
if err != nil {
return nil, err
}
p.IncludeRecord, err = Parse(subRecord, dnsres)
if err != nil {
return nil, fmt.Errorf("In included spf: %s", err)
}
}
p.IncludeRecord, err = Parse(subRecord, dnsres)
if err != nil {
return nil, fmt.Errorf("In included spf: %s", err)
}
rec.Lookups += p.IncludeRecord.Lookups
p.Lookups = p.IncludeRecord.Lookups + 1
} else {
return nil, fmt.Errorf("Unsupported spf part %s", part)
}
@@ -87,21 +81,27 @@ func Parse(text string, dnsres dnsresolver.DnsResolver) (*SPFRecord, error) {
return rec, nil
}
// DumpSPF outputs an SPFRecord and related data for debugging purposes.
func DumpSPF(rec *SPFRecord, indent string) {
fmt.Printf("%sTotal Lookups: %d\n", indent, rec.Lookups)
fmt.Print(indent + "v=spf1")
func dump(rec *SPFRecord, indent string, w io.Writer) {
fmt.Fprintf(w, "%sTotal Lookups: %d\n", indent, rec.Lookups())
fmt.Fprint(w, indent+"v=spf1")
for _, p := range rec.Parts {
fmt.Print(" " + p.Text)
fmt.Fprint(w, " "+p.Text)
}
fmt.Println()
fmt.Fprintln(w)
indent += "\t"
for _, p := range rec.Parts {
if p.Lookups > 0 {
fmt.Println(indent + p.Text)
if p.IsLookup {
fmt.Fprintln(w, indent+p.Text)
}
if p.IncludeRecord != nil {
DumpSPF(p.IncludeRecord, indent+"\t")
dump(p.IncludeRecord, indent+"\t", w)
}
}
}
// Print renders the record and its include tree as a multi-line,
// indented debugging string.
func (rec *SPFRecord) Print() string {
w := &bytes.Buffer{}
dump(rec, "", w)
return w.String()
}

View File

@@ -3,12 +3,10 @@ package spflib
import (
"strings"
"testing"
"github.com/StackExchange/dnscontrol/pkg/dnsresolver"
)
func TestParse(t *testing.T) {
dnsres, err := dnsresolver.NewResolverPreloaded("testdata-dns1.json")
dnsres, err := NewCache("testdata-dns1.json")
if err != nil {
t.Fatal(err)
}
@@ -26,5 +24,5 @@ func TestParse(t *testing.T) {
if err != nil {
t.Fatal(err)
}
DumpSPF(rec, "")
t.Log(rec.Print())
}

135
pkg/spflib/resolver.go Normal file
View File

@@ -0,0 +1,135 @@
package spflib
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"strings"
)
// Resolver looks up spf txt records associated with a FQDN.
type Resolver interface {
GetSPF(string) (string, error)
}
// LiveResolver simply queries DNS to resolve SPF records.
type LiveResolver struct{}
// GetSPF fetches the TXT records for name and returns the single record
// beginning with "v=spf1". Zero or multiple v=spf1 records are errors.
func (l LiveResolver) GetSPF(name string) (string, error) {
vals, err := net.LookupTXT(name)
if err != nil {
return "", err
}
spf := ""
for _, v := range vals {
if strings.HasPrefix(v, "v=spf1") {
// A second v=spf1 record would be ambiguous; reject it.
if spf != "" {
return "", fmt.Errorf("%s has multiple SPF records", name)
}
spf = v
}
}
if spf == "" {
return "", fmt.Errorf("%s has no SPF record", name)
}
return spf, nil
}
// CachingResolver wraps a live resolver and adds caching to it.
// GetSPF will always return the cached value, if present.
// It will also query the inner resolver and compare results.
// If a given lookup has inconsistencies between cache and live,
// GetSPF will return the cached result.
// All records queries will be stored for the lifetime of the resolver,
// and can be flushed to disk at the end.
// All resolution errors from the inner resolver will be saved and can be retrieved later.
type CachingResolver interface {
Resolver
ChangedRecords() []string
ResolveErrors() []error
Save(filename string) error
}
// cacheEntry is the stored state for one FQDN.
type cacheEntry struct {
SPF string // cached value loaded from disk; empty if none
// value we have looked up this run
resolvedSPF string
resolveError error
}
// cache implements CachingResolver on top of an inner (live) Resolver.
type cache struct {
records map[string]*cacheEntry
inner Resolver
}
// NewCache returns a CachingResolver backed by the JSON cache file at
// filename and a LiveResolver for fresh lookups. A missing file is not
// an error: the resolver simply starts with an empty cache.
func NewCache(filename string) (CachingResolver, error) {
	f, err := os.Open(filename)
	if err != nil {
		if os.IsNotExist(err) {
			// doesn't exist, just make a new one
			return &cache{
				records: map[string]*cacheEntry{},
				inner:   LiveResolver{},
			}, nil
		}
		return nil, err
	}
	// Close the handle once decoded; it was previously leaked.
	defer f.Close()
	recs := map[string]*cacheEntry{}
	if err := json.NewDecoder(f).Decode(&recs); err != nil {
		return nil, err
	}
	return &cache{
		records: recs,
		inner:   LiveResolver{},
	}, nil
}
// GetSPF implements Resolver. It registers name in the cache, performs
// the live lookup at most once per run, and prefers the previously
// cached SPF value over the freshly resolved one (discrepancies are
// surfaced via ChangedRecords, not here).
func (c *cache) GetSPF(name string) (string, error) {
entry, ok := c.records[name]
if !ok {
entry = &cacheEntry{}
c.records[name] = entry
}
// Resolve live at most once per name per run.
if entry.resolvedSPF == "" && entry.resolveError == nil {
entry.resolvedSPF, entry.resolveError = c.inner.GetSPF(name)
}
// return cached value
if entry.SPF != "" {
return entry.SPF, nil
}
// if not cached, return results of inner resolver
return entry.resolvedSPF, entry.resolveError
}
// ChangedRecords lists every FQDN whose live lookup this run differs
// from the value stored in the cache (including names that failed to
// resolve, whose resolvedSPF is empty).
func (c *cache) ChangedRecords() []string {
	changed := make([]string, 0, len(c.records))
	for fqdn, entry := range c.records {
		if entry.SPF != entry.resolvedSPF {
			changed = append(changed, fqdn)
		}
	}
	return changed
}
// ResolveErrors returns every error produced by the inner resolver this
// run; nil if all lookups succeeded.
func (c *cache) ResolveErrors() []error {
	var errs []error
	for _, entry := range c.records {
		if err := entry.resolveError; err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
// Save promotes this run's successfully resolved values into the cached
// SPF fields and writes the whole cache to filename as indented JSON.
func (c *cache) Save(filename string) error {
	for _, entry := range c.records {
		if entry.resolvedSPF != "" {
			entry.SPF = entry.resolvedSPF
		}
	}
	// The marshal error was previously discarded; report it instead of
	// silently writing nothing.
	dat, err := json.MarshalIndent(c.records, "", " ")
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, dat, 0644)
}