From 2cfd67e4fa2b271822a591e54454f7cf4014870e Mon Sep 17 00:00:00 2001 From: Craig Peterson Date: Tue, 3 Oct 2017 11:53:56 -0400 Subject: [PATCH] Namecheap take Provider (#202) * re-copying namecheap dns provider and testing * document limits. mx broken because super odd api * manually path namecheap lib. passing tests. * generate * generate * clarify limit * conflict * add dependency * fully document namecheap capabilities --- build/generate/featureMatrix.go | 6 +- docs/_includes/matrix.html | 32 +- integrationTest/providers.json | 5 + providers/capabilities.go | 18 +- providers/namecheap/namecheap.go | 173 +- vendor/golang.org/x/net/publicsuffix/gen.go | 713 ++ vendor/golang.org/x/net/publicsuffix/list.go | 135 + vendor/golang.org/x/net/publicsuffix/table.go | 9419 +++++++++++++++++ vendor/vendor.json | 6 + 9 files changed, 10486 insertions(+), 21 deletions(-) create mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go diff --git a/build/generate/featureMatrix.go b/build/generate/featureMatrix.go index 1eae083bf..060292142 100644 --- a/build/generate/featureMatrix.go +++ b/build/generate/featureMatrix.go @@ -129,10 +129,10 @@ var tmpl = template.Must(template.New("").Funcs(template.FuncMap{ {{range .Features}}{{$name := .Name}} {{$name}} {{range $pname, $features := $providers}}{{$f := index $features $name}}{{if $f -}} - - {{if $f.Link}}{{end}}{{if $f.Link}}{{end}} + {{if $f.Link}}{{end}}{{if $f.Link}}{{end}} {{- else}}{{end}} {{end -}} diff --git a/docs/_includes/matrix.html b/docs/_includes/matrix.html index bc1bf51ee..2c1740e19 100644 --- a/docs/_includes/matrix.html +++ b/docs/_includes/matrix.html @@ -122,8 +122,8 @@ - - + + @@ -151,7 +151,9 @@ - + + + @@ -182,7 +184,9 @@ - + + + @@ -213,7 +217,9 @@ - + + + @@ -238,7 +244,9 @@ - + + + @@ -257,7 +265,9 @@ - + + + @@ -282,7 +292,9 @@ - + + + @@ -356,8 +368,8 @@ - - + + diff --git a/integrationTest/providers.json b/integrationTest/providers.json index 2f8b75c81..3ef449fe8 100644 --- a/integrationTest/providers.json +++ b/integrationTest/providers.json @@ -46,6 +46,11 @@ "apiuser": "$NAMEDOTCOM_USER", "domain": "$NAMEDOTCOM_DOMAIN" }, + "NAMECHEAP": { + "apikey": "$NAMECHEAP_KEY", + "apiuser": "$NAMECHEAP_USER", + "domain": "$NAMECHEAP_DOMAIN" + }, "ROUTE53": { "KeyId": "$R53_KEY_ID", "SecretKey": "$R53_KEY", diff --git a/providers/capabilities.go b/providers/capabilities.go index b322b50b0..e3cf95d74 100644 --- a/providers/capabilities.go +++ b/providers/capabilities.go @@ -44,9 +44,10 @@ func ProviderHasCabability(pType string, cap Capability) bool { // DocumentationNote is a way for providers to give more detail about what features they support. type DocumentationNote struct { - HasFeature bool - Comment string - Link string + HasFeature bool + Unimplemented bool + Comment string + Link string } // DocumentationNotes is a full list of notes for a single provider @@ -100,6 +101,17 @@ func Cannot(comments ...string) *DocumentationNote { return n } +// Unimplemented is a small helper for concisely creating Documentation Notes +// comments are variadic for easy ommission. First is comment, second is link, the rest are ignored. 
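+// Providers use it in their DocumentationNotes, for example:
+//   providers.CanUseSRV: providers.Unimplemented("srv records are supported by the api, but not implemented yet"),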
+func Unimplemented(comments ...string) *DocumentationNote { + n := &DocumentationNote{ + HasFeature: false, + Unimplemented: true, + } + n.addStrings(comments) + return n +} + func (n *DocumentationNote) addStrings(comments []string) { if len(comments) > 0 { n.Comment = comments[0] diff --git a/providers/namecheap/namecheap.go b/providers/namecheap/namecheap.go index 0bd792781..00ddb6d2d 100644 --- a/providers/namecheap/namecheap.go +++ b/providers/namecheap/namecheap.go @@ -1,15 +1,22 @@ package namecheap import ( + "encoding/json" "fmt" "sort" "strings" + "golang.org/x/net/publicsuffix" + "github.com/StackExchange/dnscontrol/models" "github.com/StackExchange/dnscontrol/providers" + "github.com/StackExchange/dnscontrol/providers/diff" nc "github.com/billputer/go-namecheap" + "github.com/miekg/dns/dnsutil" ) +var NamecheapDefaultNs = []string{"dns1.registrar-servers.com", "dns2.registrar-servers.com"} + type Namecheap struct { ApiKey string ApiUser string @@ -19,32 +26,187 @@ type Namecheap struct { var docNotes = providers.DocumentationNotes{ providers.DocCreateDomains: providers.Cannot("Requires domain registered through their service"), providers.DocOfficiallySupported: providers.Cannot(), + providers.DocDualHost: providers.Cannot("Doesn't allow control of apex NS records"), + providers.CanUseAlias: providers.Cannot(), + providers.CanUseCAA: providers.Cannot(), + providers.CanUseSRV: providers.Unimplemented("namecheap supports srv records, we just need someone to implement it and make sure the tests pass."), + providers.CanUsePTR: providers.Cannot(), + providers.CanUseTLSA: providers.Cannot(), } func init() { providers.RegisterRegistrarType("NAMECHEAP", newReg, docNotes) - // NOTE(tlim): If in the future the DNS Service Provider is implemented, - // most likely it will require providers.CantUseNOPURGE. + providers.RegisterDomainServiceProviderType("NAMECHEAP", newDsp, providers.CantUseNOPURGE) } -func newReg(m map[string]string) (providers.Registrar, error) { +func newDsp(conf map[string]string, metadata json.RawMessage) (providers.DNSServiceProvider, error) { + return newProvider(conf, metadata) +} + +func newReg(conf map[string]string) (providers.Registrar, error) { + return newProvider(conf, nil) +} + +func newProvider(m map[string]string, metadata json.RawMessage) (*Namecheap, error) { api := &Namecheap{} api.ApiUser, api.ApiKey = m["apiuser"], m["apikey"] if api.ApiKey == "" || api.ApiUser == "" { return nil, fmt.Errorf("Namecheap apikey and apiuser must be provided.") } api.client = nc.NewClient(api.ApiUser, api.ApiKey, api.ApiUser) - // if BaseURL is specified in creds, use that url BaseURL, ok := m["BaseURL"] if ok { api.client.BaseURL = BaseURL } - return api, nil } +func splitDomain(domain string) (sld string, tld string) { + tld, _ = publicsuffix.PublicSuffix(domain) + d, _ := publicsuffix.EffectiveTLDPlusOne(domain) + sld = strings.Split(d, ".")[0] + return sld, tld +} + +// namecheap has request limiting at unpublished limits +// this channel acts as a global rate limiter +// read from it before every request +// from support in SEP-2017: +// "The limits for the API calls will be 20/Min, 700/Hour and 8000/Day for one user. +// If you can limit the requests within these it should be fine." 
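+//
+// Each API call takes one token by reading from this channel before issuing a
+// request; the goroutine started in init below keeps the channel topped up to
+// a burst of at most 20 outstanding tokens.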
+var throttle = make(chan bool, 20) + +func init() { + go func() { + for { + // add (up to) 20 requests every minute + for i := 0; i < 20; i++ { + select { + case throttle <- true: + default: + } + } + } + }() +} + +func (n *Namecheap) GetDomainCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + dc.Punycode() + sld, tld := splitDomain(dc.Name) + <-throttle + records, err := n.client.DomainsDNSGetHosts(sld, tld) + if err != nil { + return nil, err + } + + var actual []*models.RecordConfig + + // namecheap does not allow setting @ NS with basic DNS + dc.Filter(func(r *models.RecordConfig) bool { + if r.Type == "NS" && r.Name == "@" { + if !strings.HasSuffix(r.Target, "registrar-servers.com.") { + fmt.Println("\n", r.Target, "Namecheap does not support changing apex NS records. Skipping.") + } + return false + } + return true + }) + + // namecheap has this really annoying feature where they add some parking records if you have no records. + // This causes a few problems for our purposes, specifically the integration tests. + // lets detect that one case and pretend it is a no-op. + if len(dc.Records) == 0 && len(records.Hosts) == 2 { + if records.Hosts[0].Type == "CNAME" && + strings.Contains(records.Hosts[0].Address, "parkingpage") && + records.Hosts[1].Type == "URL" { + return nil, nil + } + } + + for _, r := range records.Hosts { + if r.Type == "SOA" { + continue + } + rec := &models.RecordConfig{ + NameFQDN: dnsutil.AddOrigin(r.Name, dc.Name), + Type: r.Type, + Target: r.Address, + TTL: uint32(r.TTL), + MxPreference: uint16(r.MXPref), + Original: r, + } + actual = append(actual, rec) + } + + differ := diff.New(dc) + _, create, delete, modify := differ.IncrementalDiff(actual) + + // // because namecheap doesn't have selective create, delete, modify, + // // we bundle them all up to send at once. We *do* want to see the + // // changes though + + var desc []string + for _, i := range create { + desc = append(desc, "\n"+i.String()) + } + for _, i := range delete { + desc = append(desc, "\n"+i.String()) + } + for _, i := range modify { + desc = append(desc, "\n"+i.String()) + } + + msg := fmt.Sprintf("GENERATE_ZONE: %s (%d records)%s", dc.Name, len(dc.Records), desc) + corrections := []*models.Correction{} + + // only create corrections if there are changes + if len(desc) > 0 { + corrections = append(corrections, + &models.Correction{ + Msg: msg, + F: func() error { + return n.generateRecords(dc) + }, + }) + } + + return corrections, nil +} + +func (n *Namecheap) generateRecords(dc *models.DomainConfig) error { + + var recs []nc.DomainDNSHost + + id := 1 + for _, r := range dc.Records { + name := dnsutil.TrimDomainName(r.NameFQDN, dc.Name) + rec := nc.DomainDNSHost{ + ID: id, + Name: name, + Type: r.Type, + Address: r.Target, + MXPref: int(r.MxPreference), + TTL: int(r.TTL), + } + recs = append(recs, rec) + id++ + } + sld, tld := splitDomain(dc.Name) + <-throttle + _, err := n.client.DomainDNSSetHosts(sld, tld, recs) + return err +} + +func (n *Namecheap) GetNameservers(domainName string) ([]*models.Nameserver, error) { + // return default namecheap nameservers + ns := NamecheapDefaultNs + + return models.StringsToNameservers(ns), nil +} + func (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models.Correction, error) { + <-throttle info, err := n.client.DomainGetInfo(dc.Name) if err != nil { return nil, err @@ -64,6 +226,7 @@ func (n *Namecheap) GetRegistrarCorrections(dc *models.DomainConfig) ([]*models. 
{ Msg: fmt.Sprintf("Change Nameservers from '%s' to '%s'", found, desired), F: func() error { + <-throttle _, err := n.client.DomainDNSSetCustom(sld, tld, desired) if err != nil { return err diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 000000000..f85a3c32b --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". 
+ default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
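+// For example, crush([]string{"abc", "bcd"}) can merge the shared "bc" overlap
+// and return "abcd".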
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000..8bbf3bcd7 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. 
+ +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<
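For reference, a minimal dnsconfig.js sketch exercising the provider added by this
patch might look like the following; the domain and record values are illustrative,
and the "namecheap" credential entry is assumed to supply the apiuser and apikey
keys the provider reads (optionally BaseURL), as in integrationTest/providers.json
above:

    var REG = NewRegistrar("namecheap", "NAMECHEAP");
    var DSP = NewDnsProvider("namecheap", "NAMECHEAP");

    D("example.com", REG, DnsProvider(DSP),
        A("@", "1.2.3.4"),
        A("www", "1.2.3.4")
    );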