1
0
mirror of https://github.com/StackExchange/dnscontrol.git synced 2024-05-11 05:55:12 +00:00

vendor minify package

This commit is contained in:
Craig Peterson
2017-07-20 17:42:11 -04:00
parent 710d14b1a9
commit 28d0b0c5a0
23 changed files with 3025 additions and 0 deletions

22
vendor/github.com/tdewolff/parse/LICENSE.md generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2015 Taco de Wolff
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

38
vendor/github.com/tdewolff/parse/README.md generated vendored Normal file
View File

@ -0,0 +1,38 @@
# Parse [![Build Status](https://travis-ci.org/tdewolff/parse.svg?branch=master)](https://travis-ci.org/tdewolff/parse) [![GoDoc](http://godoc.org/github.com/tdewolff/parse?status.svg)](http://godoc.org/github.com/tdewolff/parse) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/parse/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/parse?branch=master)
This package contains several lexers and parsers written in [Go][1]. All subpackages are built to be streaming, high performance and to be in accordance with the official (latest) specifications.
The lexers are implemented using `buffer.Lexer` in https://github.com/tdewolff/buffer and the parsers work on top of the lexers. Some subpackages have hashes defined (using [Hasher](https://github.com/tdewolff/hasher)) that speed up common byte-slice comparisons.
## CSS
This package is a CSS3 lexer and parser. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until the EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing to return grammar units until the EOF.
[See README here](https://github.com/tdewolff/parse/tree/master/css).
## HTML
This package is an HTML5 lexer. It follows the specification at [The HTML syntax](http://www.w3.org/TR/html5/syntax.html). The lexer takes an io.Reader and converts it into tokens until the EOF.
[See README here](https://github.com/tdewolff/parse/tree/master/html).
## JS
This package is a JS lexer (ECMA-262, edition 6.0). It follows the specification at [ECMAScript Language Specification](http://www.ecma-international.org/ecma-262/6.0/). The lexer takes an io.Reader and converts it into tokens until the EOF.
[See README here](https://github.com/tdewolff/parse/tree/master/js).
## JSON
This package is a JSON parser (ECMA-404). It follows the specification at [JSON](http://json.org/). The parser takes an io.Reader and converts it into tokens until the EOF.
[See README here](https://github.com/tdewolff/parse/tree/master/json).
## SVG
This package contains common hashes for SVG1.1 tags and attributes.
## XML
This package is an XML1.0 lexer. It follows the specification at [Extensible Markup Language (XML) 1.0 (Fifth Edition)](http://www.w3.org/TR/xml/). The lexer takes an io.Reader and converts it into tokens until the EOF.
[See README here](https://github.com/tdewolff/parse/tree/master/xml).
## License
Released under the [MIT license](LICENSE.md).
[1]: http://golang.org/ "Go Language"

230
vendor/github.com/tdewolff/parse/common.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
// Package parse contains a collection of parsers for various formats in its subpackages.
package parse // import "github.com/tdewolff/parse"
import (
"encoding/base64"
"errors"
"net/url"
)
// ErrBadDataURI is returned by DataURI when the byte slice does not start
// with the 'data:' scheme prefix or is too short to contain one.
var ErrBadDataURI = errors.New("not a data URI")
// Number returns the number of bytes that parse as a number of the regex
// format (+|-)?([0-9]+(\.[0-9]+)?|\.[0-9]+)((e|E)(+|-)?[0-9]+)?.
// It returns 0 when b does not start with a valid number.
func Number(b []byte) int {
	n := len(b)
	if n == 0 {
		return 0
	}
	isDigit := func(c byte) bool { return c >= '0' && c <= '9' }
	pos := 0
	if b[pos] == '+' || b[pos] == '-' {
		pos++
		if pos >= n {
			return 0 // a lone sign is not a number
		}
	}
	hasIntPart := isDigit(b[pos])
	for pos < n && isDigit(b[pos]) {
		pos++
	}
	if pos < n && b[pos] == '.' {
		if pos+1 < n && isDigit(b[pos+1]) {
			pos++ // consume '.'
			for pos < n && isDigit(b[pos]) {
				pos++
			}
		} else if hasIntPart {
			// The '.' may start the next token; stop before it.
			return pos
		} else {
			return 0
		}
	} else if !hasIntPart {
		return 0
	}
	// Optional exponent; only accepted when at least one digit follows.
	mark := pos
	if pos < n && (b[pos] == 'e' || b[pos] == 'E') {
		pos++
		if pos < n && (b[pos] == '+' || b[pos] == '-') {
			pos++
		}
		if pos >= n || !isDigit(b[pos]) {
			// The 'e' (and sign) may belong to the next token.
			return mark
		}
		for pos < n && isDigit(b[pos]) {
			pos++
		}
	}
	return pos
}
// Dimension parses a byte slice and returns the length of the number and the
// length of the unit that immediately follows it. The unit is either '%'
// (length 1) or a run of ASCII letters.
func Dimension(b []byte) (int, int) {
	num := Number(b)
	if num == 0 || num == len(b) {
		return num, 0
	}
	isAlpha := func(c byte) bool {
		return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z'
	}
	switch c := b[num]; {
	case c == '%':
		return num, 1
	case isAlpha(c):
		end := num + 1
		for end < len(b) && isAlpha(b[end]) {
			end++
		}
		return num, end - num
	}
	return num, 0
}
// Mediatype parses a given mediatype and splits the mimetype from the parameters.
// It works similar to mime.ParseMediaType but is faster.
// The returned params map is nil when no parameters are present.
func Mediatype(b []byte) ([]byte, map[string]string) {
	// Skip leading spaces before the mimetype.
	i := 0
	for i < len(b) && b[i] == ' ' {
		i++
	}
	b = b[i:]
	n := len(b)
	mimetype := b
	var params map[string]string
	for i := 3; i < n; i++ { // mimetype is at least three characters long
		if b[i] == ';' || b[i] == ' ' {
			// The mimetype ends at the first ';' or space.
			mimetype = b[:i]
			if b[i] == ' ' {
				// Skip spaces; parameters must then start with ';',
				// otherwise the rest of the input is ignored.
				i++
				for i < n && b[i] == ' ' {
					i++
				}
				if i < n && b[i] != ';' {
					break
				}
			}
			params = map[string]string{}
			s := string(b)
		PARAM:
			// On entry i points at ';'; step past it and skip spaces.
			i++
			for i < n && s[i] == ' ' {
				i++
			}
			// Parse the parameter key up to '=', ';' or space.
			start := i
			for i < n && s[i] != '=' && s[i] != ';' && s[i] != ' ' {
				i++
			}
			key := s[start:i]
			for i < n && s[i] == ' ' {
				i++
			}
			if i < n && s[i] == '=' {
				// Parse the value after '='; a key without '=' gets an
				// empty value (the else branch below).
				i++
				for i < n && s[i] == ' ' {
					i++
				}
				start = i
				for i < n && s[i] != ';' && s[i] != ' ' {
					i++
				}
			} else {
				start = i
			}
			params[key] = s[start:i]
			for i < n && s[i] == ' ' {
				i++
			}
			// Another ';' means another parameter follows.
			if i < n && s[i] == ';' {
				goto PARAM
			}
			break
		}
	}
	return mimetype, params
}
// DataURI parses the given data URI and returns the mediatype, the decoded
// data and an error. Base64-encoded payloads are decoded and percent-encoded
// payloads are unescaped; a missing mediatype defaults to "text/plain".
// ErrBadDataURI is returned when the input does not start with "data:" or no
// ',' separator is found.
func DataURI(dataURI []byte) ([]byte, []byte, error) {
	if len(dataURI) > 5 && Equal(dataURI[:5], []byte("data:")) {
		dataURI = dataURI[5:]
		inBase64 := false
		var mediatype []byte
		// i marks the start of the current mediatype segment; j scans ahead.
		i := 0
		for j := 0; j < len(dataURI); j++ {
			c := dataURI[j]
			if c == '=' || c == ';' || c == ',' {
				if c != '=' && Equal(TrimWhitespace(dataURI[i:j]), []byte("base64")) {
					// A ";base64" segment: drop the ';' already appended to
					// mediatype and remember to base64-decode the payload.
					if len(mediatype) > 0 {
						mediatype = mediatype[:len(mediatype)-1]
					}
					inBase64 = true
					i = j
				} else if c != ',' {
					// Accumulate the trimmed segment plus its '=' or ';'.
					mediatype = append(append(mediatype, TrimWhitespace(dataURI[i:j])...), c)
					i = j + 1
				} else {
					// Final segment before the ',' separator.
					mediatype = append(mediatype, TrimWhitespace(dataURI[i:j])...)
				}
				if c == ',' {
					// Empty mediatype (or one that started with ';')
					// defaults to text/plain per RFC 2397.
					if len(mediatype) == 0 || mediatype[0] == ';' {
						mediatype = []byte("text/plain")
					}
					data := dataURI[j+1:]
					if inBase64 {
						decoded := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
						n, err := base64.StdEncoding.Decode(decoded, data)
						if err != nil {
							return nil, nil, err
						}
						data = decoded[:n]
					} else if unescaped, err := url.QueryUnescape(string(data)); err == nil {
						// Best effort: keep the raw bytes when unescaping fails.
						data = []byte(unescaped)
					}
					return mediatype, data, nil
				}
			}
		}
	}
	return nil, nil, ErrBadDataURI
}
// QuoteEntity parses the given byte slice and returns the quote character
// that the leading HTML entity encodes (' or ") together with the entity's
// length in bytes. It returns (0, 0) when b does not start with such an
// entity. Numeric entities may contain leading zeros.
func QuoteEntity(b []byte) (quote byte, n int) {
	if len(b) < 5 || b[0] != '&' {
		return 0, 0
	}
	if b[1] == '#' {
		if b[2] == 'x' {
			// Hexadecimal entity: &#x22; or &#x27; (leading zeros allowed).
			i := 3
			for i < len(b) && b[i] == '0' {
				i++
			}
			if i+2 < len(b) && b[i] == '2' && b[i+2] == ';' {
				switch b[i+1] {
				case '2':
					return '"', i + 3 // &#x22;
				case '7':
					return '\'', i + 3 // &#x27;
				}
			}
			return 0, 0
		}
		// Decimal entity: &#34; or &#39; (leading zeros allowed).
		i := 2
		for i < len(b) && b[i] == '0' {
			i++
		}
		if i+2 < len(b) && b[i] == '3' && b[i+2] == ';' {
			switch b[i+1] {
			case '4':
				return '"', i + 3 // &#34;
			case '9':
				return '\'', i + 3 // &#39;
			}
		}
		return 0, 0
	}
	// Named entities, matched case-insensitively.
	if len(b) >= 6 && b[5] == ';' {
		if EqualFold(b[1:5], []byte{'q', 'u', 'o', 't'}) {
			return '"', 6 // &quot;
		}
		if EqualFold(b[1:5], []byte{'a', 'p', 'o', 's'}) {
			return '\'', 6 // &apos;
		}
	}
	return 0, 0
}

81
vendor/github.com/tdewolff/parse/json/README.md generated vendored Normal file
View File

@ -0,0 +1,81 @@
# JSON [![GoDoc](http://godoc.org/github.com/tdewolff/parse/json?status.svg)](http://godoc.org/github.com/tdewolff/parse/json) [![GoCover](http://gocover.io/_badge/github.com/tdewolff/parse/json)](http://gocover.io/github.com/tdewolff/parse/json)
This package is a JSON lexer (ECMA-404) written in [Go][1]. It follows the specification at [JSON](http://json.org/). The lexer takes an io.Reader and converts it into tokens until the EOF.
## Installation
Run the following command
go get github.com/tdewolff/parse/json
or add the following import and run project with `go get`
import "github.com/tdewolff/parse/json"
## Parser
### Usage
The following initializes a new Parser with io.Reader `r`:
``` go
p := json.NewParser(r)
```
To tokenize until EOF or an error occurs, use:
``` go
for {
gt, text := p.Next()
switch gt {
case json.ErrorGrammar:
// error or EOF set in p.Err()
return
// ...
}
}
```
All grammars:
``` go
ErrorGrammar GrammarType = iota // extra grammar when errors occur
WhitespaceGrammar // space \t \r \n
LiteralGrammar // null true false
NumberGrammar
StringGrammar
StartObjectGrammar // {
EndObjectGrammar // }
StartArrayGrammar // [
EndArrayGrammar // ]
```
### Examples
``` go
package main
import (
	"fmt"
	"io"
	"os"

	"github.com/tdewolff/parse/json"
)
// Tokenize JSON from stdin.
func main() {
p := json.NewParser(os.Stdin)
for {
gt, text := p.Next()
switch gt {
case json.ErrorGrammar:
if p.Err() != io.EOF {
fmt.Println("Error on line", p.Line(), ":", p.Err())
}
return
case json.LiteralGrammar:
fmt.Println("Literal", string(text))
case json.NumberGrammar:
fmt.Println("Number", string(text))
// ...
}
}
}
```
## License
Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
[1]: http://golang.org/ "Go Language"

317
vendor/github.com/tdewolff/parse/json/parse.go generated vendored Normal file
View File

@ -0,0 +1,317 @@
// Package json is a JSON parser following the specifications at http://json.org/.
package json // import "github.com/tdewolff/parse/json"
import (
"errors"
"io"
"strconv"
"github.com/tdewolff/buffer"
)
// Errors that the JSON parser can report through Err().
var (
	// ErrBadComma is returned when an unexpected comma is encountered.
	ErrBadComma = errors.New("unexpected comma character outside an array or object")
	// ErrNoComma is returned when no comma is present between two values.
	ErrNoComma = errors.New("expected comma character or an array or object ending")
	// ErrBadObjectKey is returned when the object key is not a quoted string.
	ErrBadObjectKey = errors.New("expected object key to be a quoted string")
	// ErrBadObjectDeclaration is returned when the object key is not followed by a colon character.
	ErrBadObjectDeclaration = errors.New("expected colon character after object key")
	// ErrBadObjectEnding is returned when an unexpected right brace is encountered.
	ErrBadObjectEnding = errors.New("unexpected right brace character")
	// ErrBadArrayEnding is returned when an unexpected right bracket is encountered.
	ErrBadArrayEnding = errors.New("unexpected right bracket character")
)
////////////////////////////////////////////////////////////////
// GrammarType determines the type of grammar unit returned by the parser.
type GrammarType uint32

// GrammarType values.
const (
	ErrorGrammar GrammarType = iota // extra grammar when errors occur
	WhitespaceGrammar
	LiteralGrammar
	NumberGrammar
	StringGrammar
	StartObjectGrammar // {
	EndObjectGrammar   // }
	StartArrayGrammar  // [
	EndArrayGrammar    // ]
)

// String returns the string representation of a GrammarType.
func (gt GrammarType) String() string {
	// Indexed by the iota order of the constants above.
	names := [...]string{
		"Error", "Whitespace", "Literal", "Number", "String",
		"StartObject", "EndObject", "StartArray", "EndArray",
	}
	if int(gt) < len(names) {
		return names[gt]
	}
	return "Invalid(" + strconv.Itoa(int(gt)) + ")"
}
////////////////////////////////////////////////////////////////
// State determines the current state the parser is in.
type State uint32

// State values.
const (
	ValueState State = iota // extra token when errors occur
	ObjectKeyState
	ObjectValueState
	ArrayState
)

// String returns the string representation of a State.
func (state State) String() string {
	// Indexed by the iota order of the constants above.
	names := [...]string{"Value", "ObjectKey", "ObjectValue", "Array"}
	if int(state) < len(names) {
		return names[state]
	}
	return "Invalid(" + strconv.Itoa(int(state)) + ")"
}
////////////////////////////////////////////////////////////////
// Parser is the state for the lexer.
type Parser struct {
	r *buffer.Lexer
	// state is a stack of nesting states; the last element is the current one.
	state []State
	// err holds a parse error produced by Next (buffer errors take precedence in Err).
	err error
	// needComma is true when the next value must be preceded by a comma
	// (or by a closing brace/bracket).
	needComma bool
}
// NewParser returns a new Parser for a given io.Reader.
func NewParser(r io.Reader) *Parser {
	p := &Parser{}
	p.r = buffer.NewLexer(r)
	p.state = []State{ValueState} // start expecting a single top-level value
	return p
}
// Err returns the error encountered during tokenization; this is often io.EOF
// but other errors can be returned as well. Buffer errors take precedence
// over parse errors.
func (p Parser) Err() error {
	if err := p.r.Err(); err != nil {
		return err
	}
	return p.err
}
// Next returns the next Grammar. It returns ErrorGrammar when an error was
// encountered. Using Err() one can retrieve the error message.
func (p *Parser) Next() (GrammarType, []byte) {
	// Release the bytes of the previously shifted token back to the buffer.
	p.r.Free(p.r.ShiftLen())
	p.moveWhitespace()
	c := p.r.Peek(0)
	state := p.state[len(p.state)-1]
	if c == ',' {
		// A comma is only valid between array elements or object members.
		if state != ArrayState && state != ObjectKeyState {
			p.err = ErrBadComma
			return ErrorGrammar, nil
		}
		p.r.Move(1)
		p.moveWhitespace()
		p.needComma = false
		c = p.r.Peek(0)
	}
	// Drop the consumed whitespace/comma prefix from the upcoming token.
	p.r.Skip()
	if p.needComma && c != '}' && c != ']' && c != 0 {
		// Previous value was not followed by a comma or a closing brace/bracket.
		p.err = ErrNoComma
		return ErrorGrammar, nil
	} else if c == '{' {
		p.state = append(p.state, ObjectKeyState)
		p.r.Move(1)
		return StartObjectGrammar, p.r.Shift()
	} else if c == '}' {
		if state != ObjectKeyState {
			p.err = ErrBadObjectEnding
			return ErrorGrammar, nil
		}
		p.needComma = true
		// Pop the object state; an enclosing object now expects its next key.
		p.state = p.state[:len(p.state)-1]
		if p.state[len(p.state)-1] == ObjectValueState {
			p.state[len(p.state)-1] = ObjectKeyState
		}
		p.r.Move(1)
		return EndObjectGrammar, p.r.Shift()
	} else if c == '[' {
		p.state = append(p.state, ArrayState)
		p.r.Move(1)
		return StartArrayGrammar, p.r.Shift()
	} else if c == ']' {
		p.needComma = true
		if state != ArrayState {
			p.err = ErrBadArrayEnding
			return ErrorGrammar, nil
		}
		// Pop the array state; an enclosing object now expects its next key.
		p.state = p.state[:len(p.state)-1]
		if p.state[len(p.state)-1] == ObjectValueState {
			p.state[len(p.state)-1] = ObjectKeyState
		}
		p.r.Move(1)
		return EndArrayGrammar, p.r.Shift()
	} else if state == ObjectKeyState {
		// Object keys must be quoted strings followed by a colon.
		if c != '"' || !p.consumeStringToken() {
			p.err = ErrBadObjectKey
			return ErrorGrammar, nil
		}
		// Remember the key's length so the colon and trailing whitespace
		// consumed below are not included in the returned bytes.
		n := p.r.Pos()
		p.moveWhitespace()
		if c := p.r.Peek(0); c != ':' {
			p.err = ErrBadObjectDeclaration
			return ErrorGrammar, nil
		}
		p.r.Move(1)
		p.state[len(p.state)-1] = ObjectValueState
		return StringGrammar, p.r.Shift()[:n]
	} else {
		p.needComma = true
		if state == ObjectValueState {
			p.state[len(p.state)-1] = ObjectKeyState
		}
		// A plain value: string, number or literal (true/false/null).
		if c == '"' && p.consumeStringToken() {
			return StringGrammar, p.r.Shift()
		} else if p.consumeNumberToken() {
			return NumberGrammar, p.r.Shift()
		} else if p.consumeLiteralToken() {
			return LiteralGrammar, p.r.Shift()
		}
	}
	// Nothing matched: EOF or an unparsable byte (error is set in p.r).
	return ErrorGrammar, nil
}
// State returns the state the parser is currently in (ie. which token is expected).
func (p *Parser) State() State {
	top := len(p.state) - 1
	return p.state[top]
}
////////////////////////////////////////////////////////////////
/*
The following functions follow the specifications at http://json.org/
*/
// moveWhitespace advances the lexer past any run of JSON whitespace
// (space, tab, carriage return, newline).
func (p *Parser) moveWhitespace() {
	for {
		switch p.r.Peek(0) {
		case ' ', '\t', '\r', '\n':
			p.r.Move(1)
		default:
			return
		}
	}
}
// consumeLiteralToken consumes one of the JSON literals "true", "false" or
// "null" and reports whether one was present.
func (p *Parser) consumeLiteralToken() bool {
	for _, word := range []string{"true", "false", "null"} {
		matched := true
		for k := 0; k < len(word); k++ {
			if p.r.Peek(k) != word[k] {
				matched = false
				break
			}
		}
		if matched {
			p.r.Move(len(word))
			return true
		}
	}
	return false
}
// consumeNumberToken consumes a JSON number (per http://json.org/) and
// reports whether one was present. On failure the lexer position is restored.
// A trailing '.' or exponent marker without digits is left unconsumed since
// it may belong to the next token.
func (p *Parser) consumeNumberToken() bool {
	mark := p.r.Pos()
	if p.r.Peek(0) == '-' {
		p.r.Move(1)
	}
	c := p.r.Peek(0)
	if c >= '1' && c <= '9' {
		// Non-zero leading digit: consume the full integer part.
		p.r.Move(1)
		for {
			if c := p.r.Peek(0); c < '0' || c > '9' {
				break
			}
			p.r.Move(1)
		}
	} else if c != '0' {
		// Not a number at all; undo a possibly consumed '-'.
		p.r.Rewind(mark)
		return false
	} else {
		p.r.Move(1) // 0
	}
	if c := p.r.Peek(0); c == '.' {
		p.r.Move(1)
		if c := p.r.Peek(0); c < '0' || c > '9' {
			// '.' without digits: back up and accept the integer part.
			p.r.Move(-1)
			return true
		}
		for {
			if c := p.r.Peek(0); c < '0' || c > '9' {
				break
			}
			p.r.Move(1)
		}
	}
	// Optional exponent; rewound entirely when no digits follow it.
	mark = p.r.Pos()
	if c := p.r.Peek(0); c == 'e' || c == 'E' {
		p.r.Move(1)
		if c := p.r.Peek(0); c == '+' || c == '-' {
			p.r.Move(1)
		}
		if c := p.r.Peek(0); c < '0' || c > '9' {
			p.r.Rewind(mark)
			return true
		}
		for {
			if c := p.r.Peek(0); c < '0' || c > '9' {
				break
			}
			p.r.Move(1)
		}
	}
	return true
}
// consumeStringToken consumes a quoted JSON string, including its closing
// quote, and reports whether the string was terminated before EOF. The lexer
// is assumed to be positioned on the opening '"'.
func (p *Parser) consumeStringToken() bool {
	p.r.Move(1) // past the opening quote
	for {
		switch c := p.r.Peek(0); {
		case c == '"':
			p.r.Move(1)
			return true
		case c == '\\' && (p.r.Peek(1) != 0 || p.r.Err() == nil):
			// Skip the backslash and the escaped character together.
			p.r.Move(2)
		case c == 0:
			// EOF before the closing quote.
			return false
		default:
			p.r.Move(1)
		}
	}
}

160
vendor/github.com/tdewolff/parse/util.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
package parse // import "github.com/tdewolff/parse"
// Copy returns a freshly allocated copy of the given byte slice.
func Copy(src []byte) (dst []byte) {
	out := make([]byte, len(src))
	copy(out, src)
	return out
}
// ToLower converts all ASCII uppercase letters (A-Z) in the byte slice to
// lowercase, in place, and returns the same slice.
func ToLower(src []byte) []byte {
	for i := 0; i < len(src); i++ {
		if 'A' <= src[i] && src[i] <= 'Z' {
			src[i] += 'a' - 'A'
		}
	}
	return src
}
// Equal returns true when s and target contain exactly the same bytes.
func Equal(s, target []byte) bool {
	if len(s) != len(target) {
		return false
	}
	for i := range s {
		if s[i] != target[i] {
			return false
		}
	}
	return true
}
// EqualFold returns true when s matches case-insensitively the targetLower
// (which must itself be all-lowercase).
func EqualFold(s, targetLower []byte) bool {
	if len(s) != len(targetLower) {
		return false
	}
	for i, c := range targetLower {
		d := s[i]
		// Bytes match when identical, or when d is an ASCII uppercase letter
		// whose lowercase form equals c. The previous condition
		// (c < 'A' && c > 'Z') was always false, which made ANY byte pair
		// differing by 0x20 compare equal (e.g. '[' matched '{').
		if d != c && (d < 'A' || d > 'Z' || d+('a'-'A') != c) {
			return false
		}
	}
	return true
}
// whitespaceTable marks, per byte value, the characters considered
// whitespace: space, \t, \n, \f, \r. All non-ASCII bytes are false.
var whitespaceTable = func() [256]bool {
	var t [256]bool
	for _, c := range []byte{' ', '\t', '\n', '\f', '\r'} {
		t[c] = true
	}
	return t
}()

// IsWhitespace returns true for space, \n, \r, \t, \f.
func IsWhitespace(c byte) bool {
	return whitespaceTable[c]
}
// IsAllWhitespace returns true when the entire byte slice consists of
// space, \n, \r, \t, \f. An empty slice counts as all-whitespace.
func IsAllWhitespace(b []byte) bool {
	for i := 0; i < len(b); i++ {
		if !IsWhitespace(b[i]) {
			return false
		}
	}
	return true
}
// TrimWhitespace returns a subslice of b with any leading and trailing
// whitespace characters removed. The result aliases b's backing array.
func TrimWhitespace(b []byte) []byte {
	lo, hi := 0, len(b)
	for lo < hi && IsWhitespace(b[lo]) {
		lo++
	}
	for hi > lo && IsWhitespace(b[hi-1]) {
		hi--
	}
	return b[lo:hi]
}
// ReplaceMultipleWhitespace replaces character series of space, \n, \t, \f, \r
// into a single space or newline (when the series contained a \n or \r).
// The slice is rewritten in place and the (possibly shorter) result returned.
func ReplaceMultipleWhitespace(b []byte) []byte {
	// j is the write position; the compacted output never outruns the read
	// position i, so in-place rewriting is safe.
	j := 0
	prevWS := false
	hasNewline := false
	for i, c := range b {
		if IsWhitespace(c) {
			// Defer emitting anything until the series ends, so we know
			// whether it contained a newline.
			prevWS = true
			if c == '\n' || c == '\r' {
				hasNewline = true
			}
		} else {
			if prevWS {
				// Close the pending whitespace series with one character.
				prevWS = false
				if hasNewline {
					hasNewline = false
					b[j] = '\n'
				} else {
					b[j] = ' '
				}
				j++
			}
			b[j] = b[i]
			j++
		}
	}
	// Flush a whitespace series that ran to the end of the slice.
	if prevWS {
		if hasNewline {
			b[j] = '\n'
		} else {
			b[j] = ' '
		}
		j++
	}
	return b[:j]
}