mirror of https://github.com/StackExchange/dnscontrol.git synced 2024-05-11 05:55:12 +00:00

go mod vendor

This commit is contained in:
Tom Limoncelli
2020-02-12 21:17:56 -05:00
parent f708f19535
commit 0f07d91f54
34 changed files with 705 additions and 930 deletions


@ -88,6 +88,13 @@ func builtinNumber_toPrecision(call FunctionCall) Value {
return toValue_string(strconv.FormatFloat(call.This.float64(), 'g', int(precision), 64))
}
func builtinNumber_isNaN(call FunctionCall) Value {
if len(call.ArgumentList) < 1 {
return toValue_bool(false)
}
return toValue_bool(call.Argument(0).IsNaN())
}
func builtinNumber_toLocaleString(call FunctionCall) Value {
return builtinNumber_toString(call)
}
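For context: this hunk adds the ES6 `Number.isNaN` builtin, which, unlike the global `isNaN`, does not coerce its argument and returns `false` when called without arguments. A minimal sketch of how an embedder would observe the new builtin, assuming otto's public `Run` API (the script content is illustrative):

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	// Number.isNaN does not coerce, so the non-numeric string is simply
	// "not NaN"; the global isNaN would have returned true for it.
	v, err := vm.Run(`[Number.isNaN(NaN), Number.isNaN("abc"), Number.isNaN()]`)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // true,false,false
}
```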


@ -2627,6 +2627,29 @@ func _newContext(runtime *_runtime) {
call: builtinNumber_toLocaleString,
},
}
isNaN_function := &_object{
runtime: runtime,
class: "Function",
objectClass: _classObject,
prototype: runtime.global.FunctionPrototype,
extensible: true,
property: map[string]_property{
"length": _property{
mode: 0,
value: Value{
kind: valueNumber,
value: 1,
},
},
},
propertyOrder: []string{
"length",
},
value: _nativeFunctionObject{
name: "isNaN",
call: builtinNumber_isNaN,
},
}
runtime.global.NumberPrototype = &_object{
runtime: runtime,
class: "Number",
@ -2713,6 +2736,13 @@ func _newContext(runtime *_runtime) {
value: runtime.global.NumberPrototype,
},
},
"isNaN": _property{
mode: 0101,
value: Value{
kind: valueObject,
value: isNaN_function,
},
},
"MAX_VALUE": _property{
mode: 0,
value: Value{
@ -2752,6 +2782,7 @@ func _newContext(runtime *_runtime) {
propertyOrder: []string{
"length",
"prototype",
"isNaN",
"MAX_VALUE",
"MIN_VALUE",
"NaN",


@ -372,6 +372,7 @@ sub newContext {
1,
$self->functionDeclare(
$class,
"isNaN", 1,
),
$self->numberConstantDeclare(
"MAX_VALUE", "math.MaxFloat64",


@ -2,6 +2,7 @@ package otto
import (
"encoding"
"encoding/json"
"errors"
"fmt"
"math"
@ -264,6 +265,8 @@ func (self *_runtime) convertNumeric(v Value, t reflect.Type) reflect.Value {
panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
}
return val.Convert(t)
case reflect.Float32, reflect.Float64:
return val.Convert(t)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -279,13 +282,48 @@ func (self *_runtime) convertNumeric(v Value, t reflect.Type) reflect.Value {
panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
}
return val.Convert(t)
case reflect.Float32, reflect.Float64:
return val.Convert(t)
}
}
panic(self.panicTypeError(fmt.Sprintf("unsupported type %v for numeric conversion", val.Type())))
panic(self.panicTypeError(fmt.Sprintf("unsupported type %v -> %v for numeric conversion", val.Type(), t)))
}
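The two new `reflect.Float32, reflect.Float64` cases let float-kinded JS numbers (such as division results) convert to Go integer parameters instead of panicking with "unsupported type". A hedged sketch of the observable effect, assuming the usual `vm.Set` binding (`take` is a made-up name):

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	vm.Set("take", func(n int) int { return n + 1 })
	// JS division yields a float-kinded value; with the new cases it
	// now converts (truncating) to the Go int parameter.
	v, err := vm.Run(`take(10 / 2)`)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 6
}
```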
func fieldIndexByName(t reflect.Type, name string) []int {
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if !validGoStructName(f.Name) {
continue
}
if f.Anonymous {
if a := fieldIndexByName(f.Type, name); a != nil {
return append([]int{i}, a...)
}
}
if a := strings.SplitN(f.Tag.Get("json"), ",", 2); a[0] != "" {
if a[0] == "-" {
continue
}
if a[0] == name {
return []int{i}
}
}
if f.Name == name {
return []int{i}
}
}
return nil
}
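`fieldIndexByName` resolves a JS property name to a struct field index path: a `json` tag takes precedence, `json:"-"` hides the field, anonymous fields are searched recursively, and the plain field name is the fallback. A standalone sketch of the tag-precedence part using only the standard library (the `user` type is made up):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type user struct {
	ID   int    `json:"id"` // matched via its tag
	Name string `json:"-"`  // hidden: never matched
	Age  int    // no tag: matched via the field name
}

// lookup mirrors the precedence rules above, minus the recursion
// into anonymous fields.
func lookup(t reflect.Type, key string) (reflect.StructField, bool) {
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if tag := strings.SplitN(f.Tag.Get("json"), ",", 2)[0]; tag != "" {
			if tag == "-" {
				continue
			}
			if tag == key {
				return f, true
			}
		}
		if f.Name == key {
			return f, true
		}
	}
	return reflect.StructField{}, false
}

func main() {
	for _, key := range []string{"id", "Name", "Age"} {
		if f, ok := lookup(reflect.TypeOf(user{}), key); ok {
			fmt.Printf("%q -> field %s\n", key, f.Name)
		} else {
			fmt.Printf("%q -> no match\n", key)
		}
	}
}
```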
var typeOfValue = reflect.TypeOf(Value{})
var typeOfJSONRawMessage = reflect.TypeOf(json.RawMessage{})
// convertCallParameter converts request val to type t if possible.
// If the conversion fails due to overflow or type mismatch then it panics.
@ -295,6 +333,12 @@ func (self *_runtime) convertCallParameter(v Value, t reflect.Type) reflect.Valu
return reflect.ValueOf(v)
}
if t == typeOfJSONRawMessage {
if d, err := json.Marshal(v.export()); err == nil {
return reflect.ValueOf(d)
}
}
if v.kind == valueObject {
if gso, ok := v._object().value.(*_goStructObject); ok {
if gso.value.Type().AssignableTo(t) {
@ -467,41 +511,15 @@ func (self *_runtime) convertCallParameter(v Value, t reflect.Type) reflect.Valu
s := reflect.New(t)
for _, k := range o.propertyOrder {
var f *reflect.StructField
idx := fieldIndexByName(t, k)
for i := 0; i < t.NumField(); i++ {
ff := t.Field(i)
if j := ff.Tag.Get("json"); j != "" {
if j == "-" {
continue
}
a := strings.Split(j, ",")
if a[0] == k {
f = &ff
break
}
}
if ff.Name == k {
f = &ff
break
}
if strings.EqualFold(ff.Name, k) {
f = &ff
}
}
if f == nil {
if idx == nil {
panic(self.panicTypeError("can't convert object; field %q was supplied but does not exist on target %v", k, t))
}
ss := s
for _, i := range f.Index {
for _, i := range idx {
if ss.Kind() == reflect.Ptr {
if ss.IsNil() {
if !ss.CanSet() {
@ -572,7 +590,7 @@ func (self *_runtime) convertCallParameter(v Value, t reflect.Type) reflect.Valu
s = v.Class()
}
panic(self.panicTypeError("can't convert from %q to %q", s, t.String()))
panic(self.panicTypeError("can't convert from %q to %q", s, t))
}
func (self *_runtime) toValue(value interface{}) Value {
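The new `typeOfJSONRawMessage` branch above lets a script pass an arbitrary object to a Go callback that takes `json.RawMessage`: the value is exported and marshaled before the call. A minimal sketch, assuming otto's `Set`/`Run` API (`save` is a made-up name):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	// The JS object is exported and marshaled, so the callback
	// receives ready-to-store JSON bytes.
	vm.Set("save", func(raw json.RawMessage) {
		fmt.Println(string(raw)) // {"a":1,"b":[2,3]}
	})
	if _, err := vm.Run(`save({a: 1, b: [2, 3]})`); err != nil {
		panic(err)
	}
}
```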


@ -33,16 +33,32 @@ func _newGoArrayObject(value reflect.Value) *_goArrayObject {
return self
}
func (self _goArrayObject) getValue(index int64) (reflect.Value, bool) {
func (self _goArrayObject) getValue(name string) (reflect.Value, bool) {
if index, err := strconv.ParseInt(name, 10, 64); err == nil {
v, ok := self.getValueIndex(index)
if ok {
return v, ok
}
}
if m := self.value.MethodByName(name); m != (reflect.Value{}) {
return m, true
}
return reflect.Value{}, false
}
func (self _goArrayObject) getValueIndex(index int64) (reflect.Value, bool) {
value := reflect.Indirect(self.value)
if index < int64(value.Len()) {
return value.Index(int(index)), true
}
return reflect.Value{}, false
}
func (self _goArrayObject) setValue(index int64, value Value) bool {
indexValue, exists := self.getValue(index)
indexValue, exists := self.getValueIndex(index)
if !exists {
return false
}
@ -64,11 +80,10 @@ func goArrayGetOwnProperty(self *_object, name string) *_property {
}
// .0, .1, .2, ...
index := stringToArrayIndex(name)
if index >= 0 {
if index := stringToArrayIndex(name); index >= 0 {
object := self.value.(*_goArrayObject)
value := Value{}
reflectValue, exists := object.getValue(index)
reflectValue, exists := object.getValueIndex(index)
if exists {
value = self.runtime.toValue(reflectValue.Interface())
}
@ -78,6 +93,13 @@ func goArrayGetOwnProperty(self *_object, name string) *_property {
}
}
if method := self.value.(*_goArrayObject).value.MethodByName(name); method != (reflect.Value{}) {
return &_property{
self.runtime.toValue(method.Interface()),
0110,
}
}
return objectGetOwnProperty(self, name)
}
@ -121,7 +143,7 @@ func goArrayDelete(self *_object, name string, throw bool) bool {
if index >= 0 {
object := self.value.(*_goArrayObject)
if object.writable {
indexValue, exists := object.getValue(index)
indexValue, exists := object.getValueIndex(index)
if exists {
indexValue.Set(reflect.Zero(reflect.Indirect(object.value).Type().Elem()))
return true


@ -36,6 +36,10 @@ func _newGoStructObject(value reflect.Value) *_goStructObject {
}
func (self _goStructObject) getValue(name string) reflect.Value {
if idx := fieldIndexByName(reflect.Indirect(self.value).Type(), name); len(idx) > 0 {
return reflect.Indirect(self.value).FieldByIndex(idx)
}
if validGoStructName(name) {
// Do not reveal hidden or unexported fields
if field := reflect.Indirect(self.value).FieldByName(name); (field != reflect.Value{}) {
@ -50,25 +54,21 @@ func (self _goStructObject) getValue(name string) reflect.Value {
return reflect.Value{}
}
func (self _goStructObject) field(name string) (reflect.StructField, bool) {
return reflect.Indirect(self.value).Type().FieldByName(name)
func (self _goStructObject) fieldIndex(name string) []int {
return fieldIndexByName(reflect.Indirect(self.value).Type(), name)
}
func (self _goStructObject) method(name string) (reflect.Method, bool) {
return reflect.Indirect(self.value).Type().MethodByName(name)
}
func (self _goStructObject) setValue(name string, value Value) bool {
field, exists := self.field(name)
if !exists {
func (self _goStructObject) setValue(rt *_runtime, name string, value Value) bool {
if idx := fieldIndexByName(reflect.Indirect(self.value).Type(), name); len(idx) == 0 {
return false
}
fieldValue := self.getValue(name)
reflectValue, err := value.toReflectValue(field.Type.Kind())
if err != nil {
panic(err)
}
fieldValue.Set(reflectValue)
fieldValue.Set(rt.convertCallParameter(value, fieldValue.Type()))
return true
}
@ -128,7 +128,7 @@ func goStructCanPut(self *_object, name string) bool {
func goStructPut(self *_object, name string, value Value, throw bool) {
object := self.value.(*_goStructObject)
if object.setValue(name, value) {
if object.setValue(self.runtime, name, value) {
return
}
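With `setValue` now delegating to `convertCallParameter`, assignments to exposed Go struct fields go through the same conversion rules as call parameters (including the numeric widening added earlier in this commit). A hedged sketch (`point` is a made-up type):

```go
package main

import (
	"fmt"

	"github.com/robertkrimen/otto"
)

type point struct {
	X float64
	Y float64
}

func main() {
	vm := otto.New()
	p := &point{}
	vm.Set("p", p)
	// The int-kinded literal 3 is converted to float64 on assignment.
	if _, err := vm.Run(`p.X = 3; p.Y = 4.5`); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 3 4.5
}
```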


@ -1,22 +0,0 @@
Copyright (c) 2015 Taco de Wolff
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.


@ -1,40 +0,0 @@
# Buffer [![GoDoc](http://godoc.org/github.com/tdewolff/buffer?status.svg)](http://godoc.org/github.com/tdewolff/buffer)
This package contains several buffer types, used for example in https://github.com/tdewolff/parse.
## Installation
Run the following command
go get github.com/tdewolff/buffer
or add the following import and run the project with `go get`
``` go
import "github.com/tdewolff/buffer"
```
## Reader
Reader is a wrapper around a `[]byte` that implements the `io.Reader` interface. It is a much thinner layer than `bytes.Buffer` provides and is therefore faster.
## Writer
Writer is a buffer that implements the `io.Writer` interface. It is a much thinner layer than `bytes.Buffer` provides and is therefore faster. It will expand the buffer when needed.
The reset functionality allows for better memory reuse. After calling `Reset`, it will overwrite the current buffer and thus reduce allocations.
## Lexer
Lexer is a read buffer specifically designed for building lexers. It keeps track of two positions: a start and end position. The start position is the beginning of the current token being parsed, the end position is being moved forward until a valid token is found. Calling `Shift` will collapse the positions to the end and return the parsed `[]byte`.
Moving the end position can go through `Move(int)` which also accepts negative integers. One can also use `Pos() int` to try and parse a token, and if it fails rewind with `Rewind(int)`, passing the previously saved position.
`Peek(int) byte` will peek forward (relative to the end position) and return the byte at that location. `PeekRune(int) (rune, int)` returns the UTF-8 rune and its length at the given **byte** position. Upon an error `Peek` will return `0`; the **user must peek at every character** and not skip any, otherwise it may skip a `0` and panic on out-of-bounds indexing.
`Lexeme() []byte` will return the currently selected bytes, `Skip()` will collapse the selection. `Shift() []byte` is a combination of `Lexeme() []byte` and `Skip()`.
When the passed `io.Reader` returned an error, `Err() error` will return that error even if not at the end of the buffer.
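(The commit deletes this README because the `buffer` package moved into `github.com/tdewolff/parse`.) For reference, a minimal sketch of the select-then-shift workflow described above, assuming the post-move import path:

```go
package main

import (
	"fmt"

	"github.com/tdewolff/parse/buffer" // import path after the move
)

func main() {
	l := buffer.NewLexerBytes([]byte("abc def"))
	// Grow the selection until a space or the terminating NULL.
	for l.Peek(0) != ' ' && l.Peek(0) != 0 {
		l.Move(1)
	}
	fmt.Printf("token: %q\n", l.Shift()) // "abc"; the selection collapses
}
```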
## StreamLexer
StreamLexer behaves like Lexer but uses a buffer pool to read in chunks from `io.Reader`, retaining old buffers in memory that are still in use, and re-using old buffers otherwise. Calling `Free(n int)` frees up `n` bytes from the internal buffer(s). It holds an array of buffers to accommodate keeping everything in memory. Calling `ShiftLen() int` returns the number of bytes that have been shifted since the previous call to `ShiftLen`, which can be used to specify how many bytes need to be freed up from the buffer. If you don't need to keep returned byte slices around, call `Free(ShiftLen())` after every `Shift` call.
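The Shift/Free contract described above is easiest to see as a loop: use each shifted token before freeing, then release the shifted bytes so the pool can recycle its buffers. A hedged sketch under the same post-move import path:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	z := buffer.NewStreamLexer(strings.NewReader("abc def"))
	for {
		// Grow the selection until a space or the end of input.
		for z.Peek(0) != ' ' && z.Peek(0) != 0 {
			z.Move(1)
		}
		fmt.Printf("%q\n", z.Shift()) // use the token before freeing it
		z.Free(z.ShiftLen())          // release all bytes shifted so far
		if z.Peek(0) == 0 {
			break
		}
		z.Move(1) // step over the delimiter
		z.Skip()
	}
}
```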
## License
Released under the [MIT license](LICENSE.md).
[1]: http://golang.org/ "Go Language"


@ -1,15 +0,0 @@
/*
Package buffer contains buffer and wrapper types for byte slices. It is useful for writing lexers or other high-performance byte slice handling.
The `Reader` and `Writer` types implement the `io.Reader` and `io.Writer` respectively and provide a thinner and faster interface than `bytes.Buffer`.
The `Shifter` type is useful for building lexers because it keeps track of the start and end position of a byte selection, and shifts the bytes whenever a valid token is found.
The `Lexer` is however an improved version of `Shifter`, allowing zero-copy for the parser by using a (kind of) ring buffer underneath.
*/
package buffer // import "github.com/tdewolff/buffer"
// defaultBufSize specifies the default initial length of internal buffers.
var defaultBufSize = 4096
// MinBuf specifies the default initial length of internal buffers.
// Solely here to support old versions of parse.
var MinBuf = defaultBufSize


@ -1,153 +0,0 @@
package buffer // import "github.com/tdewolff/buffer"
import (
"io"
"io/ioutil"
)
var nullBuffer = []byte{0}
// Lexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type Lexer struct {
buf []byte
pos int // index in buf
start int // index in buf
err error
restore func()
}
// NewLexer returns a new Lexer for a given io.Reader, and uses ioutil.ReadAll to read it into a byte slice.
// If the io.Reader implements Bytes, that is used instead.
// It will append a NULL at the end of the buffer.
func NewLexer(r io.Reader) *Lexer {
var b []byte
if r != nil {
if buffer, ok := r.(interface {
Bytes() []byte
}); ok {
b = buffer.Bytes()
} else {
var err error
b, err = ioutil.ReadAll(r)
if err != nil {
return &Lexer{
buf: []byte{0},
err: err,
}
}
}
}
return NewLexerBytes(b)
}
// NewLexerBytes returns a new Lexer for a given byte slice, and appends NULL at the end.
// To avoid reallocation, make sure the capacity has room for one more byte.
func NewLexerBytes(b []byte) *Lexer {
z := &Lexer{
buf: b,
}
n := len(b)
if n == 0 {
z.buf = nullBuffer
} else if b[n-1] != 0 {
// Append NULL to buffer, but try to avoid reallocation
if cap(b) > n {
// Overwrite next byte but restore when done
b = b[:n+1]
c := b[n]
b[n] = 0
z.buf = b
z.restore = func() {
b[n] = c
}
} else {
z.buf = append(b, 0)
}
}
return z
}
// Restore restores the replaced byte past the end of the buffer by NULL.
func (z *Lexer) Restore() {
if z.restore != nil {
z.restore()
z.restore = nil
}
}
// Err returns the error returned from io.Reader or io.EOF when the end has been reached.
func (z *Lexer) Err() error {
if z.err != nil {
return z.err
} else if z.pos >= len(z.buf)-1 {
return io.EOF
}
return nil
}
// Peek returns the ith byte relative to the end position.
// Peek returns 0 when an error has occurred, Err returns the error.
func (z *Lexer) Peek(pos int) byte {
pos += z.pos
return z.buf[pos]
}
// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *Lexer) PeekRune(pos int) (rune, int) {
// from unicode/utf8
c := z.Peek(pos)
if c < 0xC0 || z.Peek(pos+1) == 0 {
return rune(c), 1
} else if c < 0xE0 || z.Peek(pos+2) == 0 {
return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
} else if c < 0xF0 || z.Peek(pos+3) == 0 {
return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
}
return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}
// Move advances the position.
func (z *Lexer) Move(n int) {
z.pos += n
}
// Pos returns a mark to which the position can be rewound.
func (z *Lexer) Pos() int {
return z.pos - z.start
}
// Rewind rewinds the position to the given position.
func (z *Lexer) Rewind(pos int) {
z.pos = z.start + pos
}
// Lexeme returns the bytes of the current selection.
func (z *Lexer) Lexeme() []byte {
return z.buf[z.start:z.pos]
}
// Skip collapses the position to the end of the selection.
func (z *Lexer) Skip() {
z.start = z.pos
}
// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
func (z *Lexer) Shift() []byte {
b := z.buf[z.start:z.pos]
z.start = z.pos
return b
}
// Offset returns the character position in the buffer.
func (z *Lexer) Offset() int {
return z.pos
}
// Bytes returns the underlying buffer.
func (z *Lexer) Bytes() []byte {
return z.buf
}


@ -1,44 +0,0 @@
package buffer // import "github.com/tdewolff/buffer"
import "io"
// Reader implements an io.Reader over a byte slice.
type Reader struct {
buf []byte
pos int
}
// NewReader returns a new Reader for a given byte slice.
func NewReader(buf []byte) *Reader {
return &Reader{
buf: buf,
}
}
// Read reads bytes into the given byte slice and returns the number of bytes read and an error if one occurred.
func (r *Reader) Read(b []byte) (n int, err error) {
if len(b) == 0 {
return 0, nil
}
if r.pos >= len(r.buf) {
return 0, io.EOF
}
n = copy(b, r.buf[r.pos:])
r.pos += n
return
}
// Bytes returns the underlying byte slice.
func (r *Reader) Bytes() []byte {
return r.buf
}
// Reset resets the position of the read pointer to the beginning of the underlying byte slice.
func (r *Reader) Reset() {
r.pos = 0
}
// Len returns the length of the buffer.
func (r *Reader) Len() int {
return len(r.buf)
}


@ -1,223 +0,0 @@
package buffer // import "github.com/tdewolff/buffer"
import (
"io"
)
type block struct {
buf []byte
next int // index in pool plus one
active bool
}
type bufferPool struct {
pool []block
head int // index in pool plus one
tail int // index in pool plus one
pos int // byte pos in tail
}
func (z *bufferPool) swap(oldBuf []byte, size int) []byte {
// find new buffer that can be reused
swap := -1
for i := 0; i < len(z.pool); i++ {
if !z.pool[i].active && size <= cap(z.pool[i].buf) {
swap = i
break
}
}
if swap == -1 { // no free buffer found for reuse
if z.tail == 0 && z.pos >= len(oldBuf) && size <= cap(oldBuf) { // but we can reuse the current buffer!
z.pos -= len(oldBuf)
return oldBuf[:0]
}
// allocate new
z.pool = append(z.pool, block{make([]byte, 0, size), 0, true})
swap = len(z.pool) - 1
}
newBuf := z.pool[swap].buf
// put current buffer into pool
z.pool[swap] = block{oldBuf, 0, true}
if z.head != 0 {
z.pool[z.head-1].next = swap + 1
}
z.head = swap + 1
if z.tail == 0 {
z.tail = swap + 1
}
return newBuf[:0]
}
func (z *bufferPool) free(n int) {
z.pos += n
// move the tail over to next buffers
for z.tail != 0 && z.pos >= len(z.pool[z.tail-1].buf) {
z.pos -= len(z.pool[z.tail-1].buf)
newTail := z.pool[z.tail-1].next
z.pool[z.tail-1].active = false // after this, any thread may pick up the inactive buffer, so it can't be used anymore
z.tail = newTail
}
if z.tail == 0 {
z.head = 0
}
}
// StreamLexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type StreamLexer struct {
r io.Reader
err error
pool bufferPool
buf []byte
start int // index in buf
pos int // index in buf
prevStart int
free int
}
// NewStreamLexer returns a new StreamLexer for a given io.Reader with a 4kB estimated buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexer(r io.Reader) *StreamLexer {
return NewStreamLexerSize(r, defaultBufSize)
}
// NewStreamLexerSize returns a new StreamLexer for a given io.Reader and estimated required buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexerSize(r io.Reader, size int) *StreamLexer {
// if reader has the bytes in memory already, use that instead
if buffer, ok := r.(interface {
Bytes() []byte
}); ok {
return &StreamLexer{
err: io.EOF,
buf: buffer.Bytes(),
}
}
return &StreamLexer{
r: r,
buf: make([]byte, 0, size),
}
}
func (z *StreamLexer) read(pos int) byte {
if z.err != nil {
return 0
}
// free unused bytes
z.pool.free(z.free)
z.free = 0
// get new buffer
c := cap(z.buf)
p := pos - z.start + 1
if 2*p > c { // if the token is larger than half the buffer, increase buffer size
c = 2*c + p
}
d := len(z.buf) - z.start
buf := z.pool.swap(z.buf[:z.start], c)
copy(buf[:d], z.buf[z.start:]) // copy the left-overs (unfinished token) from the old buffer
// read in new data for the rest of the buffer
var n int
for pos-z.start >= d && z.err == nil {
n, z.err = z.r.Read(buf[d:cap(buf)])
d += n
}
pos -= z.start
z.pos -= z.start
z.start, z.buf = 0, buf[:d]
if pos >= d {
return 0
}
return z.buf[pos]
}
// Err returns the error returned from io.Reader. It may still return valid bytes for a while though.
func (z *StreamLexer) Err() error {
if z.err == io.EOF && z.pos < len(z.buf) {
return nil
}
return z.err
}
// Free frees up bytes of length n from previously shifted tokens.
// Each call to Shift should at one point be followed by a call to Free with a length returned by ShiftLen.
func (z *StreamLexer) Free(n int) {
z.free += n
}
// Peek returns the ith byte relative to the end position and possibly does an allocation.
// Peek returns zero when an error has occurred, Err returns the error.
// TODO: inline function
func (z *StreamLexer) Peek(pos int) byte {
pos += z.pos
if uint(pos) < uint(len(z.buf)) { // uint for BCE
return z.buf[pos]
}
return z.read(pos)
}
// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *StreamLexer) PeekRune(pos int) (rune, int) {
// from unicode/utf8
c := z.Peek(pos)
if c < 0xC0 {
return rune(c), 1
} else if c < 0xE0 {
return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
} else if c < 0xF0 {
return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
}
return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}
// Move advances the position.
func (z *StreamLexer) Move(n int) {
z.pos += n
}
// Pos returns a mark to which the position can be rewound.
func (z *StreamLexer) Pos() int {
return z.pos - z.start
}
// Rewind rewinds the position to the given position.
func (z *StreamLexer) Rewind(pos int) {
z.pos = z.start + pos
}
// Lexeme returns the bytes of the current selection.
func (z *StreamLexer) Lexeme() []byte {
return z.buf[z.start:z.pos]
}
// Skip collapses the position to the end of the selection.
func (z *StreamLexer) Skip() {
z.start = z.pos
}
// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
// It also returns the number of bytes we moved since the last call to Shift. This can be used in calls to Free.
func (z *StreamLexer) Shift() []byte {
if z.pos > len(z.buf) { // make sure we peeked at least as much as we shift
z.read(z.pos - 1)
}
b := z.buf[z.start:z.pos]
z.start = z.pos
return b
}
// ShiftLen returns the number of bytes moved since the last call to ShiftLen. This can be used in calls to Free because it takes into account multiple Shifts or Skips.
func (z *StreamLexer) ShiftLen() int {
n := z.start - z.prevStart
z.prevStart = z.start
return n
}


@ -1,41 +0,0 @@
package buffer // import "github.com/tdewolff/buffer"
// Writer implements an io.Writer over a byte slice.
type Writer struct {
buf []byte
}
// NewWriter returns a new Writer for a given byte slice.
func NewWriter(buf []byte) *Writer {
return &Writer{
buf: buf,
}
}
// Write writes bytes from the given byte slice and returns the number of bytes written and an error if one occurred. When err != nil, n == 0.
func (w *Writer) Write(b []byte) (int, error) {
n := len(b)
end := len(w.buf)
if end+n > cap(w.buf) {
buf := make([]byte, end, 2*cap(w.buf)+n)
copy(buf, w.buf)
w.buf = buf
}
w.buf = w.buf[:end+n]
return copy(w.buf[end:], b), nil
}
// Len returns the length of the underlying byte slice.
func (w *Writer) Len() int {
return len(w.buf)
}
// Bytes returns the underlying byte slice.
func (w *Writer) Bytes() []byte {
return w.buf
}
// Reset empties and reuses the current buffer. Subsequent writes will overwrite the buffer, so any reference to the underlying slice is invalidated after this call.
func (w *Writer) Reset() {
w.buf = w.buf[:0]
}

vendor/github.com/tdewolff/minify/.gitattributes (new file, generated, vendored, 1 line)

@ -0,0 +1 @@
benchmarks/sample_* linguist-generated=true

vendor/github.com/tdewolff/minify/.gitignore (new file, generated, vendored, 4 lines)

@ -0,0 +1,4 @@
dist/
benchmarks/*
!benchmarks/*.go
!benchmarks/sample_*

vendor/github.com/tdewolff/minify/.goreleaser.yml (new file, generated, vendored, 28 lines)

@ -0,0 +1,28 @@
builds:
- binary: minify
main: ./cmd/minify/
ldflags: -s -w -X main.Version={{.Version}} -X main.Commit={{.Commit}} -X main.Date={{.Date}}
env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
- freebsd
- netbsd
- openbsd
goarch:
- amd64
archive:
format: tar.gz
format_overrides:
- goos: windows
format: zip
name_template: "{{.Binary}}_{{.Version}}_{{.Os}}_{{.Arch}}"
files:
- README.md
- LICENSE.md
snapshot:
name_template: "devel"
release:
draft: true


@ -3,5 +3,3 @@ before_install:
- go get github.com/mattn/goveralls
script:
- goveralls -v -service travis-ci -repotoken $COVERALLS_TOKEN -ignore=cmd/minify/* || go test -v ./...
addons:
srcclr: true


@ -1,18 +1,14 @@
# Minify <a name="minify"></a> [![Build Status](https://travis-ci.org/tdewolff/minify.svg?branch=master)](https://travis-ci.org/tdewolff/minify) [![GoDoc](http://godoc.org/github.com/tdewolff/minify?status.svg)](http://godoc.org/github.com/tdewolff/minify) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/minify/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/minify?branch=master) [![Join the chat at https://gitter.im/tdewolff/minify](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tdewolff/minify?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**The preferred stable release is v2. Master has some new changes for SVG that haven't yet endured the test of time; bug reports are appreciated.**
**[Online demo](http://go.tacodewolff.nl/minify) if you need to minify files *now*.**
**[Online demo](https://go.tacodewolff.nl/minify) if you need to minify files *now*.**
**[Command line tool](https://github.com/tdewolff/minify/tree/master/cmd/minify) that minifies concurrently and supports watching file changes.**
**[All releases](https://dl.equinox.io/tdewolff/minify/stable) on Equinox for various platforms.**
If this software is useful to you, consider making a [donation](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=27MSRR5UJQQUL)! When a significant amount has been deposited, I will write a much improved JS minifier.
**[All releases](https://github.com/tdewolff/minify/releases) for various platforms.**
---
Minify is a minifier package written in [Go][1]. It provides HTML5, CSS3, JS, JSON, SVG and XML minifiers and an interface to implement any other minifier. Minification is the process of removing bytes from a file (such as whitespace) without changing its output and therefore shrinking its size and speeding up transmission over the internet and possibly parsing. The implemented minifiers are high performance and streaming, which implies O(n).
Minify is a minifier package written in [Go][1]. It provides HTML5, CSS3, JS, JSON, SVG and XML minifiers and an interface to implement any other minifier. Minification is the process of removing bytes from a file (such as whitespace) without changing its output and therefore shrinking its size and speeding up transmission over the internet and possibly parsing. The implemented minifiers are designed for high performance.
The core functionality associates mimetypes with minification functions, allowing embedded resources (like CSS or JS within HTML files) to be minified as well. Users can add new implementations that are triggered based on a mimetype (or pattern), or redirect to an external command (like ClosureCompiler, UglifyCSS, ...).
@ -23,6 +19,7 @@ The core functionality associates mimetypes with minification functions, allowin
- [Installation](#installation)
- [API stability](#api-stability)
- [Testing](#testing)
- [Performance](#performance)
- [HTML](#html)
- [Whitespace removal](#whitespace-removal)
- [CSS](#css)
@ -47,24 +44,35 @@ The core functionality associates mimetypes with minification functions, allowin
- [Templates](#templates)
- [License](#license)
#### Status
### Status
* CSS: **fully implemented**
* HTML: **fully implemented**
* JS: basic JSmin-like implementation
* JS: improved JSmin implementation
* JSON: **fully implemented**
* SVG: partially implemented; in development
* XML: **fully implemented**
## Prologue
Minifiers or bindings to minifiers exist in almost all programming languages. Some implementations are merely using several regular-expressions to trim whitespace and comments (even though regex for parsing HTML/XML is ill-advised, for a good read see [Regular Expressions: Now You Have Two Problems](http://blog.codinghorror.com/regular-expressions-now-you-have-two-problems/)). Some implementations are much more profound, such as the [YUI Compressor](http://yui.github.io/yuicompressor/) and [Google Closure Compiler](https://github.com/google/closure-compiler) for JS. As most existing implementations either use Java or JavaScript and don't focus on performance, they are pretty slow. And loading the whole file into memory is bad for really large files (or impossible for infinite streams).
### Roadmap
This minifier proves to be that fast and extensive minifier that can handle HTML and any other filetype it may contain (CSS, JS, ...). It streams the input and output and can minify files concurrently.
- [ ] General speed-up of all minifiers (use ASM for whitespace funcs)
- [ ] Improve JS minifiers by shortening variables and proper semicolon omission
- [ ] Speed-up SVG minifier, it is very slow
- [x] Proper parser error reporting and line number + column information
- [ ] Generation of source maps (uncertain, might slow down parsers too much if it cannot run separately nicely)
- [ ] Look into compression of images, fonts and other web resources (into package `compress`)?
- [ ] Create a cmd to pack webfiles (much like webpack), i.e. merging CSS and JS files, inlining small external files, minification and gzipping. This would work on HTML files.
- [ ] Create a package to format files, much like `gofmt` for Go files?
## Prologue
Minifiers or bindings to minifiers exist in almost all programming languages. Some implementations are merely using several regular-expressions to trim whitespace and comments (even though regex for parsing HTML/XML is ill-advised, for a good read see [Regular Expressions: Now You Have Two Problems](http://blog.codinghorror.com/regular-expressions-now-you-have-two-problems/)). Some implementations are much more profound, such as the [YUI Compressor](http://yui.github.io/yuicompressor/) and [Google Closure Compiler](https://github.com/google/closure-compiler) for JS. As most existing implementations either use JavaScript, use regexes, or don't focus on performance, they are pretty slow.
This minifier proves to be that fast and extensive minifier that can handle HTML and any other filetype it may contain (CSS, JS, ...). It is usually orders of magnitude faster than existing minifiers.
## Installation
Run the following command
go get github.com/tdewolff/minify
go get -u github.com/tdewolff/minify
or add the following imports and run the project with `go get`
``` go
@ -82,37 +90,67 @@ import (
## API stability
There is no guarantee for absolute stability, but I take issues and bugs seriously and don't take API changes lightly. The library will be maintained in a compatible way unless vital bugs prevent me from doing so. There has been one API change after v1 which added options support and I took the opportunity to push through some more API clean up as well. There are no plans whatsoever for future API changes.
- minify-v1.0.0 depends on parse-v1.0.0
- minify-v1.1.0 depends on parse-v1.1.0
- minify-v2.0.0 depends on parse-v2.0.0
- minify-v2.1.0 depends on parse-v2.1.0
- minify-tip will always compile with my other packages on tip
The API differences between v1 and v2 are listed below. If `m := minify.New()` and `w` and `r` are your writer and reader respectively, then **v1** &#8594; **v2**:
- `minify.Bytes(m, ...)` &#8594; `m.Bytes(...)`
- `minify.String(m, ...)` &#8594; `m.String(...)`
- `html.Minify(m, "text/html", w, r)` &#8594; `html.Minify(m, w, r, nil)` also for `css`, `js`, ...
- `css.Minify(m, "text/css;inline=1", w, r)` &#8594; `css.Minify(m, w, r, map[string]string{"inline":"1"})`
## Testing
For all subpackages and the imported `parse` and `buffer` packages, test coverage of 100% is pursued. Besides full coverage, the minifiers are [fuzz tested](https://github.com/tdewolff/fuzz) using [github.com/dvyukov/go-fuzz](http://www.github.com/dvyukov/go-fuzz); see [the wiki](https://github.com/tdewolff/minify/wiki) for the most important bugs found by fuzz testing. Furthermore, I am working on adding visual testing to ensure that minification doesn't change anything visually. By using the WebKit browser to render the original and minified pages, we can check whether any pixel is different.
These tests ensure that everything works as intended, the code does not crash (whatever the input) and that it doesn't change the final result visually. If you still encounter a bug, please report [here](https://github.com/tdewolff/minify/issues)!
## Performance
The benchmarks directory contains a number of standardized samples used to compare performance between changes. To give an indication of the speed of this library, I've ran the tests on my Thinkpad T460 (i5-6300U quad-core 2.4GHz running Arch Linux) using Go 1.9.2.
```
name time/op
CSS/sample_bootstrap.css-4 2.26ms ± 0%
CSS/sample_gumby.css-4 2.92ms ± 1%
HTML/sample_amazon.html-4 2.33ms ± 2%
HTML/sample_bbc.html-4 1.02ms ± 1%
HTML/sample_blogpost.html-4 171µs ± 2%
HTML/sample_es6.html-4 14.5ms ± 0%
HTML/sample_stackoverflow.html-4 2.41ms ± 1%
HTML/sample_wikipedia.html-4 4.76ms ± 0%
JS/sample_ace.js-4 7.41ms ± 0%
JS/sample_dot.js-4 63.7µs ± 0%
JS/sample_jquery.js-4 2.99ms ± 0%
JS/sample_jqueryui.js-4 5.92ms ± 2%
JS/sample_moment.js-4 1.09ms ± 1%
JSON/sample_large.json-4 2.95ms ± 0%
JSON/sample_testsuite.json-4 1.51ms ± 1%
JSON/sample_twitter.json-4 6.75µs ± 1%
SVG/sample_arctic.svg-4 62.3ms ± 1%
SVG/sample_gopher.svg-4 218µs ± 0%
SVG/sample_usa.svg-4 33.1ms ± 3%
XML/sample_books.xml-4 36.2µs ± 0%
XML/sample_catalog.xml-4 14.9µs ± 0%
XML/sample_omg.xml-4 6.31ms ± 1%
name speed
CSS/sample_bootstrap.css-4 60.8MB/s ± 0%
CSS/sample_gumby.css-4 63.9MB/s ± 1%
HTML/sample_amazon.html-4 203MB/s ± 2%
HTML/sample_bbc.html-4 113MB/s ± 1%
HTML/sample_blogpost.html-4 123MB/s ± 2%
HTML/sample_es6.html-4 70.7MB/s ± 0%
HTML/sample_stackoverflow.html-4 85.2MB/s ± 1%
HTML/sample_wikipedia.html-4 93.6MB/s ± 0%
JS/sample_ace.js-4 86.9MB/s ± 0%
JS/sample_dot.js-4 81.0MB/s ± 0%
JS/sample_jquery.js-4 82.8MB/s ± 0%
JS/sample_jqueryui.js-4 79.3MB/s ± 2%
JS/sample_moment.js-4 91.2MB/s ± 1%
JSON/sample_large.json-4 258MB/s ± 0%
JSON/sample_testsuite.json-4 457MB/s ± 1%
JSON/sample_twitter.json-4 226MB/s ± 1%
SVG/sample_arctic.svg-4 23.6MB/s ± 1%
SVG/sample_gopher.svg-4 26.7MB/s ± 0%
SVG/sample_usa.svg-4 30.9MB/s ± 3%
XML/sample_books.xml-4 122MB/s ± 0%
XML/sample_catalog.xml-4 130MB/s ± 0%
XML/sample_omg.xml-4 180MB/s ± 1%
```
## HTML
HTML (with JS and CSS) minification typically runs at about 40MB/s ~= 140GB/h, depending on the composition of the file.
Website | Original | Minified | Ratio | Time<sup>&#42;</sup>
------- | -------- | -------- | ----- | -----------------------
[Amazon](http://www.amazon.com/) | 463kB | **414kB** | 90% | 10ms
[BBC](http://www.bbc.com/) | 113kB | **96kB** | 85% | 3ms
[StackOverflow](http://stackoverflow.com/) | 201kB | **182kB** | 91% | 5ms
[Wikipedia](http://en.wikipedia.org/wiki/President_of_the_United_States) | 435kB | **410kB** | 94%<sup>&#42;&#42;</sup> | 11ms
<sup>&#42;</sup>These times are measured on my home computer which is an average development computer. The duration varies a lot but it's important to see it's in the 10ms range! The benchmark uses all the minifiers and excludes reading from and writing to the file from the measurement.
<sup>&#42;&#42;</sup>Is already somewhat minified, so this doesn't reflect the full potential of this minifier.
HTML (with JS and CSS) minification typically shaves off about 10%.
The HTML5 minifier uses these minifications:
@ -130,7 +168,7 @@ The HTML5 minifier uses these minifications:
Options:
- `KeepConditionalComments` preserve all IE conditional comments such as `<!--[if IE 6]><![endif]-->` and `<![if IE 6]><![endif]>`, see https://msdn.microsoft.com/en-us/library/ms537512(v=vs.85).aspx#syntax
- `KeepDefaultAttrVals` preserve default attribute values such as `<script type="text/javascript">`
- `KeepDefaultAttrVals` preserve default attribute values such as `<script type="application/javascript">`
- `KeepDocumentTags` preserve `html`, `head` and `body` tags
- `KeepEndTags` preserve all end tags
- `KeepWhitespace` preserve whitespace between inline tags but still collapse multiple whitespace characters into one
@ -146,32 +184,27 @@ Make sure your HTML doesn't depend on whitespace between `block` elements that h
## CSS
Minification typically runs at about 25MB/s ~= 90GB/h.
Library | Original | Minified | Ratio | Time<sup>&#42;</sup>
------- | -------- | -------- | ----- | -----------------------
[Bootstrap](http://getbootstrap.com/) | 134kB | **111kB** | 83% | 4ms
[Gumby](http://gumbyframework.com/) | 182kB | **167kB** | 90% | 7ms
<sup>&#42;</sup>The benchmark excludes the time reading from and writing to a file from the measurement.
Minification typically shaves off about 10%-15%. This CSS minifier will _not_ do structural changes to your stylesheets. Although this could result in smaller files, the complexity is quite high and the risk of breaking websites is high too.
The CSS minifier will only use safe minifications:
- remove comments and unnecessary whitespace
- remove comments and unnecessary whitespace (but keep `/*! ... */` which usually contains the license)
- remove trailing semicolons
- optimize `margin`, `padding` and `border-width` number of sides
- shorten numbers by removing unnecessary `+` and zeros and rewriting with/without exponent
- remove dimension and percentage for zero values
- remove quotes for URLs
- remove quotes for font families and make lowercase
- rewrite hex colors to/from color names, or to 3 digit hex
- rewrite hex colors to/from color names, or to three digit hex
- rewrite `rgb(`, `rgba(`, `hsl(` and `hsla(` colors to hex or name
- use four digit hex for alpha values (`transparent` &#8594; `#0000`)
- replace `normal` and `bold` by numbers for `font-weight` and `font`
- replace `none` &#8594; `0` for `border`, `background` and `outline`
- lowercase all identifiers except classes, IDs and URLs to enhance gzip compression
- shorten MS alpha function
- rewrite data URIs with base64 or ASCII whichever is shorter
- calls minifier for data URI mediatypes, thus you can compress embedded SVG files if you have that minifier attached
- shorten aggregate declarations such as `background` and `font`
It does purposely not use the following techniques:
@ -184,26 +217,18 @@ It does purposely not use the following techniques:
- rewrite attribute selectors for IDs and classes (`div[id=a]` &#8594; `div#a`)
- put space after pseudo-selectors (IE6 is old, move on!)
It's great that so many other tools make comparison tables: [CSS Minifier Comparison](http://www.codenothing.com/benchmarks/css-compressor-3.0/full.html), [CSS minifiers comparison](http://www.phpied.com/css-minifiers-comparison/) and [CleanCSS tests](http://goalsmashers.github.io/css-minification-benchmark/). From the last link, this CSS minifier is almost without doubt the fastest and has near-perfect minification rates. It falls short with the purposely not implemented and often unsafe techniques, so that's fine.
There are a couple of comparison tables online, such as [CSS Minifier Comparison](http://www.codenothing.com/benchmarks/css-compressor-3.0/full.html), [CSS minifiers comparison](http://www.phpied.com/css-minifiers-comparison/) and [CleanCSS tests](http://goalsmashers.github.io/css-minification-benchmark/). Comparing speed between each, this minifier will usually be between 10x-300x faster than existing implementations, and even rank among the top for minification ratios. It falls short with the purposely not implemented and often unsafe techniques.
Options:
- `Decimals` number of decimals to preserve for numbers, `-1` means no trimming
- `KeepCSS2` prohibits using CSS3 syntax (such as exponents in numbers, or `rgba(` &#8594; `rgb(`), might be incomplete
## JS
The JS minifier is pretty basic. It removes comments, whitespace and line breaks whenever it can. It employs all the rules that [JSMin](http://www.crockford.com/javascript/jsmin.html) does too, but has additional improvements. For example, the prefix-postfix bug is fixed.
Minification typically runs at about 50MB/s ~= 180GB/h. Common speeds of PHP and JS implementations are about 100-300kB/s (see [Uglify2](http://lisperator.net/uglifyjs/), [Adventures in PHP web asset minimization](https://www.happyassassin.net/2014/12/29/adventures-in-php-web-asset-minimization/)).
Library | Original | Minified | Ratio | Time<sup>&#42;</sup>
------- | -------- | -------- | ----- | -----------------------
[ACE](https://github.com/ajaxorg/ace-builds) | 630kB | **442kB** | 70% | 12ms
[jQuery](http://jquery.com/download/) | 242kB | **130kB** | 54% | 5ms
[jQuery UI](http://jqueryui.com/download/) | 459kB | **300kB** | 65% | 10ms
[Moment](http://momentjs.com/) | 97kB | **51kB** | 52% | 2ms
<sup>&#42;</sup>The benchmark excludes the time reading from and writing to a file from the measurement.
Common speeds of PHP and JS implementations are about 100-300kB/s (see [Uglify2](http://lisperator.net/uglifyjs/), [Adventures in PHP web asset minimization](https://www.happyassassin.net/2014/12/29/adventures-in-php-web-asset-minimization/)). This implementation is orders of magnitude faster, at around 80MB/s.
TODO:
- shorten local variables / function parameters names
@ -211,14 +236,12 @@ TODO:
## JSON
Minification typically runs at about 95MB/s ~= 340GB/h. It shaves off about 15% of filesize for common indented JSON such as generated by [JSON Generator](http://www.json-generator.com/).
Minification typically shaves off about 15% of filesize for common indented JSON such as generated by [JSON Generator](http://www.json-generator.com/).
The JSON minifier only removes whitespace, which is the only thing that can be left out.
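A minimal usage sketch, matching the registration shown in the examples further down:

``` go
package main

import (
	"fmt"
	"regexp"

	"github.com/tdewolff/minify"
	"github.com/tdewolff/minify/json"
)

func main() {
	m := minify.New()
	m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
	out, err := m.String("application/json", `{ "a": [1, 2, 3] }`)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {"a":[1,2,3]}
}
```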
## SVG
Minification typically runs at about 15MB/s ~= 55GB/h. Performance improvement are due.
The SVG minifier uses these minifications:
- trim and collapse whitespace between all tags
@ -226,7 +249,6 @@ The SVG minifier uses these minifications:
- strip SVG version
- strip CDATA sections wherever possible
- collapse tags with no content to a void tag
- collapse empty container tags (`g`, `svg`, ...)
- minify style tag and attributes with the CSS minifier
- minify colors
- shorten lengths and numbers and remove default `px` unit
@ -237,7 +259,6 @@ The SVG minifier uses these minifications:
TODO:
- convert attributes to style attribute whenever shorter
- merge path data? (same style and no intersection -- the latter is difficult)
- truncate decimals
Options:
@ -245,8 +266,6 @@ Options:
## XML
Minification typically runs at about 70MB/s ~= 250GB/h.
The XML minifier uses these minifications:
- strip unnecessary whitespace and otherwise collapse it to one space (or newline if it originally contained a newline)
@ -272,8 +291,8 @@ The following loads all provided minifiers.
m := minify.New()
m.AddFunc("text/css", css.Minify)
m.AddFunc("text/html", html.Minify)
m.AddFunc("text/javascript", js.Minify)
m.AddFunc("image/svg+xml", svg.Minify)
m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
```
@ -404,13 +423,13 @@ func main() {
m := minify.New()
m.AddFunc("text/css", css.Minify)
m.AddFunc("text/html", html.Minify)
m.AddFunc("text/javascript", js.Minify)
m.AddFunc("image/svg+xml", svg.Minify)
m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
// Or use the following for better minification of JS but lower speed:
// m.AddCmd("text/javascript", exec.Command("java", "-jar", "build/compiler.jar"))
// m.AddCmdRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), exec.Command("java", "-jar", "build/compiler.jar"))
if err := m.Minify("text/html", os.Stdout, os.Stdin); err != nil {
panic(err)
@ -470,8 +489,8 @@ func main() {
m := minify.New()
m.AddFunc("text/css", css.Minify)
m.AddFunc("text/html", html.Minify)
m.AddFunc("text/javascript", js.Minify)
m.AddFunc("image/svg+xml", svg.Minify)
m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)


@ -6,14 +6,14 @@ import (
"net/url"
"github.com/tdewolff/parse"
"github.com/tdewolff/strconv"
"github.com/tdewolff/parse/strconv"
)
// Epsilon is the closest number to zero that is not considered to be zero.
var Epsilon = 0.00001
// ContentType minifies a given mediatype by removing all whitespace.
func ContentType(b []byte) []byte {
// Mediatype minifies a given mediatype by removing all whitespace.
func Mediatype(b []byte) []byte {
j := 0
start := 0
inString := false
@ -79,6 +79,107 @@ func DataURI(m *M, dataURI []byte) []byte {
const MaxInt = int(^uint(0) >> 1)
const MinInt = -MaxInt - 1
// Decimal minifies a given byte slice containing a number (see parse.Number) and removes superfluous characters.
// It does not parse or output exponents.
func Decimal(num []byte, prec int) []byte {
// omit the first + and register the mantissa start and end, and whether it's negative
neg := false
start := 0
dot := -1
end := len(num)
if 0 < end && (num[0] == '+' || num[0] == '-') {
if num[0] == '-' {
neg = true
}
start++
}
for i, c := range num[start:] {
if c == '.' {
dot = start + i
break
}
}
if dot == -1 {
dot = end
}
// trim leading zeros but leave at least one digit
for start < end-1 && num[start] == '0' {
start++
}
// trim trailing zeros
i := end - 1
for ; i > dot; i-- {
if num[i] != '0' {
end = i + 1
break
}
}
if i == dot {
end = dot
if start == end {
num[start] = '0'
return num[start : start+1]
}
} else if start == end-1 && num[start] == '0' {
return num[start:end]
}
// apply precision
if prec > -1 && dot+1+prec < end {
end = dot + 1 + prec
inc := num[end] >= '5'
if inc || num[end-1] == '0' {
for i := end - 1; i > start; i-- {
if i == dot {
end--
} else if inc {
if num[i] == '9' {
if i > dot {
end--
} else {
num[i] = '0'
}
} else {
num[i]++
inc = false
break
}
} else if i > dot && num[i] == '0' {
end--
}
}
}
if dot == start && end == start+1 {
if inc {
num[start] = '1'
} else {
num[start] = '0'
}
} else {
if dot+1 == end {
end--
}
if inc {
if num[start] == '9' {
num[start] = '0'
copy(num[start+1:], num[start:end])
end++
num[start] = '1'
} else {
num[start]++
}
}
}
}
if neg {
start--
num[start] = '-'
}
return num[start:end]
}
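A short, hedged usage note: like `Number`, `Decimal` rewrites the buffer in place and returns a subslice, and `prec` caps the number of kept decimals (rounding half up), with `-1` meaning no trimming:

```go
package main

import (
	"fmt"

	"github.com/tdewolff/minify"
)

func main() {
	// Decimal rewrites its input in place and returns a subslice.
	fmt.Println(string(minify.Decimal([]byte("0.127"), 2)))  // .13
	fmt.Println(string(minify.Decimal([]byte("100.0"), -1))) // 100
}
```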
// Number minifies a given byte slice containing a number (see parse.Number) and removes superfluous characters.
func Number(num []byte, prec int) []byte {
// omit first + and register mantissa start and end, whether it's negative and the exponent
@ -311,24 +412,46 @@ func Number(num []byte, prec int) []byte {
}
} else {
// case 3
if dot < end {
if dot == start {
copy(num[start:], num[end-n:end])
end = start + n
} else {
copy(num[dot:], num[dot+1:end])
end--
// find new end, considering moving numbers to the front, removing the dot and increasing the length of the exponent
newEnd := end
if dot == start {
newEnd = start + n
} else {
newEnd--
}
newEnd += 2 + lenIntExp
exp := intExp
lenExp := lenIntExp
if newEnd < len(num) {
// it saves space to convert the decimal to an integer and decrease the exponent
if dot < end {
if dot == start {
copy(num[start:], num[end-n:end])
end = start + n
} else {
copy(num[dot:], num[dot+1:end])
end--
}
}
} else {
// it does not save space and would write past the buffer, so we revert to the original representation
exp = origExp
lenExp = 1
if origExp <= -10 || origExp >= 10 {
lenExp = strconv.LenInt(int64(origExp))
}
}
num[end] = 'e'
num[end+1] = '-'
end += 2
intExp = -intExp
for i := end + lenIntExp - 1; i >= end; i-- {
num[i] = byte(intExp%10) + '0'
intExp /= 10
exp = -exp
for i := end + lenExp - 1; i >= end; i-- {
num[i] = byte(exp%10) + '0'
exp /= 10
}
end += lenIntExp
end += lenExp
}
if neg {


@ -31,6 +31,8 @@ func (o *Minifier) Minify(_ *minify.M, w io.Writer, r io.Reader, _ map[string]st
skipComma := true
p := json.NewParser(r)
defer p.Restore()
for {
state := p.State()
gt, text := p.Next()


@ -12,8 +12,8 @@ import (
"regexp"
"sync"
"github.com/tdewolff/buffer"
"github.com/tdewolff/parse"
"github.com/tdewolff/parse/buffer"
)
// ErrNotExist is returned when no minifier exists for a given mimetype.
@ -112,11 +112,11 @@ func (m *M) Match(mediatype string) (string, map[string]string, MinifierFunc) {
mimetype, params := parse.Mediatype([]byte(mediatype))
if minifier, ok := m.literal[string(mimetype)]; ok { // string conversion is optimized away
return string(mimetype), params, minifier.Minify
} else {
for _, minifier := range m.pattern {
if minifier.pattern.Match(mimetype) {
return minifier.pattern.String(), params, minifier.Minify
}
}
for _, minifier := range m.pattern {
if minifier.pattern.Match(mimetype) {
return minifier.pattern.String(), params, minifier.Minify
}
}
return string(mimetype), params, nil


@ -1,4 +1,4 @@
package strconv // import "github.com/tdewolff/strconv"
package strconv // import "github.com/tdewolff/parse/strconv"
import "math"


@ -1,6 +1,8 @@
package strconv // import "github.com/tdewolff/strconv"
package strconv // import "github.com/tdewolff/parse/strconv"
import "math"
import (
"math"
)
// Int parses a byte-slice and returns the integer it represents.
// If an invalid character is encountered, it will stop there.
@ -34,6 +36,9 @@ func ParseInt(b []byte) (int64, int) {
func LenInt(i int64) int {
if i < 0 {
if i == -9223372036854775808 {
return 19
}
i = -i
}
switch {

vendor/github.com/tdewolff/parse/strconv/price.go (new file, generated, vendored, 83 lines)

@ -0,0 +1,83 @@
package strconv
// AppendPrice will append an int64 formatted as a price, where the int64 is the price in cents.
// It does not print a sign; a negative price is formatted using its absolute value.
func AppendPrice(b []byte, price int64, dec bool, milSeparator byte, decSeparator byte) []byte {
if price < 0 {
if price == -9223372036854775808 {
x := []byte("92 233 720 368 547 758 08")
x[2] = milSeparator
x[6] = milSeparator
x[10] = milSeparator
x[14] = milSeparator
x[18] = milSeparator
x[22] = decSeparator
return append(b, x...)
}
price = -price
}
// rounding
if !dec {
firstDec := (price / 10) % 10
if firstDec >= 5 {
price += 100
}
}
// calculate size
n := LenInt(price) - 2
if n > 0 {
n += (n - 1) / 3 // mil separator
} else {
n = 1
}
if dec {
n += 2 + 1 // decimals + dec separator
}
// resize byte slice
i := len(b)
if i+n > cap(b) {
b = append(b, make([]byte, n)...)
} else {
b = b[:i+n]
}
// print fractional-part
i += n - 1
if dec {
for j := 0; j < 2; j++ {
c := byte(price%10) + '0'
price /= 10
b[i] = c
i--
}
b[i] = decSeparator
i--
} else {
price /= 100
}
if price == 0 {
b[i] = '0'
return b
}
// print integer-part
j := 0
for price > 0 {
if j == 3 {
b[i] = milSeparator
i--
j = 0
}
c := byte(price%10) + '0'
price /= 10
b[i] = c
i--
j++
}
return b
}
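A hedged usage sketch: the separators are caller-chosen bytes, and with `dec` set to false the cents are rounded away:

```go
package main

import (
	"fmt"

	"github.com/tdewolff/parse/strconv"
)

func main() {
	b := strconv.AppendPrice(nil, 123456789, true, ',', '.')
	fmt.Println(string(b)) // 1,234,567.89

	b = strconv.AppendPrice(b[:0], 1999, false, ',', '.')
	fmt.Println(string(b)) // 20 (19.99 rounds up; cents are dropped)
}
```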


@ -1,22 +0,0 @@
Copyright (c) 2015 Taco de Wolff
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.


@ -1,10 +0,0 @@
# Strconv [![GoDoc](http://godoc.org/github.com/tdewolff/strconv?status.svg)](http://godoc.org/github.com/tdewolff/strconv)
This package contains string conversion functions and is written in [Go][1]. It is much like the standard library's strconv package, but it is specifically tailored for the performance needs within the minify package.
For example, the floating-point to string conversion function is approximately twice as fast as the standard library, but it is not as precise.
## License
Released under the [MIT license](LICENSE.md).
[1]: http://golang.org/ "Go Language"


@ -1,10 +1,14 @@
sudo: false
language: go
go:
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- 1.7
- tip
matrix:
allow_failures:
- go: tip
install:
- mkdir -p $HOME/gopath/src/gopkg.in

vendor/gopkg.in/sourcemap.v1/LICENSE (new file, generated, vendored, 25 lines)

@ -0,0 +1,25 @@
Copyright (c) 2016 The github.com/go-sourcemap/sourcemap Contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,3 +1,4 @@
all:
go test ./... -test.v -test.cpu=1,2,4
go test ./... -test.v -test.short -test.race
go test ./...
go test ./... -short -race
go vet


@ -1,4 +1,4 @@
# Source Maps consumer for Golang [![Build Status](https://travis-ci.org/go-sourcemap/sourcemap.svg)](https://travis-ci.org/go-sourcemap/sourcemap)
# Source Maps consumer for Golang [![Build Status](https://travis-ci.org/go-sourcemap/sourcemap.svg?branch=v1)](https://travis-ci.org/go-sourcemap/sourcemap)
## Installation


@ -14,22 +14,19 @@ const (
vlqContinuationBit = vlqBase
)
var (
decodeMap [256]int
)
var decodeMap [256]byte
func init() {
for i := 0; i < len(encodeStd); i++ {
decodeMap[encodeStd[i]] = i
decodeMap[encodeStd[i]] = byte(i)
}
}
func toVLQSigned(n int) int {
if n < 0 {
return -n<<1 + 1
} else {
return n << 1
}
return n << 1
}
func fromVLQSigned(n int) int {
@ -51,7 +48,7 @@ func NewEncoder(w io.ByteWriter) *Encoder {
}
}
func (enc *Encoder) Encode(n int) error {
func (enc Encoder) Encode(n int) error {
n = toVLQSigned(n)
for digit := vlqContinuationBit; digit&vlqContinuationBit != 0; {
digit = n & vlqBaseMask
@ -59,6 +56,7 @@ func (enc *Encoder) Encode(n int) error {
if n > 0 {
digit |= vlqContinuationBit
}
err := enc.w.WriteByte(encodeStd[digit])
if err != nil {
return err
@ -77,7 +75,7 @@ func NewDecoder(r io.ByteReader) *Decoder {
}
}
func (dec *Decoder) Decode() (n int, err error) {
func (dec Decoder) Decode() (n int, err error) {
shift := uint(0)
for continuation := true; continuation; {
c, err := dec.r.ReadByte()
@ -85,10 +83,9 @@ func (dec *Decoder) Decode() (n int, err error) {
return 0, err
}
digit := decodeMap[c]
continuation = digit&vlqContinuationBit != 0
digit &= vlqBaseMask
n = n + digit<<shift
c = decodeMap[c]
continuation = c&vlqContinuationBit != 0
n += int(c&vlqBaseMask) << shift
shift += vlqBaseShift
}
return fromVLQSigned(n), nil
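
For orientation: each base64 VLQ digit carries five payload bits plus a continuation flag in bit 5, and the low bit of the assembled value is the sign, which is exactly what `toVLQSigned`/`fromVLQSigned` above implement. A self-contained decode sketch (the helper names here are illustrative, not the package's API):

```go
package main

import (
	"fmt"
	"strings"
)

const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

// decodeVLQ reads one signed base64 VLQ value from the front of s and
// returns the value plus the number of characters consumed.
// Input is assumed to be well-formed.
func decodeVLQ(s string) (value, size int) {
	n, shift := 0, uint(0)
	for i := 0; i < len(s); i++ {
		c := strings.IndexByte(encodeStd, s[i])
		n += (c & 31) << shift // low five bits carry payload
		if c&32 == 0 {         // bit 5 clear: this was the last digit
			if n&1 != 0 { // low bit of the result is the sign
				return -(n >> 1), i + 1
			}
			return n >> 1, i + 1
		}
		shift += 5
	}
	return 0, 0 // truncated input
}

func main() {
	for _, s := range []string{"A", "C", "D", "2H"} {
		v, _ := decodeVLQ(s)
		fmt.Printf("%q -> %d\n", s, v)
	}
	// Output:
	// "A" -> 0
	// "C" -> 1
	// "D" -> -1
	// "2H" -> 123
}
```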


@ -1,17 +1,12 @@
package sourcemap // import "gopkg.in/sourcemap.v1"
package sourcemap
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/url"
"path"
"sort"
"strconv"
"strings"
"gopkg.in/sourcemap.v1/base64vlq"
)
type Consumer struct {
@ -28,7 +23,10 @@ func Parse(mapURL string, b []byte) (*Consumer, error) {
}
if smap.Version != 3 {
return nil, errors.New("sourcemap: only 3rd version is supported")
return nil, fmt.Errorf(
"sourcemap: got version=%d, but only 3rd version is supported",
smap.Version,
)
}
var sourceRootURL *url.URL
@ -86,7 +84,10 @@ func (c *Consumer) Source(genLine, genCol int) (source, name string, line, col i
match := &c.mappings[i]
// Fuzzy match.
if match.genCol > genCol && i > 0 {
if match.genLine > genLine || match.genCol > genCol {
if i == 0 {
return
}
match = &c.mappings[i-1]
}
@ -94,14 +95,14 @@ func (c *Consumer) Source(genLine, genCol int) (source, name string, line, col i
source = c.absSource(c.smap.Sources[match.sourcesInd])
}
if match.namesInd >= 0 {
iv := c.smap.Names[match.namesInd]
switch v := iv.(type) {
v := c.smap.Names[match.namesInd]
switch v := v.(type) {
case string:
name = v
case float64:
name = strconv.FormatFloat(v, 'f', -1, 64)
default:
name = fmt.Sprint(iv)
name = fmt.Sprint(v)
}
}
line = match.sourceLine
@ -131,182 +132,3 @@ func (c *Consumer) absSource(source string) string {
return source
}
func (c *Consumer) SourceName(genLine, genCol int, genName string) (name string, ok bool) {
ind := sort.Search(len(c.mappings), func(i int) bool {
m := c.mappings[i]
if m.genLine == genLine {
return m.genCol >= genCol
}
return m.genLine >= genLine
})
// Mapping not found.
if ind == len(c.mappings) {
return "", false
}
for i := ind; i >= 0; i-- {
m := c.mappings[i]
if m.namesInd == -1 {
continue
}
if c.smap.Names[m.namesInd] == "" {
}
}
return
}
type fn func() (fn, error)
type sourceMap struct {
Version int `json:"version"`
File string `json:"file"`
SourceRoot string `json:"sourceRoot"`
Sources []string `json:"sources"`
Names []interface{} `json:"names"`
Mappings string `json:"mappings"`
}
type mapping struct {
genLine int
genCol int
sourcesInd int
sourceLine int
sourceCol int
namesInd int
}
type mappings struct {
rd *strings.Reader
dec *base64vlq.Decoder
genLine int
genCol int
sourcesInd int
sourceLine int
sourceCol int
namesInd int
value mapping
values []mapping
}
func parseMappings(s string) ([]mapping, error) {
rd := strings.NewReader(s)
m := &mappings{
rd: rd,
dec: base64vlq.NewDecoder(rd),
genLine: 1,
sourceLine: 1,
}
m.zeroValue()
err := m.parse()
if err != nil {
return nil, err
}
return m.values, nil
}
func (m *mappings) parse() error {
next := m.parseGenCol
for {
c, err := m.rd.ReadByte()
if err == io.EOF {
m.pushValue()
return nil
} else if err != nil {
return err
}
switch c {
case ',':
m.pushValue()
next = m.parseGenCol
case ';':
m.pushValue()
m.genLine++
m.genCol = 0
next = m.parseGenCol
default:
m.rd.UnreadByte()
var err error
next, err = next()
if err != nil {
return err
}
}
}
}
func (m *mappings) parseGenCol() (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.genCol += n
m.value.genCol = m.genCol
return m.parseSourcesInd, nil
}
func (m *mappings) parseSourcesInd() (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.sourcesInd += n
m.value.sourcesInd = m.sourcesInd
return m.parseSourceLine, nil
}
func (m *mappings) parseSourceLine() (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.sourceLine += n
m.value.sourceLine = m.sourceLine
return m.parseSourceCol, nil
}
func (m *mappings) parseSourceCol() (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.sourceCol += n
m.value.sourceCol = m.sourceCol
return m.parseNamesInd, nil
}
func (m *mappings) parseNamesInd() (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.namesInd += n
m.value.namesInd = m.namesInd
return m.parseGenCol, nil
}
func (m *mappings) zeroValue() {
m.value = mapping{
genLine: m.genLine,
genCol: 0,
sourcesInd: -1,
sourceLine: 0,
sourceCol: 0,
namesInd: -1,
}
}
func (m *mappings) pushValue() {
m.values = append(m.values, m.value)
m.zeroValue()
}
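
Both the removed parser above and its replacement below drive decoding through a state type that returns the next state (`type fn func() (fn, error)` before, `type fn func(m *mappings) (fn, error)` after). A stripped-down sketch of that pattern, with hypothetical names:

```go
package main

import "fmt"

// stateFn processes one step and returns the next state, or nil when done.
type stateFn func(*parser) stateFn

type parser struct {
	input string
	pos   int
	out   []byte
}

// parseLetters copies bytes until it hits a separator or the end of input.
func parseLetters(p *parser) stateFn {
	for p.pos < len(p.input) {
		c := p.input[p.pos]
		p.pos++
		if c == ',' {
			return parseLetters // separator: hand control to the next state
		}
		p.out = append(p.out, c)
	}
	return nil // end of input stops the driving loop
}

func main() {
	p := &parser{input: "ab,cd"}
	for state := stateFn(parseLetters); state != nil; {
		state = state(p)
	}
	fmt.Println(string(p.out)) // abcd
}
```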

157 vendor/gopkg.in/sourcemap.v1/sourcemap.go generated vendored Normal file

@ -0,0 +1,157 @@
package sourcemap // import "gopkg.in/sourcemap.v1"
import (
"io"
"strings"
"gopkg.in/sourcemap.v1/base64vlq"
)
type fn func(m *mappings) (fn, error)
type sourceMap struct {
Version int `json:"version"`
File string `json:"file"`
SourceRoot string `json:"sourceRoot"`
Sources []string `json:"sources"`
Names []interface{} `json:"names"`
Mappings string `json:"mappings"`
}
type mapping struct {
genLine int
genCol int
sourcesInd int
sourceLine int
sourceCol int
namesInd int
}
type mappings struct {
rd *strings.Reader
dec *base64vlq.Decoder
hasName bool
value mapping
values []mapping
}
func parseMappings(s string) ([]mapping, error) {
rd := strings.NewReader(s)
m := &mappings{
rd: rd,
dec: base64vlq.NewDecoder(rd),
}
m.value.genLine = 1
m.value.sourceLine = 1
err := m.parse()
if err != nil {
return nil, err
}
return m.values, nil
}
func (m *mappings) parse() error {
next := parseGenCol
for {
c, err := m.rd.ReadByte()
if err == io.EOF {
m.pushValue()
return nil
}
if err != nil {
return err
}
switch c {
case ',':
m.pushValue()
next = parseGenCol
case ';':
m.pushValue()
m.value.genLine++
m.value.genCol = 0
next = parseGenCol
default:
err := m.rd.UnreadByte()
if err != nil {
return err
}
next, err = next(m)
if err != nil {
return err
}
}
}
}
func parseGenCol(m *mappings) (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.value.genCol += n
return parseSourcesInd, nil
}
func parseSourcesInd(m *mappings) (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.value.sourcesInd += n
return parseSourceLine, nil
}
func parseSourceLine(m *mappings) (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.value.sourceLine += n
return parseSourceCol, nil
}
func parseSourceCol(m *mappings) (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.value.sourceCol += n
return parseNamesInd, nil
}
func parseNamesInd(m *mappings) (fn, error) {
n, err := m.dec.Decode()
if err != nil {
return nil, err
}
m.hasName = true
m.value.namesInd += n
return parseGenCol, nil
}
func (m *mappings) pushValue() {
if m.value.sourceLine == 1 && m.value.sourceCol == 0 {
return
}
if m.hasName {
m.values = append(m.values, m.value)
m.hasName = false
} else {
m.values = append(m.values, mapping{
genLine: m.value.genLine,
genCol: m.value.genCol,
sourcesInd: m.value.sourcesInd,
sourceLine: m.value.sourceLine,
sourceCol: m.value.sourceCol,
namesInd: -1,
})
}
}
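
As a usage sketch, the consumer's public surface visible in this diff is `Parse(mapURL, bytes)` plus lookups such as `Source(genLine, genCol)`. The trailing `ok bool` on `Source` is assumed from the truncated signature in the hunk above, and the file name below is a placeholder:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	sourcemap "gopkg.in/sourcemap.v1"
)

func main() {
	// app.js.map is a placeholder; any version-3 source map works.
	b, err := ioutil.ReadFile("app.js.map")
	if err != nil {
		log.Fatal(err)
	}

	// Parse rejects anything but version 3, now reporting the
	// offending version in the error message.
	smap, err := sourcemap.Parse("http://example.com/app.js.map", b)
	if err != nil {
		log.Fatal(err)
	}

	// Map a generated-file position back to the original source.
	source, name, line, col, ok := smap.Source(1, 42)
	if ok {
		fmt.Printf("%s:%d:%d (name=%q)\n", source, line, col, name)
	}
}
```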

11 vendor/modules.txt vendored

@ -207,7 +207,7 @@ github.com/prasmussen/gandi-api/operation
github.com/prasmussen/gandi-api/util
# github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03
github.com/renier/xmlrpc
# github.com/robertkrimen/otto v0.0.0-20180617131154-15f95af6e78d
# github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff
github.com/robertkrimen/otto
github.com/robertkrimen/otto/ast
github.com/robertkrimen/otto/dbg
@ -231,17 +231,14 @@ github.com/softlayer/softlayer-go/session
github.com/softlayer/softlayer-go/sl
# github.com/stretchr/testify v1.4.0
github.com/stretchr/testify/assert
# github.com/tdewolff/buffer v2.0.0+incompatible
github.com/tdewolff/buffer
# github.com/tdewolff/minify v2.1.1-0.20170910185944-d515420d53ba+incompatible
# github.com/tdewolff/minify v2.3.6+incompatible
github.com/tdewolff/minify
github.com/tdewolff/minify/json
# github.com/tdewolff/parse v2.3.4+incompatible
github.com/tdewolff/parse
github.com/tdewolff/parse/buffer
github.com/tdewolff/parse/json
# github.com/tdewolff/strconv v1.0.0
github.com/tdewolff/strconv
github.com/tdewolff/parse/strconv
# github.com/tiramiseb/go-gandi v0.0.0-20200128175142-df8b8e9d23a1
github.com/tiramiseb/go-gandi
github.com/tiramiseb/go-gandi/domain
@ -394,7 +391,7 @@ gopkg.in/ns1/ns1-go.v2/rest/model/data
gopkg.in/ns1/ns1-go.v2/rest/model/dns
gopkg.in/ns1/ns1-go.v2/rest/model/filter
gopkg.in/ns1/ns1-go.v2/rest/model/monitor
# gopkg.in/sourcemap.v1 v1.0.2
# gopkg.in/sourcemap.v1 v1.0.5
gopkg.in/sourcemap.v1
gopkg.in/sourcemap.v1/base64vlq
# gopkg.in/square/go-jose.v2 v2.3.1