Switch to dep

Tulir Asokan 2018-04-22 21:25:06 +03:00
parent bfb5f0dd45
commit 64fa922ec0
324 changed files with 74891 additions and 15 deletions

Gopkg.lock generated Normal file (+149)

@@ -0,0 +1,149 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/disintegration/imaging"
packages = ["."]
revision = "fd34ef7671b12cdf1b024d98c6b327b2770d32c4"
version = "v1.4.1"
[[projects]]
branch = "master"
name = "github.com/gdamore/encoding"
packages = ["."]
revision = "b23993cbb6353f0e6aa98d0ee318a34728f628b9"
[[projects]]
branch = "master"
name = "github.com/lucasb-eyer/go-colorful"
packages = ["."]
revision = "231272389856c976b7500c4fffcc52ddf06ff4eb"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/nu7hatch/gouuid"
packages = ["."]
revision = "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/shurcooL/sanitized_anchor_name"
packages = ["."]
revision = "86672fcb3f950f35f2e675df2240550f2a50762f"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
name = "github.com/zyedidia/clipboard"
packages = ["."]
revision = "4611e809d8b1a3051c11d11f4b610c44df73fa38"
[[projects]]
branch = "master"
name = "github.com/zyedidia/glob"
packages = ["."]
revision = "dd4023a66dc351ae26e592d21cd133b5b143f3d8"
[[projects]]
branch = "master"
name = "golang.org/x/image"
packages = [
"bmp",
"riff",
"tiff",
"tiff/lzw",
"vp8",
"vp8l",
"webp"
]
revision = "f315e440302883054d0c2bd85486878cb4f8572c"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"html",
"html/atom"
]
revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
[[projects]]
name = "golang.org/x/text"
packages = [
"encoding",
"encoding/internal/identifier",
"internal/gen",
"transform",
"unicode/cldr"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
name = "gopkg.in/russross/blackfriday.v2"
packages = ["."]
revision = "cadec560ec52d93835bf2f15bd794700d3a2473b"
version = "v2.0.0"
[[projects]]
branch = "v1"
name = "gopkg.in/toast.v1"
packages = ["."]
revision = "b700e246b8b6d3e13554091e540e1019e26389f1"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
branch = "master"
name = "maunium.net/go/gomatrix"
packages = ["."]
revision = "618319f327acbd401426c711f54e1a634e2243e4"
[[projects]]
branch = "master"
name = "maunium.net/go/tcell"
packages = [
".",
"terminfo"
]
revision = "e03df2c40d0ab5487a6b3c06a228755cdfec1c46"
[[projects]]
branch = "master"
name = "maunium.net/go/tview"
packages = ["."]
revision = "6146b7fe2331e23a78e217016705bc6801bfc55a"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ea0f742be221116a10b3fb332aee126fd8f4b3392b65a8e38a7c26e8c45faf8f"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml Normal file (+82)

@@ -0,0 +1,82 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/disintegration/imaging"
version = "1.4.1"
[[constraint]]
name = "github.com/mattn/go-runewidth"
version = "0.0.2"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
branch = "master"
name = "github.com/zyedidia/clipboard"
[[constraint]]
branch = "master"
name = "github.com/zyedidia/glob"
[[constraint]]
branch = "master"
name = "golang.org/x/image"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "gopkg.in/russross/blackfriday.v2"
version = "2.0.0"
[[constraint]]
branch = "v1"
name = "gopkg.in/toast.v1"
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "2.2.1"
[[constraint]]
branch = "master"
name = "maunium.net/go/gomatrix"
[[constraint]]
branch = "master"
name = "maunium.net/go/tcell"
[[constraint]]
branch = "master"
name = "maunium.net/go/tview"
[prune]
go-tests = true
unused-packages = true

go.mod (-15)

@@ -1,15 +0,0 @@
module "github.com/tulir/gomuks"
require (
"github.com/gdamore/encoding" v0.0.0-20151215212835-b23993cbb635
"github.com/gdamore/tcell" v0.0.0-20180402155337-2548ddfbd80a
"github.com/lucasb-eyer/go-colorful" v0.0.0-20170903184257-231272389856
"github.com/mattn/go-runewidth" v0.0.2
"github.com/zyedidia/clipboard" v0.0.0-20180208191628-4611e809d8b1
"github.com/zyedidia/glob" v0.0.0-20170209203856-dd4023a66dc3
"golang.org/x/text" v0.0.0-20171214130843-f21a4dfb5e38
"gopkg.in/yaml.v2" v1.2.1-gopkgin-v2.2.1
"maunium.net/go/gomatrix" v0.0.0-20180318193435-618319f327ac
"maunium.net/go/gomuks" v0.0.0-20180410183527-75a0945a83fb
"maunium.net/go/tview" v0.0.0-20180330155404-723cca66ac4d
)

vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file (+15)

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file (+152)

@@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}
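
For context on what this bypass enables in practice: plain fmt stops at the address of an unexported pointer field, while spew follows it. A minimal sketch, assuming the vendored package is imported by its canonical path; the type names here are illustrative, not from the diff:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// inner and outer are illustrative types; outer.hidden is unexported.
type inner struct{ n int }
type outer struct{ hidden *inner }

func main() {
	o := outer{hidden: &inner{n: 42}}

	// fmt can only show the pointer value of the unexported field.
	fmt.Printf("%+v\n", o) // e.g. {hidden:0xc000010030}

	// spew uses unsafeReflectValue (when the unsafe package is available)
	// to dereference the unexported pointer and dump the inner value too.
	spew.Dump(o) // roughly: (main.outer) { hidden: (*main.inner)(0x...)({ n: (int) 42 }) }
}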

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file (+38)

@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file (+341)

@@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64 bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}
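
As a usage note for the sorting machinery above: enabling SortKeys routes map keys through sortValues/valuesSorter, making the dump order deterministic. A minimal sketch, assuming the canonical import path; the map contents are illustrative:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	scores := map[string]int{"carol": 2, "alice": 7, "bob": 5}

	// Default config: keys appear in Go's randomized map iteration order.
	spew.Dump(scores)

	// SortKeys sends the keys through sortValues above, giving stable,
	// diffable output; SpewKeys would additionally stringify non-primitive keys.
	sorted := spew.ConfigState{Indent: " ", SortKeys: true}
	sorted.Dump(scores)
}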

vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file (+306)

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use sets this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
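
A sketch of how these options combine in a test-oriented ConfigState, following the field documentation above; the payload type and variable names are illustrative:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// payload is an illustrative type with a circular reference.
type payload struct {
	Items []string
	Next  *payload
}

func main() {
	p := &payload{Items: []string{"a", "b"}}
	p.Next = p // circular; spew marks it as already shown instead of recursing

	// Addresses and capacities vary between runs, so disabling them keeps
	// Sdump output stable enough to compare against golden strings in tests.
	testCfg := spew.ConfigState{
		Indent:                  "\t",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
		SortKeys:                true,
	}
	fmt.Print(testCfg.Sdump(p))
}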

vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file (+211)

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
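
The quick-start documentation above boils down to two call styles. A minimal sketch of both, with placeholder variables and the canonical import path assumed:

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	myVar1 := map[string][]byte{"key": []byte("value")}
	myVar2 := &myVar1

	// Dump style: newlines, indentation, types, lengths, pointer addresses.
	spew.Dump(myVar1, myVar2)
	spew.Fdump(os.Stderr, myVar1)
	str := spew.Sdump(myVar2)
	_ = str

	// Formatter style: inline output via the custom %v, %+v, %#v, %#+v verbs.
	spew.Printf("myVar1: %v -- myVar2: %+v\n", myVar1, myVar2)
}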

vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file (+509)

@@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
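A minimal usage sketch of Dump and Sdump; the `outer` and `inner` types and their values are invented purely for illustration:

```go
package main

import "github.com/davecgh/go-spew/spew"

type inner struct {
	Data []byte
}

type outer struct {
	Name string
	In   *inner
}

func main() {
	v := outer{Name: "example", In: &inner{Data: []byte("abc")}}

	// Dump writes to stdout, follows the In pointer, and renders the
	// byte slice as a hexdump with offsets, hex values and ASCII.
	spew.Dump(v)

	// Sdump returns exactly the same output as a string.
	_ = spew.Sdump(v)
}
```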

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were no other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
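A short sketch of the verb combinations described above; the `point` type is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type point struct{ X, Y int }
	p := &point{X: 1, Y: 2}

	// Only the v verbs are customized: %v is compact, %+v adds pointer
	// addresses, %#v adds types, and %#+v adds both.
	fmt.Printf("%v\n", spew.NewFormatter(p))
	fmt.Printf("%#+v\n", spew.NewFormatter(p))

	// Any other verb is deferred to the standard fmt package.
	fmt.Printf("%q\n", spew.NewFormatter("hello"))
}
```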

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
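A brief sketch of the wrapper functions in action; the `cfg` map is invented for illustration:

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"retries": 3, "timeout": 30}

	// Every argument passes through convertArgs, so plain fmt verbs
	// pick up spew's formatting automatically.
	spew.Printf("config: %v\n", cfg)
	spew.Fprintln(os.Stderr, "config:", cfg)
}
```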

13
vendor/github.com/disintegration/imaging/.travis.yml generated vendored Normal file

@ -0,0 +1,13 @@
language: go
go:
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
before_install:
- go get github.com/mattn/goveralls
script:
- go test -v -race -cover
- $GOPATH/bin/goveralls -service=travis-ci

21
vendor/github.com/disintegration/imaging/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2012-2018 Grigory Dryapak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

188
vendor/github.com/disintegration/imaging/README.md generated vendored Normal file

@ -0,0 +1,188 @@
# Imaging
[![GoDoc](https://godoc.org/github.com/disintegration/imaging?status.svg)](https://godoc.org/github.com/disintegration/imaging)
[![Build Status](https://travis-ci.org/disintegration/imaging.svg?branch=master)](https://travis-ci.org/disintegration/imaging)
[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master&service=github)](https://coveralls.io/github/disintegration/imaging?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/disintegration/imaging)](https://goreportcard.com/report/github.com/disintegration/imaging)
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
All the image processing functions provided by the package accept any image type that implements the `image.Image` interface
as input and return a new image of type `*image.NRGBA` (32-bit RGBA colors, not premultiplied by alpha).
## Installation
go get -u github.com/disintegration/imaging
## Documentation
http://godoc.org/github.com/disintegration/imaging
## Usage examples
A few usage examples can be found below. See the documentation for the full list of supported functions.
### Image resizing
```go
// Resize srcImage to size = 128x128px using the Lanczos filter.
dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos)
// Resize srcImage to width = 800px preserving the aspect ratio.
dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)
// Scale down srcImage to fit the 800x600px bounding box.
dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
// Resize and crop the srcImage to fill the 100x100px area.
dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos)
```
Imaging supports image resizing using various resampling filters. The most notable ones:
- `NearestNeighbor` - Fastest resampling filter, no antialiasing.
- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor.
- `Linear` - Bilinear filter, smooth and reasonably fast.
- `MitchellNetravali` - A smooth bicubic filter.
- `CatmullRom` - A sharp bicubic filter.
- `Gaussian` - Blurring filter that uses a Gaussian function, useful for noise removal.
- `Lanczos` - High-quality resampling filter for photographic images yielding sharp results, slower than cubic filters.
The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using the `ResampleFilter` struct, as in the sketch below.
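For instance, a custom triangle (tent) filter, equivalent to the built-in `Linear` filter, could be defined like this (assuming `math` is imported alongside `imaging`):

```go
// A custom triangle (tent) filter, equivalent to imaging.Linear.
triangleFilter := imaging.ResampleFilter{
	Support: 1.0,
	Kernel: func(x float64) float64 {
		x = math.Abs(x)
		if x < 1.0 {
			return 1.0 - x
		}
		return 0
	},
}

// It can be passed anywhere a predefined filter is accepted.
dstImage := imaging.Resize(srcImage, 800, 0, triangleFilter)
```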
**Resampling filters comparison**
Original image:
![srcImage](testdata/branches.png)
The same image resized from 600x400px to 150x100px using different resampling filters.
From faster (lower quality) to slower (higher quality):
Filter | Resize result
--------------------------|---------------------------------------------
`imaging.NearestNeighbor` | ![dstImage](testdata/out_resize_nearest.png)
`imaging.Linear` | ![dstImage](testdata/out_resize_linear.png)
`imaging.CatmullRom` | ![dstImage](testdata/out_resize_catrom.png)
`imaging.Lanczos` | ![dstImage](testdata/out_resize_lanczos.png)
### Gaussian Blur
```go
dstImage := imaging.Blur(srcImage, 0.5)
```
The sigma parameter controls the strength of the blurring effect.
Original image | Sigma = 0.5 | Sigma = 1.5
-----------------------------------|----------------------------------------|---------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_blur_0.5.png) | ![dstImage](testdata/out_blur_1.5.png)
### Sharpening
```go
dstImage := imaging.Sharpen(srcImage, 0.5)
```
`Sharpen` uses a Gaussian function internally. The sigma parameter controls the strength of the sharpening effect.
Original image | Sigma = 0.5 | Sigma = 1.5
-----------------------------------|-------------------------------------------|------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_sharpen_0.5.png) | ![dstImage](testdata/out_sharpen_1.5.png)
### Gamma correction
```go
dstImage := imaging.AdjustGamma(srcImage, 0.75)
```
Original image | Gamma = 0.75 | Gamma = 1.25
-----------------------------------|------------------------------------------|-----------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_gamma_0.75.png) | ![dstImage](testdata/out_gamma_1.25.png)
### Contrast adjustment
```go
dstImage := imaging.AdjustContrast(srcImage, 20)
```
Original image | Contrast = 15 | Contrast = -15
-----------------------------------|--------------------------------------------|-------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_contrast_p15.png) | ![dstImage](testdata/out_contrast_m15.png)
### Brightness adjustment
```go
dstImage := imaging.AdjustBrightness(srcImage, 20)
```
Original image | Brightness = 10 | Brightness = -10
-----------------------------------|----------------------------------------------|---------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_brightness_p10.png) | ![dstImage](testdata/out_brightness_m10.png)
## Example code
```go
package main
import (
"image"
"image/color"
"log"
"github.com/disintegration/imaging"
)
func main() {
// Open a test image.
src, err := imaging.Open("testdata/flowers.png")
if err != nil {
log.Fatalf("failed to open image: %v", err)
}
// Crop the original image to 300x300px size using the center anchor.
src = imaging.CropAnchor(src, 300, 300, imaging.Center)
// Resize the cropped image to width = 200px preserving the aspect ratio.
src = imaging.Resize(src, 200, 0, imaging.Lanczos)
// Create a blurred version of the image.
img1 := imaging.Blur(src, 5)
// Create a grayscale version of the image with higher contrast and sharpness.
img2 := imaging.Grayscale(src)
img2 = imaging.AdjustContrast(img2, 20)
img2 = imaging.Sharpen(img2, 2)
// Create an inverted version of the image.
img3 := imaging.Invert(src)
// Create an embossed version of the image using a convolution filter.
img4 := imaging.Convolve3x3(
src,
[9]float64{
-1, -1, 0,
-1, 1, 1,
0, 1, 1,
},
nil,
)
// Create a new image and paste the four produced images into it.
dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0})
dst = imaging.Paste(dst, img1, image.Pt(0, 0))
dst = imaging.Paste(dst, img2, image.Pt(0, 200))
dst = imaging.Paste(dst, img3, image.Pt(200, 0))
dst = imaging.Paste(dst, img4, image.Pt(200, 200))
// Save the resulting image as JPEG.
err = imaging.Save(dst, "testdata/out_example.jpg")
if err != nil {
log.Fatalf("failed to save image: %v", err)
}
}
```
Output:
![dstImage](testdata/out_example.jpg)

222
vendor/github.com/disintegration/imaging/adjust.go generated vendored Normal file

@ -0,0 +1,222 @@
package imaging
import (
"image"
"image/color"
"math"
)
// Grayscale produces a grayscale version of the image.
func Grayscale(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
r := dst.Pix[i+0]
g := dst.Pix[i+1]
b := dst.Pix[i+2]
f := 0.299*float64(r) + 0.587*float64(g) + 0.114*float64(b)
y := uint8(f + 0.5)
dst.Pix[i+0] = y
dst.Pix[i+1] = y
dst.Pix[i+2] = y
i += 4
}
}
})
return dst
}
// Invert produces an inverted (negated) version of the image.
func Invert(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
dst.Pix[i+0] = 255 - dst.Pix[i+0]
dst.Pix[i+1] = 255 - dst.Pix[i+1]
dst.Pix[i+2] = 255 - dst.Pix[i+2]
i += 4
}
}
})
return dst
}
// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in the range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives a solid gray image.
//
// Examples:
//
// dstImage = imaging.AdjustContrast(srcImage, -10) // decrease image contrast by 10%
// dstImage = imaging.AdjustContrast(srcImage, 20) // increase image contrast by 20%
//
func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
v := (100.0 + percentage) / 100.0
for i := 0; i < 256; i++ {
if 0 <= v && v <= 1 {
lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0)
} else if 1 < v && v < 2 {
lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0)
} else {
lut[i] = uint8(float64(i)/255.0+0.5) * 255
}
}
return adjustLUT(img, lut)
}
// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in the range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives a solid black image. The percentage = 100 gives a solid white image.
//
// Examples:
//
// dstImage = imaging.AdjustBrightness(srcImage, -15) // decrease image brightness by 15%
// dstImage = imaging.AdjustBrightness(srcImage, 10) // increase image brightness by 10%
//
func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
shift := 255.0 * percentage / 100.0
for i := 0; i < 256; i++ {
lut[i] = clamp(float64(i) + shift)
}
return adjustLUT(img, lut)
}
// AdjustGamma performs a gamma correction on the image and returns the adjusted image.
// Gamma parameter must be positive. Gamma = 1.0 gives the original image.
// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it.
//
// Example:
//
// dstImage = imaging.AdjustGamma(srcImage, 0.7)
//
func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
e := 1.0 / math.Max(gamma, 0.0001)
lut := make([]uint8, 256)
for i := 0; i < 256; i++ {
lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0)
}
return adjustLUT(img, lut)
}
// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image.
// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail.
// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5.
// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10).
// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased.
//
// Examples:
//
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // increase the contrast
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // decrease the contrast
//
func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
if factor == 0 {
return Clone(img)
}
lut := make([]uint8, 256)
a := math.Min(math.Max(midpoint, 0.0), 1.0)
b := math.Abs(factor)
sig0 := sigmoid(a, b, 0)
sig1 := sigmoid(a, b, 1)
e := 1.0e-6
if factor > 0 {
for i := 0; i < 256; i++ {
x := float64(i) / 255.0
sigX := sigmoid(a, b, x)
f := (sigX - sig0) / (sig1 - sig0)
lut[i] = clamp(f * 255.0)
}
} else {
for i := 0; i < 256; i++ {
x := float64(i) / 255.0
arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e)
f := a - math.Log(1.0/arg-1.0)/b
lut[i] = clamp(f * 255.0)
}
}
return adjustLUT(img, lut)
}
func sigmoid(a, b, x float64) float64 {
return 1 / (1 + math.Exp(b*(a-x)))
}
// adjustLUT applies the given lookup table to the colors of the image.
func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
dst.Pix[i+0] = lut[dst.Pix[i+0]]
dst.Pix[i+1] = lut[dst.Pix[i+1]]
dst.Pix[i+2] = lut[dst.Pix[i+2]]
i += 4
}
}
})
return dst
}
// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image.
//
// Example:
//
// dstImage = imaging.AdjustFunc(
// srcImage,
// func(c color.NRGBA) color.NRGBA {
// // shift the red channel by 16
// r := int(c.R) + 16
// if r > 255 {
// r = 255
// }
// return color.NRGBA{uint8(r), c.G, c.B, c.A}
// }
// )
//
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
r := dst.Pix[i+0]
g := dst.Pix[i+1]
b := dst.Pix[i+2]
a := dst.Pix[i+3]
c := fn(color.NRGBA{r, g, b, a})
dst.Pix[i+0] = c.R
dst.Pix[i+1] = c.G
dst.Pix[i+2] = c.B
dst.Pix[i+3] = c.A
i += 4
}
}
})
return dst
}
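A rough sketch of chaining the adjustments above; the input and output file names are placeholders:

```go
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	src, err := imaging.Open("in.png") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}

	// Grayscale works per pixel; AdjustContrast and AdjustGamma each
	// build a 256-entry lookup table once and map every channel through it.
	out := imaging.Grayscale(src)
	out = imaging.AdjustContrast(out, 15)
	out = imaging.AdjustGamma(out, 1.1)

	if err := imaging.Save(out, "out.png"); err != nil {
		log.Fatal(err)
	}
}
```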

146
vendor/github.com/disintegration/imaging/convolution.go generated vendored Normal file

@ -0,0 +1,146 @@
package imaging
import (
"image"
)
// ConvolveOptions are convolution parameters.
type ConvolveOptions struct {
// If Normalize is true the kernel is normalized before convolution.
Normalize bool
// If Abs is true the absolute value of each color channel is taken after convolution.
Abs bool
// Bias is added to each color channel value after convolution.
Bias int
}
// Convolve3x3 convolves the image with the specified 3x3 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA {
return convolve(img, kernel[:], options)
}
// Convolve5x5 convolves the image with the specified 5x5 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA {
return convolve(img, kernel[:], options)
}
func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA {
src := toNRGBA(img)
w := src.Bounds().Max.X
h := src.Bounds().Max.Y
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
if w < 1 || h < 1 {
return dst
}
if options == nil {
options = &ConvolveOptions{}
}
if options.Normalize {
normalizeKernel(kernel)
}
type coef struct {
x, y int
k float64
}
var coefs []coef
var m int
switch len(kernel) {
case 9:
m = 1
case 25:
m = 2
}
i := 0
for y := -m; y <= m; y++ {
for x := -m; x <= m; x++ {
if kernel[i] != 0 {
coefs = append(coefs, coef{x: x, y: y, k: kernel[i]})
}
i++
}
}
parallel(0, h, func(ys <-chan int) {
for y := range ys {
for x := 0; x < w; x++ {
var r, g, b float64
for _, c := range coefs {
ix := x + c.x
if ix < 0 {
ix = 0
} else if ix >= w {
ix = w - 1
}
iy := y + c.y
if iy < 0 {
iy = 0
} else if iy >= h {
iy = h - 1
}
off := iy*src.Stride + ix*4
r += float64(src.Pix[off+0]) * c.k
g += float64(src.Pix[off+1]) * c.k
b += float64(src.Pix[off+2]) * c.k
}
if options.Abs {
if r < 0 {
r = -r
}
if g < 0 {
g = -g
}
if b < 0 {
b = -b
}
}
if options.Bias != 0 {
r += float64(options.Bias)
g += float64(options.Bias)
b += float64(options.Bias)
}
srcOff := y*src.Stride + x*4
dstOff := y*dst.Stride + x*4
dst.Pix[dstOff+0] = clamp(r)
dst.Pix[dstOff+1] = clamp(g)
dst.Pix[dstOff+2] = clamp(b)
dst.Pix[dstOff+3] = src.Pix[srcOff+3]
}
}
})
return dst
}
func normalizeKernel(kernel []float64) {
var sum, sumpos float64
for i := range kernel {
sum += kernel[i]
if kernel[i] > 0 {
sumpos += kernel[i]
}
}
if sum != 0 {
for i := range kernel {
kernel[i] /= sum
}
} else if sumpos != 0 {
for i := range kernel {
kernel[i] /= sumpos
}
}
}
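A rough sketch of Convolve3x3 with a Laplacian-style edge-detection kernel; the file names are placeholders:

```go
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	src, err := imaging.Open("in.png") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}

	// A Laplacian-style kernel sums to zero, so Normalize is left off;
	// Abs keeps the negative responses visible instead of clamping them.
	edges := imaging.Convolve3x3(
		src,
		[9]float64{
			0, -1, 0,
			-1, 4, -1,
			0, -1, 0,
		},
		&imaging.ConvolveOptions{Abs: true},
	)

	if err := imaging.Save(edges, "edges.png"); err != nil {
		log.Fatal(err)
	}
}
```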

7
vendor/github.com/disintegration/imaging/doc.go generated vendored Normal file

@ -0,0 +1,7 @@
/*
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
All the image processing functions provided by the package accept any image type that implements the image.Image interface
as input and return a new image of type *image.NRGBA (32-bit RGBA colors, not premultiplied by alpha).
*/
package imaging

165
vendor/github.com/disintegration/imaging/effects.go generated vendored Normal file

@ -0,0 +1,165 @@
package imaging
import (
"image"
"math"
)
func gaussianBlurKernel(x, sigma float64) float64 {
return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi))
}
// Blur produces a blurred version of the image using a Gaussian function.
// The sigma parameter must be positive and indicates how much the image will be blurred.
//
// Usage example:
//
// dstImage := imaging.Blur(srcImage, 3.5)
//
func Blur(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)
}
radius := int(math.Ceil(sigma * 3.0))
kernel := make([]float64, radius+1)
for i := 0; i <= radius; i++ {
kernel[i] = gaussianBlurKernel(float64(i), sigma)
}
return blurVertical(blurHorizontal(img, kernel), kernel)
}
func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
radius := len(kernel) - 1
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
for x := 0; x < src.w; x++ {
min := x - radius
if min < 0 {
min = 0
}
max := x + radius
if max > src.w-1 {
max = src.w - 1
}
var r, g, b, a, wsum float64
for ix := min; ix <= max; ix++ {
i := ix * 4
weight := kernel[absint(x-ix)]
wsum += weight
wa := float64(scanLine[i+3]) * weight
r += float64(scanLine[i+0]) * wa
g += float64(scanLine[i+1]) * wa
b += float64(scanLine[i+2]) * wa
a += wa
}
if a != 0 {
r /= a
g /= a
b /= a
}
j := y*dst.Stride + x*4
dst.Pix[j+0] = clamp(r)
dst.Pix[j+1] = clamp(g)
dst.Pix[j+2] = clamp(b)
dst.Pix[j+3] = clamp(a / wsum)
}
}
})
return dst
}
func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
radius := len(kernel) - 1
parallel(0, src.w, func(xs <-chan int) {
scanLine := make([]uint8, src.h*4)
for x := range xs {
src.scan(x, 0, x+1, src.h, scanLine)
for y := 0; y < src.h; y++ {
min := y - radius
if min < 0 {
min = 0
}
max := y + radius
if max > src.h-1 {
max = src.h - 1
}
var r, g, b, a, wsum float64
for iy := min; iy <= max; iy++ {
i := iy * 4
weight := kernel[absint(y-iy)]
wsum += weight
wa := float64(scanLine[i+3]) * weight
r += float64(scanLine[i+0]) * wa
g += float64(scanLine[i+1]) * wa
b += float64(scanLine[i+2]) * wa
a += wa
}
if a != 0 {
r /= a
g /= a
b /= a
}
j := y*dst.Stride + x*4
dst.Pix[j+0] = clamp(r)
dst.Pix[j+1] = clamp(g)
dst.Pix[j+2] = clamp(b)
dst.Pix[j+3] = clamp(a / wsum)
}
}
})
return dst
}
// Sharpen produces a sharpened version of the image.
// The sigma parameter must be positive and indicates how much the image will be sharpened.
//
// Usage example:
//
// dstImage := imaging.Sharpen(srcImage, 3.5)
//
func Sharpen(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)
}
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
blurred := Blur(img, sigma)
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
j := y * dst.Stride
for i := 0; i < src.w*4; i++ {
val := int(scanLine[i])<<1 - int(blurred.Pix[j])
if val < 0 {
val = 0
} else if val > 0xff {
val = 0xff
}
dst.Pix[j] = uint8(val)
j++
}
}
})
return dst
}
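A short sketch using Blur and Sharpen with the same sigma; the file names are placeholders:

```go
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	src, err := imaging.Open("in.png") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}

	// With sigma = 1.5 the Gaussian kernel radius is ceil(3*1.5) = 5 pixels.
	soft := imaging.Blur(src, 1.5)
	crisp := imaging.Sharpen(src, 1.5)

	if err := imaging.Save(soft, "soft.png"); err != nil {
		log.Fatal(err)
	}
	if err := imaging.Save(crisp, "crisp.png"); err != nil {
		log.Fatal(err)
	}
}
```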

280
vendor/github.com/disintegration/imaging/helpers.go generated vendored Normal file

@ -0,0 +1,280 @@
package imaging
import (
"errors"
"image"
"image/color"
"image/draw"
"image/gif"
"image/jpeg"
"image/png"
"io"
"os"
"path/filepath"
"strings"
"golang.org/x/image/bmp"
"golang.org/x/image/tiff"
)
// Format is an image file format.
type Format int
// Image file formats.
const (
JPEG Format = iota
PNG
GIF
TIFF
BMP
)
func (f Format) String() string {
switch f {
case JPEG:
return "JPEG"
case PNG:
return "PNG"
case GIF:
return "GIF"
case TIFF:
return "TIFF"
case BMP:
return "BMP"
default:
return "Unsupported"
}
}
var (
// ErrUnsupportedFormat means the given image format (or file extension) is unsupported.
ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
)
type fileSystem interface {
Create(string) (io.WriteCloser, error)
Open(string) (io.ReadCloser, error)
}
type localFS struct{}
func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
var fs fileSystem = localFS{}
// Decode reads an image from r.
func Decode(r io.Reader) (image.Image, error) {
img, _, err := image.Decode(r)
return img, err
}
// Open loads an image from a file.
func Open(filename string) (image.Image, error) {
file, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
return Decode(file)
}
type encodeConfig struct {
jpegQuality int
gifNumColors int
gifQuantizer draw.Quantizer
gifDrawer draw.Drawer
pngCompressionLevel png.CompressionLevel
}
var defaultEncodeConfig = encodeConfig{
jpegQuality: 95,
gifNumColors: 256,
gifQuantizer: nil,
gifDrawer: nil,
pngCompressionLevel: png.DefaultCompression,
}
// EncodeOption sets an optional parameter for the Encode and Save functions.
type EncodeOption func(*encodeConfig)
// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
func JPEGQuality(quality int) EncodeOption {
return func(c *encodeConfig) {
c.jpegQuality = quality
}
}
// GIFNumColors returns an EncodeOption that sets the maximum number of colors
// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
func GIFNumColors(numColors int) EncodeOption {
return func(c *encodeConfig) {
c.gifNumColors = numColors
}
}
// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
// a palette of the GIF-encoded image.
func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
return func(c *encodeConfig) {
c.gifQuantizer = quantizer
}
}
// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
// the source image to the desired palette of the GIF-encoded image.
func GIFDrawer(drawer draw.Drawer) EncodeOption {
return func(c *encodeConfig) {
c.gifDrawer = drawer
}
}
// PNGCompressionLevel returns an EncodeOption that sets the compression level
// of the PNG-encoded image. Default is png.DefaultCompression.
func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
return func(c *encodeConfig) {
c.pngCompressionLevel = level
}
}
// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
cfg := defaultEncodeConfig
for _, option := range opts {
option(&cfg)
}
var err error
switch format {
case JPEG:
var rgba *image.RGBA
if nrgba, ok := img.(*image.NRGBA); ok {
if nrgba.Opaque() {
rgba = &image.RGBA{
Pix: nrgba.Pix,
Stride: nrgba.Stride,
Rect: nrgba.Rect,
}
}
}
if rgba != nil {
err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
} else {
err = jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
}
case PNG:
enc := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
err = enc.Encode(w, img)
case GIF:
err = gif.Encode(w, img, &gif.Options{
NumColors: cfg.gifNumColors,
Quantizer: cfg.gifQuantizer,
Drawer: cfg.gifDrawer,
})
case TIFF:
err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
case BMP:
err = bmp.Encode(w, img)
default:
err = ErrUnsupportedFormat
}
return err
}
// Save saves the image to file with the specified filename.
// The format is determined from the filename extension: "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
//
// Examples:
//
// // Save the image as PNG.
// err := imaging.Save(img, "out.png")
//
// // Save the image as JPEG with optional quality parameter set to 80.
// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
//
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
formats := map[string]Format{
".jpg": JPEG,
".jpeg": JPEG,
".png": PNG,
".tif": TIFF,
".tiff": TIFF,
".bmp": BMP,
".gif": GIF,
}
ext := strings.ToLower(filepath.Ext(filename))
f, ok := formats[ext]
if !ok {
return ErrUnsupportedFormat
}
file, err := fs.Create(filename)
if err != nil {
return err
}
defer func() {
cerr := file.Close()
if err == nil {
err = cerr
}
}()
return Encode(file, img, f, opts...)
}
// New creates a new image with the specified width and height, and fills it with the specified color.
func New(width, height int, fillColor color.Color) *image.NRGBA {
if width <= 0 || height <= 0 {
return &image.NRGBA{}
}
dst := image.NewNRGBA(image.Rect(0, 0, width, height))
c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
if c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0 {
return dst
}
// Fill the first row.
i := 0
for x := 0; x < width; x++ {
dst.Pix[i+0] = c.R
dst.Pix[i+1] = c.G
dst.Pix[i+2] = c.B
dst.Pix[i+3] = c.A
i += 4
}
// Copy the first row to other rows.
size := width * 4
parallel(1, height, func(ys <-chan int) {
for y := range ys {
i = y * dst.Stride
copy(dst.Pix[i:i+size], dst.Pix[0:size])
}
})
return dst
}
// Clone returns a copy of the given image.
func Clone(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
size := src.w * 4
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
}
})
return dst
}
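A rough sketch combining Decode, Encode and Save with encode options; the file names are placeholders:

```go
package main

import (
	"bytes"
	"image/png"
	"log"
	"os"

	"github.com/disintegration/imaging"
)

func main() {
	f, err := os.Open("in.jpg") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	img, err := imaging.Decode(f)
	if err != nil {
		log.Fatal(err)
	}

	// Encode into memory as PNG with the strongest compression level.
	var buf bytes.Buffer
	if err := imaging.Encode(&buf, img, imaging.PNG, imaging.PNGCompressionLevel(png.BestCompression)); err != nil {
		log.Fatal(err)
	}

	// Or write straight to disk; the format is picked from the extension.
	if err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80)); err != nil {
		log.Fatal(err)
	}
}
```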

51
vendor/github.com/disintegration/imaging/histogram.go generated vendored Normal file

@ -0,0 +1,51 @@
package imaging
import (
"image"
"sync"
)
// Histogram returns a normalized histogram of an image.
//
// The resulting histogram is represented as an array of 256 floats, where
// histogram[i] is the probability of a pixel having luminance i.
func Histogram(img image.Image) [256]float64 {
var mu sync.Mutex
var histogram [256]float64
var total float64
src := newScanner(img)
if src.w == 0 || src.h == 0 {
return histogram
}
parallel(0, src.h, func(ys <-chan int) {
var tmpHistogram [256]float64
var tmpTotal float64
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
i := 0
for x := 0; x < src.w; x++ {
r := scanLine[i+0]
g := scanLine[i+1]
b := scanLine[i+2]
y := 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b)
tmpHistogram[int(y+0.5)]++
tmpTotal++
i += 4
}
}
mu.Lock()
for i := 0; i < 256; i++ {
histogram[i] += tmpHistogram[i]
}
total += tmpTotal
mu.Unlock()
})
for i := 0; i < 256; i++ {
histogram[i] = histogram[i] / total
}
return histogram
}
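A short sketch that derives the mean luminance from the normalized histogram; the input file name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	img, err := imaging.Open("in.png") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}

	hist := imaging.Histogram(img)

	// The histogram is normalized (its entries sum to 1), so the mean
	// luminance is simply the probability-weighted luminance level.
	var mean float64
	for level, p := range hist {
		mean += float64(level) * p
	}
	fmt.Printf("mean luminance: %.1f of 255\n", mean)
}
```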

572
vendor/github.com/disintegration/imaging/resize.go generated vendored Normal file

@ -0,0 +1,572 @@
package imaging
import (
"image"
"math"
)
type indexWeight struct {
index int
weight float64
}
func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight {
du := float64(srcSize) / float64(dstSize)
scale := du
if scale < 1.0 {
scale = 1.0
}
ru := math.Ceil(scale * filter.Support)
out := make([][]indexWeight, dstSize)
tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2)
for v := 0; v < dstSize; v++ {
fu := (float64(v)+0.5)*du - 0.5
begin := int(math.Ceil(fu - ru))
if begin < 0 {
begin = 0
}
end := int(math.Floor(fu + ru))
if end > srcSize-1 {
end = srcSize - 1
}
var sum float64
for u := begin; u <= end; u++ {
w := filter.Kernel((float64(u) - fu) / scale)
if w != 0 {
sum += w
tmp = append(tmp, indexWeight{index: u, weight: w})
}
}
if sum != 0 {
for i := range tmp {
tmp[i].weight /= sum
}
}
out[v] = tmp
tmp = tmp[len(tmp):]
}
return out
}
// Resize resizes the image to the specified width and height using the specified resampling
// filter and returns the transformed image. If one of width or height is 0, the image aspect
// ratio is preserved.
//
// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali,
// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine.
//
// Usage example:
//
// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
//
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
if dstW < 0 || dstH < 0 {
return &image.NRGBA{}
}
if dstW == 0 && dstH == 0 {
return &image.NRGBA{}
}
srcW := img.Bounds().Dx()
srcH := img.Bounds().Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
// If new width or height is 0 then preserve aspect ratio, minimum 1px.
if dstW == 0 {
tmpW := float64(dstH) * float64(srcW) / float64(srcH)
dstW = int(math.Max(1.0, math.Floor(tmpW+0.5)))
}
if dstH == 0 {
tmpH := float64(dstW) * float64(srcH) / float64(srcW)
dstH = int(math.Max(1.0, math.Floor(tmpH+0.5)))
}
if filter.Support <= 0 {
// Nearest-neighbor special case.
return resizeNearest(img, dstW, dstH)
}
if srcW != dstW && srcH != dstH {
return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter)
}
if srcW != dstW {
return resizeHorizontal(img, dstW, filter)
}
if srcH != dstH {
return resizeVertical(img, dstH, filter)
}
return Clone(img)
}
func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, width, src.h))
weights := precomputeWeights(width, src.w, filter)
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
j0 := y * dst.Stride
for x := 0; x < width; x++ {
var r, g, b, a float64
for _, w := range weights[x] {
i := w.index * 4
aw := float64(scanLine[i+3]) * w.weight
r += float64(scanLine[i+0]) * aw
g += float64(scanLine[i+1]) * aw
b += float64(scanLine[i+2]) * aw
a += aw
}
if a != 0 {
aInv := 1 / a
j := j0 + x*4
dst.Pix[j+0] = clamp(r * aInv)
dst.Pix[j+1] = clamp(g * aInv)
dst.Pix[j+2] = clamp(b * aInv)
dst.Pix[j+3] = clamp(a)
}
}
}
})
return dst
}
func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, height))
weights := precomputeWeights(height, src.h, filter)
parallel(0, src.w, func(xs <-chan int) {
scanLine := make([]uint8, src.h*4)
for x := range xs {
src.scan(x, 0, x+1, src.h, scanLine)
for y := 0; y < height; y++ {
var r, g, b, a float64
for _, w := range weights[y] {
i := w.index * 4
aw := float64(scanLine[i+3]) * w.weight
r += float64(scanLine[i+0]) * aw
g += float64(scanLine[i+1]) * aw
b += float64(scanLine[i+2]) * aw
a += aw
}
if a != 0 {
aInv := 1 / a
j := y*dst.Stride + x*4
dst.Pix[j+0] = clamp(r * aInv)
dst.Pix[j+1] = clamp(g * aInv)
dst.Pix[j+2] = clamp(b * aInv)
dst.Pix[j+3] = clamp(a)
}
}
}
})
return dst
}
// resizeNearest is a fast nearest-neighbor resize, no filtering.
func resizeNearest(img image.Image, width, height int) *image.NRGBA {
dst := image.NewNRGBA(image.Rect(0, 0, width, height))
dx := float64(img.Bounds().Dx()) / float64(width)
dy := float64(img.Bounds().Dy()) / float64(height)
if dx > 1 && dy > 1 {
src := newScanner(img)
parallel(0, height, func(ys <-chan int) {
for y := range ys {
srcY := int((float64(y) + 0.5) * dy)
dstOff := y * dst.Stride
for x := 0; x < width; x++ {
srcX := int((float64(x) + 0.5) * dx)
src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4])
dstOff += 4
}
}
})
} else {
src := toNRGBA(img)
parallel(0, height, func(ys <-chan int) {
for y := range ys {
srcY := int((float64(y) + 0.5) * dy)
srcOff0 := srcY * src.Stride
dstOff := y * dst.Stride
for x := 0; x < width; x++ {
srcX := int((float64(x) + 0.5) * dx)
srcOff := srcOff0 + srcX*4
copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4])
dstOff += 4
}
}
})
}
return dst
}
// Fit scales down the image using the specified resample filter to fit the specified
// maximum width and height and returns the transformed image.
//
// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali,
// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine.
//
// Usage example:
//
// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
//
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
maxW, maxH := width, height
if maxW <= 0 || maxH <= 0 {
return &image.NRGBA{}
}
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
if srcW <= maxW && srcH <= maxH {
return Clone(img)
}
srcAspectRatio := float64(srcW) / float64(srcH)
maxAspectRatio := float64(maxW) / float64(maxH)
var newW, newH int
if srcAspectRatio > maxAspectRatio {
newW = maxW
newH = int(float64(newW) / srcAspectRatio)
} else {
newH = maxH
newW = int(float64(newH) * srcAspectRatio)
}
return Resize(img, newW, newH, filter)
}
// Fill scales the image to the smallest possible size that will cover the specified dimensions,
// crops the resized image to the specified dimensions using the given anchor point and returns
// the transformed image.
//
// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali,
// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine.
//
// Usage example:
//
// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
//
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
minW, minH := width, height
if minW <= 0 || minH <= 0 {
return &image.NRGBA{}
}
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
if srcW == minW && srcH == minH {
return Clone(img)
}
srcAspectRatio := float64(srcW) / float64(srcH)
minAspectRatio := float64(minW) / float64(minH)
var tmp *image.NRGBA
if srcAspectRatio < minAspectRatio {
tmp = Resize(img, minW, 0, filter)
} else {
tmp = Resize(img, 0, minH, filter)
}
return CropAnchor(tmp, minW, minH, anchor)
}
// Thumbnail scales the image up or down using the specified resample filter, crops it
// to the specified width and height and returns the transformed image.
//
// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali,
// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine.
//
// Usage example:
//
// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
//
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
return Fill(img, width, height, Center, filter)
}
// ResampleFilter is a resampling filter struct. It can be used to define custom filters.
//
// Supported resample filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali,
// CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine.
//
// General filter recommendations:
//
// - Lanczos
// High-quality resampling filter for photographic images yielding sharp results.
// It's slower than cubic filters (see below).
//
// - CatmullRom
// A sharp cubic filter. It's a good filter for both upscaling and downscaling if sharp results are needed.
//
// - MitchellNetravali
// A high quality cubic filter that produces smoother results with less ringing artifacts than CatmullRom.
//
// - BSpline
// A good filter if a very smooth output is needed.
//
// - Linear
// Bilinear interpolation filter, produces reasonably good, smooth output.
// It's faster than cubic filters.
//
// - Box
// Simple and fast averaging filter appropriate for downscaling.
// When upscaling it's similar to NearestNeighbor.
//
// - NearestNeighbor
// Fastest resampling filter, no antialiasing.
//
type ResampleFilter struct {
Support float64
Kernel func(float64) float64
}
// NearestNeighbor is a nearest-neighbor filter (no anti-aliasing).
var NearestNeighbor ResampleFilter
// Box filter (averaging pixels).
var Box ResampleFilter
// Linear filter.
var Linear ResampleFilter
// Hermite cubic spline filter (BC-spline; B=0; C=0).
var Hermite ResampleFilter
// MitchellNetravali is Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3).
var MitchellNetravali ResampleFilter
// CatmullRom is a Catmull-Rom - sharp cubic filter (BC-spline; B=0; C=0.5).
var CatmullRom ResampleFilter
// BSpline is a smooth cubic filter (BC-spline; B=1; C=0).
var BSpline ResampleFilter
// Gaussian is a Gaussian blurring Filter.
var Gaussian ResampleFilter
// Bartlett is a Bartlett-windowed sinc filter (3 lobes).
var Bartlett ResampleFilter
// Lanczos filter (3 lobes).
var Lanczos ResampleFilter
// Hann is a Hann-windowed sinc filter (3 lobes).
var Hann ResampleFilter
// Hamming is a Hamming-windowed sinc filter (3 lobes).
var Hamming ResampleFilter
// Blackman is a Blackman-windowed sinc filter (3 lobes).
var Blackman ResampleFilter
// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes).
var Welch ResampleFilter
// Cosine is a Cosine-windowed sinc filter (3 lobes).
var Cosine ResampleFilter
func bcspline(x, b, c float64) float64 {
var y float64
x = math.Abs(x)
if x < 1.0 {
y = ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
} else if x < 2.0 {
y = ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
}
return y
}
func sinc(x float64) float64 {
if x == 0 {
return 1
}
return math.Sin(math.Pi*x) / (math.Pi * x)
}
func init() {
NearestNeighbor = ResampleFilter{
Support: 0.0, // special case - not applying the filter
}
Box = ResampleFilter{
Support: 0.5,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x <= 0.5 {
return 1.0
}
return 0
},
}
Linear = ResampleFilter{
Support: 1.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 1.0 {
return 1.0 - x
}
return 0
},
}
Hermite = ResampleFilter{
Support: 1.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 1.0 {
return bcspline(x, 0.0, 0.0)
}
return 0
},
}
MitchellNetravali = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 1.0/3.0, 1.0/3.0)
}
return 0
},
}
CatmullRom = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 0.0, 0.5)
}
return 0
},
}
BSpline = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 1.0, 0.0)
}
return 0
},
}
Gaussian = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return math.Exp(-2 * x * x)
}
return 0
},
}
Bartlett = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (3.0 - x) / 3.0
}
return 0
},
}
Lanczos = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * sinc(x/3.0)
}
return 0
},
}
Hann = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
}
return 0
},
}
Hamming = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
}
return 0
},
}
Blackman = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
}
return 0
},
}
Welch = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (1.0 - (x * x / 9.0))
}
return 0
},
}
Cosine = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
}
return 0
},
}
}
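A minimal usage sketch of the API above: Thumbnail delegates to Fill with a Center anchor, and the exported ResampleFilter fields allow defining a custom kernel. File names are placeholders, and the Open/Save helpers are assumed to be provided elsewhere in the imaging package.

```go
package main

import (
	"log"
	"math"

	"github.com/disintegration/imaging"
)

func main() {
	// "input.jpg", "thumb.png" and "small.png" are placeholder file names.
	src, err := imaging.Open("input.jpg")
	if err != nil {
		log.Fatal(err)
	}

	// Built-in filter: a 100x100 thumbnail cropped from the center.
	thumb := imaging.Thumbnail(src, 100, 100, imaging.Lanczos)

	// Custom filter built from the exported ResampleFilter fields:
	// a simple triangle (tent) kernel with support 1.
	triangle := imaging.ResampleFilter{
		Support: 1.0,
		Kernel: func(x float64) float64 {
			x = math.Abs(x)
			if x < 1.0 {
				return 1.0 - x
			}
			return 0
		},
	}
	small := imaging.Resize(src, 320, 0, triangle)

	if err := imaging.Save(thumb, "thumb.png"); err != nil {
		log.Fatal(err)
	}
	if err := imaging.Save(small, "small.png"); err != nil {
		log.Fatal(err)
	}
}
```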

250
vendor/github.com/disintegration/imaging/scanner.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
package imaging
import (
"image"
"image/color"
)
type scanner struct {
image image.Image
w, h int
palette []color.NRGBA
}
func newScanner(img image.Image) *scanner {
s := &scanner{
image: img,
w: img.Bounds().Dx(),
h: img.Bounds().Dy(),
}
if img, ok := img.(*image.Paletted); ok {
s.palette = make([]color.NRGBA, len(img.Palette))
for i := 0; i < len(img.Palette); i++ {
s.palette[i] = color.NRGBAModel.Convert(img.Palette[i]).(color.NRGBA)
}
}
return s
}
// scan scans the given rectangular region of the image into dst.
func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
switch img := s.image.(type) {
case *image.NRGBA:
size := (x2 - x1) * 4
j := 0
i := y1*img.Stride + x1*4
for y := y1; y < y2; y++ {
copy(dst[j:j+size], img.Pix[i:i+size])
j += size
i += img.Stride
}
case *image.NRGBA64:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*8
for x := x1; x < x2; x++ {
dst[j+0] = img.Pix[i+0]
dst[j+1] = img.Pix[i+2]
dst[j+2] = img.Pix[i+4]
dst[j+3] = img.Pix[i+6]
j += 4
i += 8
}
}
case *image.RGBA:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*4
for x := x1; x < x2; x++ {
a := img.Pix[i+3]
switch a {
case 0:
dst[j+0] = 0
dst[j+1] = 0
dst[j+2] = 0
case 0xff:
dst[j+0] = img.Pix[i+0]
dst[j+1] = img.Pix[i+1]
dst[j+2] = img.Pix[i+2]
default:
r16 := uint16(img.Pix[i+0])
g16 := uint16(img.Pix[i+1])
b16 := uint16(img.Pix[i+2])
a16 := uint16(a)
dst[j+0] = uint8(r16 * 0xff / a16)
dst[j+1] = uint8(g16 * 0xff / a16)
dst[j+2] = uint8(b16 * 0xff / a16)
}
dst[j+3] = a
j += 4
i += 4
}
}
case *image.RGBA64:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*8
for x := x1; x < x2; x++ {
a := img.Pix[i+6]
switch a {
case 0:
dst[j+0] = 0
dst[j+1] = 0
dst[j+2] = 0
case 0xff:
dst[j+0] = img.Pix[i+0]
dst[j+1] = img.Pix[i+2]
dst[j+2] = img.Pix[i+4]
default:
r32 := uint32(img.Pix[i+0])<<8 | uint32(img.Pix[i+1])
g32 := uint32(img.Pix[i+2])<<8 | uint32(img.Pix[i+3])
b32 := uint32(img.Pix[i+4])<<8 | uint32(img.Pix[i+5])
a32 := uint32(img.Pix[i+6])<<8 | uint32(img.Pix[i+7])
dst[j+0] = uint8((r32 * 0xffff / a32) >> 8)
dst[j+1] = uint8((g32 * 0xffff / a32) >> 8)
dst[j+2] = uint8((b32 * 0xffff / a32) >> 8)
}
dst[j+3] = a
j += 4
i += 8
}
}
case *image.Gray:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1
for x := x1; x < x2; x++ {
c := img.Pix[i]
dst[j+0] = c
dst[j+1] = c
dst[j+2] = c
dst[j+3] = 0xff
j += 4
i++
}
}
case *image.Gray16:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*2
for x := x1; x < x2; x++ {
c := img.Pix[i]
dst[j+0] = c
dst[j+1] = c
dst[j+2] = c
dst[j+3] = 0xff
j += 4
i += 2
}
}
case *image.YCbCr:
j := 0
x1 += img.Rect.Min.X
x2 += img.Rect.Min.X
y1 += img.Rect.Min.Y
y2 += img.Rect.Min.Y
for y := y1; y < y2; y++ {
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
for x := x1; x < x2; x++ {
var ic int
switch img.SubsampleRatio {
case image.YCbCrSubsampleRatio444:
ic = (y-img.Rect.Min.Y)*img.CStride + (x - img.Rect.Min.X)
case image.YCbCrSubsampleRatio422:
ic = (y-img.Rect.Min.Y)*img.CStride + (x/2 - img.Rect.Min.X/2)
case image.YCbCrSubsampleRatio420:
ic = (y/2-img.Rect.Min.Y/2)*img.CStride + (x/2 - img.Rect.Min.X/2)
case image.YCbCrSubsampleRatio440:
ic = (y/2-img.Rect.Min.Y/2)*img.CStride + (x - img.Rect.Min.X)
default:
ic = img.COffset(x, y)
}
yy := int(img.Y[iy])
cb := int(img.Cb[ic]) - 128
cr := int(img.Cr[ic]) - 128
r := (yy<<16 + 91881*cr + 1<<15) >> 16
if r > 0xff {
r = 0xff
} else if r < 0 {
r = 0
}
g := (yy<<16 - 22554*cb - 46802*cr + 1<<15) >> 16
if g > 0xff {
g = 0xff
} else if g < 0 {
g = 0
}
b := (yy<<16 + 116130*cb + 1<<15) >> 16
if b > 0xff {
b = 0xff
} else if b < 0 {
b = 0
}
dst[j+0] = uint8(r)
dst[j+1] = uint8(g)
dst[j+2] = uint8(b)
dst[j+3] = 0xff
iy++
j += 4
}
}
case *image.Paletted:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1
for x := x1; x < x2; x++ {
c := s.palette[img.Pix[i]]
dst[j+0] = c.R
dst[j+1] = c.G
dst[j+2] = c.B
dst[j+3] = c.A
j += 4
i++
}
}
default:
j := 0
b := s.image.Bounds()
x1 += b.Min.X
x2 += b.Min.X
y1 += b.Min.Y
y2 += b.Min.Y
for y := y1; y < y2; y++ {
for x := x1; x < x2; x++ {
r16, g16, b16, a16 := s.image.At(x, y).RGBA()
switch a16 {
case 0xffff:
dst[j+0] = uint8(r16 >> 8)
dst[j+1] = uint8(g16 >> 8)
dst[j+2] = uint8(b16 >> 8)
dst[j+3] = 0xff
case 0:
dst[j+0] = 0
dst[j+1] = 0
dst[j+2] = 0
dst[j+3] = 0
default:
dst[j+0] = uint8(((r16 * 0xffff) / a16) >> 8)
dst[j+1] = uint8(((g16 * 0xffff) / a16) >> 8)
dst[j+2] = uint8(((b16 * 0xffff) / a16) >> 8)
dst[j+3] = uint8(a16 >> 8)
}
j += 4
}
}
}
}
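The *image.RGBA branch above un-premultiplies the alpha channel before storing NRGBA values. A standalone sketch of that arithmetic for a single, arbitrarily chosen pixel:

```go
package main

import "fmt"

func main() {
	// A premultiplied pixel: red 64 at alpha 128 (both values chosen arbitrarily).
	r, a := uint16(64), uint16(128)

	// Same formula as the *image.RGBA case above: scale back to the
	// non-premultiplied range by multiplying with 0xff and dividing by alpha.
	nr := uint8(r * 0xff / a)

	fmt.Println(nr) // 127
}
```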

213
vendor/github.com/disintegration/imaging/tools.go generated vendored Normal file
View File

@ -0,0 +1,213 @@
package imaging
import (
"image"
"math"
)
// Anchor is the anchor point for image alignment.
type Anchor int
// Anchor point positions.
const (
Center Anchor = iota
TopLeft
Top
TopRight
Left
Right
BottomLeft
Bottom
BottomRight
)
func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point {
var x, y int
switch anchor {
case TopLeft:
x = b.Min.X
y = b.Min.Y
case Top:
x = b.Min.X + (b.Dx()-w)/2
y = b.Min.Y
case TopRight:
x = b.Max.X - w
y = b.Min.Y
case Left:
x = b.Min.X
y = b.Min.Y + (b.Dy()-h)/2
case Right:
x = b.Max.X - w
y = b.Min.Y + (b.Dy()-h)/2
case BottomLeft:
x = b.Min.X
y = b.Max.Y - h
case Bottom:
x = b.Min.X + (b.Dx()-w)/2
y = b.Max.Y - h
case BottomRight:
x = b.Max.X - w
y = b.Max.Y - h
default:
x = b.Min.X + (b.Dx()-w)/2
y = b.Min.Y + (b.Dy()-h)/2
}
return image.Pt(x, y)
}
// Crop cuts out a rectangular region with the specified bounds
// from the image and returns the cropped image.
func Crop(img image.Image, rect image.Rectangle) *image.NRGBA {
r := rect.Intersect(img.Bounds()).Sub(img.Bounds().Min)
if r.Empty() {
return &image.NRGBA{}
}
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy()))
rowSize := r.Dx() * 4
parallel(r.Min.Y, r.Max.Y, func(ys <-chan int) {
for y := range ys {
i := (y - r.Min.Y) * dst.Stride
src.scan(r.Min.X, y, r.Max.X, y+1, dst.Pix[i:i+rowSize])
}
})
return dst
}
// CropAnchor cuts out a rectangular region with the specified size
// from the image using the specified anchor point and returns the cropped image.
func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA {
srcBounds := img.Bounds()
pt := anchorPt(srcBounds, width, height, anchor)
r := image.Rect(0, 0, width, height).Add(pt)
b := srcBounds.Intersect(r)
return Crop(img, b)
}
// CropCenter cuts out a rectangular region with the specified size
// from the center of the image and returns the cropped image.
func CropCenter(img image.Image, width, height int) *image.NRGBA {
return CropAnchor(img, width, height, Center)
}
// Paste pastes the img image to the background image at the specified position and returns the combined image.
func Paste(background, img image.Image, pos image.Point) *image.NRGBA {
dst := Clone(background)
pos = pos.Sub(background.Bounds().Min)
pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
interRect := pasteRect.Intersect(dst.Bounds())
if interRect.Empty() {
return dst
}
src := newScanner(img)
parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
for y := range ys {
x1 := interRect.Min.X - pasteRect.Min.X
x2 := interRect.Max.X - pasteRect.Min.X
y1 := y - pasteRect.Min.Y
y2 := y1 + 1
i1 := y*dst.Stride + interRect.Min.X*4
i2 := i1 + interRect.Dx()*4
src.scan(x1, y1, x2, y2, dst.Pix[i1:i2])
}
})
return dst
}
// PasteCenter pastes the img image to the center of the background image and returns the combined image.
func PasteCenter(background, img image.Image) *image.NRGBA {
bgBounds := background.Bounds()
bgW := bgBounds.Dx()
bgH := bgBounds.Dy()
bgMinX := bgBounds.Min.X
bgMinY := bgBounds.Min.Y
centerX := bgMinX + bgW/2
centerY := bgMinY + bgH/2
x0 := centerX - img.Bounds().Dx()/2
y0 := centerY - img.Bounds().Dy()/2
return Paste(background, img, image.Pt(x0, y0))
}
// Overlay draws the img image over the background image at the given position
// and returns the combined image. The opacity parameter is the opacity of the img
// image layer, used to compose the images; it must be in the range 0.0 to 1.0.
//
// Usage examples:
//
// // Draw spriteImage over backgroundImage at the given position (x=50, y=50).
// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0)
//
// // Blend two opaque images of the same size.
// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
//
func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
dst := Clone(background)
pos = pos.Sub(background.Bounds().Min)
pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
interRect := pasteRect.Intersect(dst.Bounds())
if interRect.Empty() {
return dst
}
src := newScanner(img)
parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
scanLine := make([]uint8, interRect.Dx()*4)
for y := range ys {
x1 := interRect.Min.X - pasteRect.Min.X
x2 := interRect.Max.X - pasteRect.Min.X
y1 := y - pasteRect.Min.Y
y2 := y1 + 1
src.scan(x1, y1, x2, y2, scanLine)
i := y*dst.Stride + interRect.Min.X*4
j := 0
for x := interRect.Min.X; x < interRect.Max.X; x++ {
r1 := float64(dst.Pix[i+0])
g1 := float64(dst.Pix[i+1])
b1 := float64(dst.Pix[i+2])
a1 := float64(dst.Pix[i+3])
r2 := float64(scanLine[j+0])
g2 := float64(scanLine[j+1])
b2 := float64(scanLine[j+2])
a2 := float64(scanLine[j+3])
coef2 := opacity * a2 / 255
coef1 := (1 - coef2) * a1 / 255
coefSum := coef1 + coef2
coef1 /= coefSum
coef2 /= coefSum
dst.Pix[i+0] = uint8(r1*coef1 + r2*coef2)
dst.Pix[i+1] = uint8(g1*coef1 + g2*coef2)
dst.Pix[i+2] = uint8(b1*coef1 + b2*coef2)
dst.Pix[i+3] = uint8(math.Min(a1+a2*opacity*(255-a1)/255, 255))
i += 4
j += 4
}
}
})
return dst
}
// OverlayCenter overlays the img image onto the center of the background image and
// returns the combined image. The opacity parameter is the opacity of the img
// image layer, used to compose the images; it must be in the range 0.0 to 1.0.
func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA {
bgBounds := background.Bounds()
bgW := bgBounds.Dx()
bgH := bgBounds.Dy()
bgMinX := bgBounds.Min.X
bgMinY := bgBounds.Min.Y
centerX := bgMinX + bgW/2
centerY := bgMinY + bgH/2
x0 := centerX - img.Bounds().Dx()/2
y0 := centerY - img.Bounds().Dy()/2
return Overlay(background, img, image.Point{x0, y0}, opacity)
}
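A minimal sketch combining the helpers above: cut a fixed-size region out of an anchored corner, then draw a second image over it with partial opacity. File names are placeholders, and the Open/Save helpers are assumed to be provided elsewhere in the imaging package.

```go
package main

import (
	"image"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	// "background.jpg", "logo.png" and "out.png" are placeholder file names.
	bg, err := imaging.Open("background.jpg")
	if err != nil {
		log.Fatal(err)
	}
	logo, err := imaging.Open("logo.png")
	if err != nil {
		log.Fatal(err)
	}

	// Cut a 400x300 region out of the bottom-right corner of the background.
	corner := imaging.CropAnchor(bg, 400, 300, imaging.BottomRight)

	// Draw the logo over that region at 50% opacity, offset 10px from its top-left.
	out := imaging.Overlay(corner, logo, image.Pt(10, 10), 0.5)

	if err := imaging.Save(out, "out.png"); err != nil {
		log.Fatal(err)
	}
}
```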

271
vendor/github.com/disintegration/imaging/transform.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
package imaging
import (
"image"
"image/color"
"math"
)
// FlipH flips the image horizontally (from left to right) and returns the transformed image.
func FlipH(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstY
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
func FlipV(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstH - dstY - 1
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
func Transpose(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstY
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
func Transverse(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
func Rotate90(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
func Rotate180(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstH - dstY - 1
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
func Rotate270(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstY
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate rotates an image by the given angle counter-clockwise.
// The angle parameter is the rotation angle in degrees.
// The bgColor parameter specifies the color of the uncovered zone after the rotation.
func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
angle = angle - math.Floor(angle/360)*360
switch angle {
case 0:
return Clone(img)
case 90:
return Rotate90(img)
case 180:
return Rotate180(img)
case 270:
return Rotate270(img)
}
src := toNRGBA(img)
srcW := src.Bounds().Max.X
srcH := src.Bounds().Max.Y
dstW, dstH := rotatedSize(srcW, srcH, angle)
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
if dstW <= 0 || dstH <= 0 {
return dst
}
srcXOff := float64(srcW)/2 - 0.5
srcYOff := float64(srcH)/2 - 0.5
dstXOff := float64(dstW)/2 - 0.5
dstYOff := float64(dstH)/2 - 0.5
bgColorNRGBA := color.NRGBAModel.Convert(bgColor).(color.NRGBA)
sin, cos := math.Sincos(math.Pi * angle / 180)
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
for dstX := 0; dstX < dstW; dstX++ {
xf, yf := rotatePoint(float64(dstX)-dstXOff, float64(dstY)-dstYOff, sin, cos)
xf, yf = xf+srcXOff, yf+srcYOff
interpolatePoint(dst, dstX, dstY, src, xf, yf, bgColorNRGBA)
}
}
})
return dst
}
func rotatePoint(x, y, sin, cos float64) (float64, float64) {
return x*cos - y*sin, x*sin + y*cos
}
func rotatedSize(w, h int, angle float64) (int, int) {
if w <= 0 || h <= 0 {
return 0, 0
}
sin, cos := math.Sincos(math.Pi * angle / 180)
x1, y1 := rotatePoint(float64(w-1), 0, sin, cos)
x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos)
x3, y3 := rotatePoint(0, float64(h-1), sin, cos)
minx := math.Min(x1, math.Min(x2, math.Min(x3, 0)))
maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0)))
miny := math.Min(y1, math.Min(y2, math.Min(y3, 0)))
maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0)))
neww := maxx - minx + 1
if neww-math.Floor(neww) > 0.1 {
neww++
}
newh := maxy - miny + 1
if newh-math.Floor(newh) > 0.1 {
newh++
}
return int(neww), int(newh)
}
func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) {
dstIndex := dstY*dst.Stride + dstX*4
x0 := int(math.Floor(xf))
y0 := int(math.Floor(yf))
bounds := src.Bounds()
if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) {
dst.Pix[dstIndex+0] = bgColor.R
dst.Pix[dstIndex+1] = bgColor.G
dst.Pix[dstIndex+2] = bgColor.B
dst.Pix[dstIndex+3] = bgColor.A
return
}
xq := xf - float64(x0)
yq := yf - float64(y0)
var pxs [4]color.NRGBA
var cfs [4]float64
for i := 0; i < 2; i++ {
for j := 0; j < 2; j++ {
k := i*2 + j
pt := image.Pt(x0+j, y0+i)
if pt.In(bounds) {
l := pt.Y*src.Stride + pt.X*4
pxs[k].R = src.Pix[l+0]
pxs[k].G = src.Pix[l+1]
pxs[k].B = src.Pix[l+2]
pxs[k].A = src.Pix[l+3]
} else {
pxs[k] = bgColor
}
}
}
cfs[0] = (1 - xq) * (1 - yq)
cfs[1] = xq * (1 - yq)
cfs[2] = (1 - xq) * yq
cfs[3] = xq * yq
var r, g, b, a float64
for i := range pxs {
wa := float64(pxs[i].A) * cfs[i]
r += float64(pxs[i].R) * wa
g += float64(pxs[i].G) * wa
b += float64(pxs[i].B) * wa
a += wa
}
if a != 0 {
r /= a
g /= a
b /= a
}
dst.Pix[dstIndex+0] = clamp(r)
dst.Pix[dstIndex+1] = clamp(g)
dst.Pix[dstIndex+2] = clamp(b)
dst.Pix[dstIndex+3] = clamp(a)
}
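A minimal sketch of the Rotate path above for a non-right angle, filling the uncovered corners with a transparent background color. The file names are placeholders, and the Open/Save helpers are assumed to be provided elsewhere in the imaging package.

```go
package main

import (
	"image/color"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	// "input.jpg" and "rotated.png" are placeholder file names.
	src, err := imaging.Open("input.jpg")
	if err != nil {
		log.Fatal(err)
	}

	// Right angles take the fast Rotate90/180/270 paths above; any other
	// angle falls through to the bilinear path, which fills the uncovered
	// corners with the given background color (fully transparent here).
	rotated := imaging.Rotate(src, 30, color.NRGBA{0, 0, 0, 0})

	if err := imaging.Save(rotated, "rotated.png"); err != nil {
		log.Fatal(err)
	}
}
```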

83
vendor/github.com/disintegration/imaging/utils.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
package imaging
import (
"image"
"runtime"
"sync"
)
// parallel processes the data in separate goroutines.
func parallel(start, stop int, fn func(<-chan int)) {
count := stop - start
if count < 1 {
return
}
procs := runtime.GOMAXPROCS(0)
if procs > count {
procs = count
}
c := make(chan int, count)
for i := start; i < stop; i++ {
c <- i
}
close(c)
var wg sync.WaitGroup
for i := 0; i < procs; i++ {
wg.Add(1)
go func() {
defer wg.Done()
fn(c)
}()
}
wg.Wait()
}
// absint returns the absolute value of i.
func absint(i int) int {
if i < 0 {
return -i
}
return i
}
// clamp rounds and clamps a float64 value to fit into a uint8.
func clamp(x float64) uint8 {
v := int64(x + 0.5)
if v > 255 {
return 255
}
if v > 0 {
return uint8(v)
}
return 0
}
func reverse(pix []uint8) {
if len(pix) <= 4 {
return
}
i := 0
j := len(pix) - 4
for i < j {
pix[i+0], pix[j+0] = pix[j+0], pix[i+0]
pix[i+1], pix[j+1] = pix[j+1], pix[i+1]
pix[i+2], pix[j+2] = pix[j+2], pix[i+2]
pix[i+3], pix[j+3] = pix[j+3], pix[i+3]
i += 4
j -= 4
}
}
func toNRGBA(img image.Image) *image.NRGBA {
if img, ok := img.(*image.NRGBA); ok {
return &image.NRGBA{
Pix: img.Pix,
Stride: img.Stride,
Rect: img.Rect.Sub(img.Rect.Min),
}
}
return Clone(img)
}

13
vendor/github.com/gdamore/encoding/.appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,13 @@
version: 1.0.{build}
clone_folder: c:\gopath\src\github.com\gdamore\encoding
environment:
GOPATH: c:\gopath
build_script:
- go version
- go env
- SET PATH=%LOCALAPPDATA%\atom\bin;%GOPATH%\bin;%PATH%
- go get -t ./...
- go build
- go install ./...
test_script:
- go test ./...

6
vendor/github.com/gdamore/encoding/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,6 @@
language: go
go:
- 1.3
- 1.5
- tip

202
vendor/github.com/gdamore/encoding/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/github.com/gdamore/encoding/README.md generated vendored Normal file
View File

@ -0,0 +1,19 @@
## encoding
[![Linux Status](https://img.shields.io/travis/gdamore/encoding.svg?label=linux)](https://travis-ci.org/gdamore/encoding)
[![Windows Status](https://img.shields.io/appveyor/ci/gdamore/encoding.svg?label=windows)](https://ci.appveyor.com/project/gdamore/encoding)
[![Apache License](https://img.shields.io/badge/license-APACHE2-blue.svg)](https://github.com/gdamore/encoding/blob/master/LICENSE)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/gdamore/encoding)
[![Go Report Card](http://goreportcard.com/badge/gdamore/encoding)](http://goreportcard.com/report/gdamore/encoding)
Package encoding provides a number of encodings that are missing from the
standard Go [encoding](https://godoc.org/golang.org/x/text/encoding) package.
We hope that we can contribute these to the standard Go library someday. It
turns out that some of these are useful for dealing with I/O streams coming
from non-UTF friendly sources.
The UTF8 Encoder is also useful for situations where valid UTF-8 might be
carried in streams that contain non-valid UTF; in particular I use it for
helping me cope with terminals that embed escape sequences in otherwise
valid UTF-8.

36
vendor/github.com/gdamore/encoding/ascii.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
// ASCII represents the 7-bit US-ASCII scheme. It decodes directly to
// UTF-8 without change, as all ASCII values are legal UTF-8.
// Unicode values less than 128 (i.e. 7 bits) map 1:1 with ASCII.
// It encodes runes outside of that to 0x1A, the ASCII substitution character.
var ASCII encoding.Encoding
func init() {
amap := make(map[byte]rune)
for i := 128; i <= 255; i++ {
amap[byte(i)] = RuneError
}
cm := &Charmap{Map: amap}
cm.Init()
ASCII = cm
}
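A minimal sketch of how the ASCII encoding above might be used through the x/text encoding interfaces; the input string is arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gdamore/encoding"
)

func main() {
	enc := encoding.ASCII.NewEncoder()
	dec := encoding.ASCII.NewDecoder()

	// 'é' has no 7-bit representation, so it is replaced by the ASCII
	// substitution character 0x1A on encoding.
	out, err := enc.String("café")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", out) // "caf\x1a"

	// Decoding maps 7-bit bytes straight back to the same runes.
	back, err := dec.String(out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", back) // "caf\x1a"
}
```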

192
vendor/github.com/gdamore/encoding/charmap.go generated vendored Normal file
View File

@ -0,0 +1,192 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"sync"
"unicode/utf8"
"golang.org/x/text/transform"
"golang.org/x/text/encoding"
)
const (
// RuneError is an alias for the UTF-8 replacement rune, '\uFFFD'.
RuneError = '\uFFFD'
// RuneSelf is the rune below which UTF-8 and the Unicode values are
// identical. It's also the limit for ASCII.
RuneSelf = 0x80
// ASCIISub is the ASCII substitution character.
ASCIISub = '\x1a'
)
// Charmap is a structure for setting up encodings for 8-bit character sets,
// for transforming between UTF8 and that other character set. It has some
// ideas borrowed from golang.org/x/text/encoding/charmap, but it uses a
// different implementation. This implementation uses maps, and supports
// user-defined maps.
//
// We do assume that a character map has a reasonable substitution character,
// and that valid encodings are stable (exactly a 1:1 map) and stateless
// (that is there is no shift character or anything like that.) Hence this
// approach will not work for many East Asian character sets.
//
// Measurement shows little or no difference in the performance of
// the two approaches. The difference was down to a couple of nsec/op, and
// no consistent pattern as to which ran faster. With the conversion to
// UTF-8 the code takes about 25 nsec/op. The conversion in the reverse
// direction takes about 100 nsec/op. (The larger cost for conversion
// from UTF-8 is most likely due to the need to convert the UTF-8 byte stream
// to a rune before conversion.)
//
type Charmap struct {
transform.NopResetter
bytes map[rune]byte
runes [256][]byte
once sync.Once
// The map between bytes and runes. To indicate that a specific
// byte value is invalid for a character set, use the rune
// utf8.RuneError. Values that are absent from this map will
// be assumed to have the identity mapping -- that is the default
// is to assume ISO8859-1, where all 8-bit characters have the same
// numeric value as their Unicode runes. (Not to be confused with
// the UTF-8 values, which *will* be different for non-ASCII runes.)
//
// If no values less than RuneSelf are changed (or have non-identity
// mappings), then the character set is assumed to be an ASCII
// superset, and certain assumptions and optimizations become
// available for ASCII bytes.
Map map[byte]rune
// The ReplacementChar is the byte value to use for substitution.
// It should normally be ASCIISub for ASCII encodings. This may be
// unset (left to zero) for mappings that are strictly ASCII supersets.
// In that case ASCIISub will be assumed instead.
ReplacementChar byte
}
type cmapDecoder struct {
transform.NopResetter
runes [256][]byte
}
type cmapEncoder struct {
transform.NopResetter
bytes map[rune]byte
replace byte
}
// Init initializes internal values of a character map. This should
// be done early, to minimize the cost of allocation of transforms
// later. It is not strictly necessary however, as the allocation
// functions will arrange to call it if it has not already been done.
func (c *Charmap) Init() {
c.once.Do(c.initialize)
}
func (c *Charmap) initialize() {
c.bytes = make(map[rune]byte)
ascii := true
for i := 0; i < 256; i++ {
r, ok := c.Map[byte(i)]
if !ok {
r = rune(i)
}
if r < 128 && r != rune(i) {
ascii = false
}
if r != RuneError {
c.bytes[r] = byte(i)
}
utf := make([]byte, utf8.RuneLen(r))
utf8.EncodeRune(utf, r)
c.runes[i] = utf
}
if ascii && c.ReplacementChar == '\x00' {
c.ReplacementChar = ASCIISub
}
}
// NewDecoder returns a Decoder that converts from the 8-bit
// character set to UTF-8. Unknown mappings, if any, are mapped
// to '\uFFFD'.
func (c *Charmap) NewDecoder() *encoding.Decoder {
c.Init()
return &encoding.Decoder{Transformer: &cmapDecoder{runes: c.runes}}
}
// NewEncoder returns an Encoder that converts from UTF8 to the
// 8-bit character set. Unknown mappings are mapped to 0x1A.
func (c *Charmap) NewEncoder() *encoding.Encoder {
c.Init()
return &encoding.Encoder{Transformer:
&cmapEncoder{bytes: c.bytes, replace: c.ReplacementChar}}
}
func (d *cmapDecoder) Transform(dst, src []byte, atEOF bool) (int, int, error) {
var e error
var ndst, nsrc int
for _, c := range src {
b := d.runes[c]
l := len(b)
if ndst+l > len(dst) {
e = transform.ErrShortDst
break
}
for i := 0; i < l; i++ {
dst[ndst] = b[i]
ndst++
}
nsrc++
}
return ndst, nsrc, e
}
func (d *cmapEncoder) Transform(dst, src []byte, atEOF bool) (int, int, error) {
var e error
var ndst, nsrc int
for nsrc < len(src) {
if ndst >= len(dst) {
e = transform.ErrShortDst
break
}
r, sz := utf8.DecodeRune(src[nsrc:])
if r == utf8.RuneError && sz == 1 {
// If it's inconclusive due to insufficient data
// in the source, report it
if !atEOF && !utf8.FullRune(src[nsrc:]) {
e = transform.ErrShortSrc
break
}
}
if c, ok := d.bytes[r]; ok {
dst[ndst] = c
} else {
dst[ndst] = d.replace
}
nsrc += sz
ndst++
}
return ndst, nsrc, e
}
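A minimal sketch of the user-defined maps mentioned in the Charmap documentation above, using a hypothetical single-byte set that only overrides byte 0x80:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gdamore/encoding"
)

func main() {
	// A hypothetical ISO8859-1 variant that maps byte 0x80 to the euro sign;
	// all other bytes keep the default identity mapping.
	cm := &encoding.Charmap{Map: map[byte]rune{
		0x80: '€',
	}}
	cm.Init()

	decoded, err := cm.NewDecoder().String("\x80")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded) // €

	encoded, err := cm.NewEncoder().String("€")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", encoded) // 80
}
```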

17
vendor/github.com/gdamore/encoding/doc.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package encoding provides a few of the encoding structures that are
// missing from the Go x/text/encoding tree.
package encoding

273
vendor/github.com/gdamore/encoding/ebcdic.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
// EBCDIC represents the 8-bit EBCDIC scheme, found in some mainframe
// environments. If you don't know what this is, consider yourself lucky.
var EBCDIC encoding.Encoding
func init() {
cm := &Charmap{
ReplacementChar: '\x3f',
Map: map[byte]rune{
// 0x00-0x03 match
0x04: RuneError,
0x05: '\t',
0x06: RuneError,
0x07: '\x7f',
0x08: RuneError,
0x09: RuneError,
0x0a: RuneError,
// 0x0b-0x13 match
0x14: RuneError,
0x15: '\x85', // Not in any ISO code
0x16: '\x08',
0x17: RuneError,
// 0x18-0x19 match
0x1a: RuneError,
0x1b: RuneError,
// 0x1c-0x1f match
0x20: RuneError,
0x21: RuneError,
0x22: RuneError,
0x23: RuneError,
0x24: RuneError,
0x25: '\n',
0x26: '\x17',
0x27: '\x1b',
0x28: RuneError,
0x29: RuneError,
0x2a: RuneError,
0x2b: RuneError,
0x2c: RuneError,
0x2d: '\x05',
0x2e: '\x06',
0x2f: '\x07',
0x30: RuneError,
0x31: RuneError,
0x32: '\x16',
0x33: RuneError,
0x34: RuneError,
0x35: RuneError,
0x36: RuneError,
0x37: '\x04',
0x38: RuneError,
0x39: RuneError,
0x3a: RuneError,
0x3b: RuneError,
0x3c: '\x14',
0x3d: '\x15',
0x3e: RuneError,
0x3f: '\x1a', // also replacement char
0x40: ' ',
0x41: '\xa0',
0x42: RuneError,
0x43: RuneError,
0x44: RuneError,
0x45: RuneError,
0x46: RuneError,
0x47: RuneError,
0x48: RuneError,
0x49: RuneError,
0x4a: RuneError,
0x4b: '.',
0x4c: '<',
0x4d: '(',
0x4e: '+',
0x4f: '|',
0x50: '&',
0x51: RuneError,
0x52: RuneError,
0x53: RuneError,
0x54: RuneError,
0x55: RuneError,
0x56: RuneError,
0x57: RuneError,
0x58: RuneError,
0x59: RuneError,
0x5a: '!',
0x5b: '$',
0x5c: '*',
0x5d: ')',
0x5e: ';',
0x5f: '¬',
0x60: '-',
0x61: '/',
0x62: RuneError,
0x63: RuneError,
0x64: RuneError,
0x65: RuneError,
0x66: RuneError,
0x67: RuneError,
0x68: RuneError,
0x69: RuneError,
0x6a: '¦',
0x6b: ',',
0x6c: '%',
0x6d: '_',
0x6e: '>',
0x6f: '?',
0x70: RuneError,
0x71: RuneError,
0x72: RuneError,
0x73: RuneError,
0x74: RuneError,
0x75: RuneError,
0x76: RuneError,
0x77: RuneError,
0x78: RuneError,
0x79: '`',
0x7a: ':',
0x7b: '#',
0x7c: '@',
0x7d: '\'',
0x7e: '=',
0x7f: '"',
0x80: RuneError,
0x81: 'a',
0x82: 'b',
0x83: 'c',
0x84: 'd',
0x85: 'e',
0x86: 'f',
0x87: 'g',
0x88: 'h',
0x89: 'i',
0x8a: RuneError,
0x8b: RuneError,
0x8c: RuneError,
0x8d: RuneError,
0x8e: RuneError,
0x8f: '±',
0x90: RuneError,
0x91: 'j',
0x92: 'k',
0x93: 'l',
0x94: 'm',
0x95: 'n',
0x96: 'o',
0x97: 'p',
0x98: 'q',
0x99: 'r',
0x9a: RuneError,
0x9b: RuneError,
0x9c: RuneError,
0x9d: RuneError,
0x9e: RuneError,
0x9f: RuneError,
0xa0: RuneError,
0xa1: '~',
0xa2: 's',
0xa3: 't',
0xa4: 'u',
0xa5: 'v',
0xa6: 'w',
0xa7: 'x',
0xa8: 'y',
0xa9: 'z',
0xaa: RuneError,
0xab: RuneError,
0xac: RuneError,
0xad: RuneError,
0xae: RuneError,
0xaf: RuneError,
0xb0: '^',
0xb1: RuneError,
0xb2: RuneError,
0xb3: RuneError,
0xb4: RuneError,
0xb5: RuneError,
0xb6: RuneError,
0xb7: RuneError,
0xb8: RuneError,
0xb9: RuneError,
0xba: '[',
0xbb: ']',
0xbc: RuneError,
0xbd: RuneError,
0xbe: RuneError,
0xbf: RuneError,
0xc0: '{',
0xc1: 'A',
0xc2: 'B',
0xc3: 'C',
0xc4: 'D',
0xc5: 'E',
0xc6: 'F',
0xc7: 'G',
0xc8: 'H',
0xc9: 'I',
0xca: '\xad', // NB: soft hyphen
0xcb: RuneError,
0xcc: RuneError,
0xcd: RuneError,
0xce: RuneError,
0xcf: RuneError,
0xd0: '}',
0xd1: 'J',
0xd2: 'K',
0xd3: 'L',
0xd4: 'M',
0xd5: 'N',
0xd6: 'O',
0xd7: 'P',
0xd8: 'Q',
0xd9: 'R',
0xda: RuneError,
0xdb: RuneError,
0xdc: RuneError,
0xdd: RuneError,
0xde: RuneError,
0xdf: RuneError,
0xe0: '\\',
0xe1: '\u2007', // Non-breaking space
0xe2: 'S',
0xe3: 'T',
0xe4: 'U',
0xe5: 'V',
0xe6: 'W',
0xe7: 'X',
0xe8: 'Y',
0xe9: 'Z',
0xea: RuneError,
0xeb: RuneError,
0xec: RuneError,
0xed: RuneError,
0xee: RuneError,
0xef: RuneError,
0xf0: '0',
0xf1: '1',
0xf2: '2',
0xf3: '3',
0xf4: '4',
0xf5: '5',
0xf6: '6',
0xf7: '7',
0xf8: '8',
0xf9: '9',
0xfa: RuneError,
0xfb: RuneError,
0xfc: RuneError,
0xfd: RuneError,
0xfe: RuneError,
0xff: RuneError,
}}
cm.Init()
EBCDIC = cm
}

33
vendor/github.com/gdamore/encoding/latin1.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
// ISO8859_1 represents the 8-bit ISO8859-1 scheme. It decodes directly to
// UTF-8 without change, as all ISO8859-1 values are legal UTF-8.
// Unicode values less than 256 (i.e. 8 bits) map 1:1 with 8859-1.
// It encodes runes outside of that to 0x1A, the ASCII substitution character.
var ISO8859_1 encoding.Encoding
func init() {
cm := &Charmap{}
cm.Init()
// 8859-1 is the 8-bit identity map for Unicode.
ISO8859_1 = cm
}

35
vendor/github.com/gdamore/encoding/latin5.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
// ISO8859_9 represents the 8-bit ISO8859-9 scheme.
var ISO8859_9 encoding.Encoding
func init() {
cm := &Charmap{Map: map[byte]rune{
0xD0: 'Ğ',
0xDD: 'İ',
0xDE: 'Ş',
0xF0: 'ğ',
0xFD: 'ı',
0xFE: 'ş',
}}
cm.Init()
ISO8859_9 = cm
}

35
vendor/github.com/gdamore/encoding/utf8.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
type validUtf8 struct{}
// UTF8 is an encoding for UTF-8. All it does is verify that the incoming
// UTF-8 is valid. The main reason for its existence is that it will detect
// and report ErrShortSrc or ErrShortDst, whereas the Nop encoding just
// passes every byte, blithely.
var UTF8 encoding.Encoding = validUtf8{}
func (validUtf8) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: encoding.UTF8Validator}
}
func (validUtf8) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: encoding.UTF8Validator}
}

28
vendor/github.com/lucasb-eyer/go-colorful/.gitignore generated vendored Normal file
View File

@ -0,0 +1,28 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Vim swap files
.*.sw?
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
# Code coverage stuff
coverage.out

8
vendor/github.com/lucasb-eyer/go-colorful/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
language: go
install:
- go get golang.org/x/tools/cmd/cover
- go get gopkg.in/DATA-DOG/go-sqlmock.v1
- go get github.com/mattn/goveralls
script:
- go test -v -covermode=count -coverprofile=coverage.out
- if [[ "$TRAVIS_PULL_REQUEST" = "false" ]]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi

7
vendor/github.com/lucasb-eyer/go-colorful/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2013 Lucas Beyer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

479
vendor/github.com/lucasb-eyer/go-colorful/README.md generated vendored Normal file
View File

@ -0,0 +1,479 @@
go-colorful
===========
A library for playing with colors in go (golang).
[![Build Status](https://travis-ci.org/lucasb-eyer/go-colorful.svg?branch=master)](https://travis-ci.org/lucasb-eyer/go-colorful)
[![Coverage Status](https://coveralls.io/repos/github/lucasb-eyer/go-colorful/badge.svg?branch=master)](https://coveralls.io/github/lucasb-eyer/go-colorful?branch=master)
Why?
====
I love games. I make games. I love detail and I get lost in detail.
One such detail popped up during the development of [Memory Which Does Not Suck](https://github.com/lucasb-eyer/mwdns/),
when we wanted the server to assign the players random colors. Sometimes
two players got very similar colors, which bugged me. The very same evening,
[I want hue](http://tools.medialab.sciences-po.fr/iwanthue/) was the top post
on HackerNews' frontpage and showed me how to Do It Right™. Last but not
least, there was no library for handling color spaces available in go. Colorful
does just that and implements Go's `color.Color` interface.
What?
=====
Go-Colorful stores colors in RGB and provides methods for converting these to various color-spaces. Currently supported colorspaces are:
- **RGB:** All three of Red, Green and Blue in [0..1].
- **HSL:** Hue in [0..360], Saturation and Luminance in [0..1]. For legacy reasons; please forget that it exists.
- **HSV:** Hue in [0..360], Saturation and Value in [0..1]. You're better off using HCL, see below.
- **Hex RGB:** The "internet" color format, as in #FF00FF.
- **Linear RGB:** See [gamma correct rendering](http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
- **CIE-XYZ:** CIE's standard color space, almost in [0..1].
- **CIE-xyY:** encodes chromaticity in x and y and luminance in Y, all in [0..1]
- **CIE-L\*a\*b\*:** A *perceptually uniform* color space, i.e. distances are meaningful. L\* in [0..1] and a\*, b\* almost in [-1..1].
- **CIE-L\*u\*v\*:** Very similar to CIE-L\*a\*b\*, there is [no consensus](http://en.wikipedia.org/wiki/CIELUV#Historical_background) on which one is "better".
- **CIE-L\*C\*h° (HCL):** This is generally the [most useful](http://vis4.net/blog/posts/avoid-equidistant-hsv-colors/) one; CIE-L\*a\*b\* space in polar coordinates, i.e. a *better* HSV. H° is in [0..360], C\* almost in [-1..1] and L\* as in CIE-L\*a\*b\*.
For the colorspaces where it makes sense (XYZ, Lab, Luv, HCl), the
[D65](http://en.wikipedia.org/wiki/Illuminant_D65) is used as reference white
by default but methods for using your own reference white are provided.
A coordinate being *almost in* a range means that generally it is, but for very
bright colors and depending on the reference white, it might overflow this
range slightly. For example, C\* of #0000ff is 1.338.
Unit-tests are provided.
Nice, but what's it useful for?
-------------------------------
- Converting color spaces. Some people like to do that.
- Blending (interpolating) between colors in a "natural" look by using the right colorspace.
- Generating random colors under some constraints (e.g. colors of the same shade, or shades of one color.)
- Generating gorgeous random palettes with distinct colors of a same temperature.
What not (yet)?
===============
There are a few features which are currently missing and might be useful.
I just haven't implemented them yet because I didn't have the need for it.
Pull requests welcome.
- Sorting colors (potentially using above mentioned distances)
So which colorspace should I use?
=================================
It depends on what you want to do. I think the folks from *I want hue* are
on-spot when they say that RGB fits to how *screens produce* color, CIE L\*a\*b\*
fits how *humans perceive* color and HCL fits how *humans think* colors.
Whenever you'd use HSV, rather go for CIE-L\*C\*h°. For fixed lightness L\* and
chroma C\* values, the hue angle h° rotates through colors of the same
perceived brightness and intensity.
How?
====
### Installing
Installing the library is as easy as
```bash
$ go get github.com/lucasb-eyer/go-colorful
```
The package can then be used through an
```go
import "github.com/lucasb-eyer/go-colorful"
```
### Basic usage
Create a beautiful blue color using different source space:
```go
// Any of the following should be the same
c := colorful.Color{0.313725, 0.478431, 0.721569}
c, err := colorful.Hex("#517AB8")
if err != nil{
log.Fatal(err)
}
c = colorful.Hsv(216.0, 0.56, 0.722)
c = colorful.Xyz(0.189165, 0.190837, 0.480248)
c = colorful.Xyy(0.219895, 0.221839, 0.190837)
c = colorful.Lab(0.507850, 0.040585,-0.370945)
c = colorful.Luv(0.507849,-0.194172,-0.567924)
c = colorful.Hcl(276.2440, 0.373160, 0.507849)
fmt.Printf("RGB values: %v, %v, %v", c.R, c.G, c.B)
```
And then converting this color back into various color spaces:
```go
hex := c.Hex()
h, s, v := c.Hsv()
x, y, z := c.Xyz()
x, y, Y := c.Xyy()
l, a, b := c.Lab()
l, u, v := c.Luv()
h, c, l := c.Hcl()
```
Note that, because of Go's unfortunate choice of requiring an initial uppercase,
the names of the functions relating to the xyY space are just off. If you have
any good suggestion, please open an issue. (I don't consider XyY good.)
### The `color.Color` interface
Because a `colorful.Color` implements Go's `color.Color` interface (found in the
`image/color` package), it can be used anywhere that expects a `color.Color`.
Furthermore, you can convert anything that implements the `color.Color` interface
into a `colorful.Color` using the `MakeColor` function:
```go
c := colorful.MakeColor(color.Gray16{12345})
```
**Caveat:** Be aware that this latter conversion (using `MakeColor`) hits a corner-case
when alpha is exactly zero. Because `color.Color` uses pre-multiplied alpha colors,
this means the RGB values are lost and it's impossible to recover them. The current
implementation `panic()`s in that case, see [issue 21](https://github.com/lucasb-eyer/go-colorful/issues/21)
for discussion and suggesting alternatives.
### Comparing colors
In the RGB color space, the Euclidean distance between colors *doesn't* correspond
to visual/perceptual distance. This means that two pairs of colors with the
same distance in RGB space can appear very differently far apart. This is fixed by the
CIE-L\*a\*b\*, CIE-L\*u\*v\* and CIE-L\*C\*h° color spaces.
Thus you should only compare colors in any of these spaces.
(Note that the distances in CIE-L\*a\*b\* and CIE-L\*C\*h° are the same, since it's the same space, just in cylindrical coordinates.)
![Color distance comparison](doc/colordist/colordist.png)
The two colors shown on the top look much more different than the two shown on
the bottom. Still, in RGB space, their distance is the same.
Here is a little example program which shows the distances between the top two
and bottom two colors in RGB, CIE-L\*a\*b\* and CIE-L\*u\*v\* space. You can find it in `doc/colordist/colordist.go`.
```go
package main
import "fmt"
import "github.com/lucasb-eyer/go-colorful"
func main() {
c1a := colorful.Color{150.0/255.0, 10.0/255.0, 150.0/255.0}
c1b := colorful.Color{ 53.0/255.0, 10.0/255.0, 150.0/255.0}
c2a := colorful.Color{10.0/255.0, 150.0/255.0, 50.0/255.0}
c2b := colorful.Color{99.9/255.0, 150.0/255.0, 10.0/255.0}
fmt.Printf("DistanceRgb: c1: %v\tand c2: %v\n", c1a.DistanceRgb(c1b), c2a.DistanceRgb(c2b))
fmt.Printf("DistanceLab: c1: %v\tand c2: %v\n", c1a.DistanceLab(c1b), c2a.DistanceLab(c2b))
fmt.Printf("DistanceLuv: c1: %v\tand c2: %v\n", c1a.DistanceLuv(c1b), c2a.DistanceLuv(c2b))
fmt.Printf("DistanceCIE76: c1: %v\tand c2: %v\n", c1a.DistanceCIE76(c1b), c2a.DistanceCIE76(c2b))
fmt.Printf("DistanceCIE94: c1: %v\tand c2: %v\n", c1a.DistanceCIE94(c1b), c2a.DistanceCIE94(c2b))
}
```
Running the above program shows that you should always prefer any of the CIE distances:
```bash
$ go run colordist.go
DistanceRgb: c1: 0.3803921568627451 and c2: 0.3858713931171159
DistanceLab: c1: 0.3204845831279805 and c2: 0.2439715175856528
DistanceLuv: c1: 0.5134369614199698 and c2: 0.25686928398606323
DistanceCIE76: c1: 0.3204845831279805 and c2: 0.2439715175856528
DistanceCIE94: c1: 0.19799168128511327 and c2: 0.12207136371167401
```
It also shows that `DistanceLab` is more formally known as `DistanceCIE76` and
has been superseded by the slightly more accurate, but much more expensive
`DistanceCIE94`.
Note that `AlmostEqualRgb` is provided mainly for (unit-)testing purposes. Use
it only if you really know what you're doing. It will eat your cat.
### Blending colors
Blending is highly connected to distance, since it basically "walks through" the
colorspace; thus, if the colorspace maps distances well, the walk is "smooth".
Colorful comes with blending functions in RGB, HSV and any of the LAB spaces.
Of course, you'd rather use the blending functions of the LAB spaces since
these spaces map distances well, but, just in case, here is an example showing
how the blendings (`#fdffcc` to `#242a42`) are done in the various spaces:
![Blending colors in different spaces.](doc/colorblend/colorblend.png)
What you see is that HSV is really bad: it adds some green, which is not present
in the original colors at all! RGB is much better, but it stays light a little
too long. LUV and LAB both hit the right lightness but LAB has a little more
color. HCL works in the same vein as HSV (both are cylindrical interpolations), but
it does it right in that there is no green appearing and the lightness changes
in a linear manner.
While this seems all good, you need to know one thing: When interpolating in any
of the CIE color spaces, you might get invalid RGB colors! This is important if
the starting and ending colors are user-input or random. An example of where this
happens is when blending between `#eeef61` and `#1e3140`:
![Invalid RGB colors may crop up when blending in CIE spaces.](doc/colorblend/invalid.png)
You can test whether a color is a valid RGB color by calling the `IsValid` method,
and indeed, calling `IsValid` returns false for the reddish colors on the bottom.
One way to "fix" this is to get a valid color close to the invalid one by calling
`Clamped`, which always returns a nearby valid color. Doing this, we get the
following result, which is satisfactory:
![Fixing invalid RGB colors by clamping them to the valid range.](doc/colorblend/clamped.png)
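In code, the check-and-fix step is just two method calls. A minimal sketch, assuming `c1` and `c2` are the two `colorful.Color` endpoints from above:
```go
mixed := c1.BlendHcl(c2, 0.5)
if !mixed.IsValid() {
	mixed = mixed.Clamped() // replace with the nearest valid RGB color
}
```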
The following is the full code creating the above three images; it can be found in `doc/colorblend/colorblend.go`.
```go
package main
import "fmt"
import "github.com/lucasb-eyer/go-colorful"
import "image"
import "image/draw"
import "image/png"
import "os"
func main() {
blocks := 10
blockw := 40
img := image.NewRGBA(image.Rect(0,0,blocks*blockw,200))
c1, _ := colorful.Hex("#fdffcc")
c2, _ := colorful.Hex("#242a42")
// Use these colors to get invalid RGB in the gradient.
//c1, _ := colorful.Hex("#EEEF61")
//c2, _ := colorful.Hex("#1E3140")
for i := 0 ; i < blocks ; i++ {
draw.Draw(img, image.Rect(i*blockw, 0,(i+1)*blockw, 40), &image.Uniform{c1.BlendHsv(c2, float64(i)/float64(blocks-1))}, image.ZP, draw.Src)
draw.Draw(img, image.Rect(i*blockw, 40,(i+1)*blockw, 80), &image.Uniform{c1.BlendLuv(c2, float64(i)/float64(blocks-1))}, image.ZP, draw.Src)
draw.Draw(img, image.Rect(i*blockw, 80,(i+1)*blockw,120), &image.Uniform{c1.BlendRgb(c2, float64(i)/float64(blocks-1))}, image.ZP, draw.Src)
draw.Draw(img, image.Rect(i*blockw,120,(i+1)*blockw,160), &image.Uniform{c1.BlendLab(c2, float64(i)/float64(blocks-1))}, image.ZP, draw.Src)
draw.Draw(img, image.Rect(i*blockw,160,(i+1)*blockw,200), &image.Uniform{c1.BlendHcl(c2, float64(i)/float64(blocks-1))}, image.ZP, draw.Src)
// This can be used to "fix" invalid colors in the gradient.
//draw.Draw(img, image.Rect(i*blockw,160,(i+1)*blockw,200), &image.Uniform{c1.BlendHcl(c2, float64(i)/float64(blocks-1)).Clamped()}, image.ZP, draw.Src)
}
toimg, err := os.Create("colorblend.png")
if err != nil {
fmt.Printf("Error: %v", err)
return
}
defer toimg.Close()
png.Encode(toimg, img)
}
```
#### Generating color gradients
A very common reason to blend colors is creating gradients. There is an example
program in [doc/gradientgen/gradientgen.go](doc/gradientgen/gradientgen.go); it doesn't use any API
which hasn't been used in the previous example code, so I won't bother pasting
the code in here. Just look at that gorgeous gradient it generated in HCL space:
!["Spectral" colorbrewer gradient in HCL space.](doc/gradientgen/gradientgen.png)
### Getting random colors
It is sometimes necessary to generate random colors. You could simply do this
on your own by generating colors with random values. By restricting the random
values to a range smaller than [0..1] and using a space such as CIE-H\*C\*l° or
HSV, you can generate either random shades of a color or random colors of a
certain lightness:
```go
random_blue := colorful.Hcl(180.0+rand.Float64()*50.0, 0.2+rand.Float64()*0.8, 0.3+rand.Float64()*0.7)
random_dark := colorful.Hcl(rand.Float64()*360.0, rand.Float64(), rand.Float64()*0.4)
random_light := colorful.Hcl(rand.Float64()*360.0, rand.Float64(), 0.6+rand.Float64()*0.4)
```
Since getting random "warm" and "happy" colors is quite a common task, there
are some helper functions:
```go
colorful.WarmColor()
colorful.HappyColor()
colorful.FastWarmColor()
colorful.FastHappyColor()
```
The ones prefixed by `Fast` are faster but less coherent since they use the HSV
space, as opposed to the regular ones, which use the CIE-L\*C\*h° space. The
following picture shows the warm colors in the top two rows and happy colors
in the bottom two rows. Within each pair, the first row is the regular one and
the second is the fast one.
![Warm, fast warm, happy and fast happy random colors, respectively.](doc/colorgens/colorgens.png)
Don't forget to initialize the random seed! You can see the code used for
generating this picture in `doc/colorgens/colorgens.go`.
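For example, seeding once at program start is enough (go-colorful draws from Go's global `math/rand` source):
```go
import (
	"math/rand"
	"time"
)

func init() {
	// Without this, every run produces the same "random" colors.
	rand.Seed(time.Now().UnixNano())
}
```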
### Getting random palettes
As soon as you need to generate more than one random color, you probably want
them to be distinguishable. Playing against an opponent who has almost the
same blue as I do is not fun. This is where random palettes can help.
These palettes are generated using an algorithm which ensures that all colors
on the palette are as distinguishable as possible. Again, there is a `Fast`
method which works in HSV and is less perceptually uniform and a non-`Fast`
method which works in CIE spaces. For more theory on `SoftPalette`, check out
[I want hue](http://tools.medialab.sciences-po.fr/iwanthue/theory.php). Yet
again, there is a `Happy` and a `Warm` version, which do what you expect, but
now there is an additional `Soft` version, which is more configurable: you can
give a constraint on the color space in order to get colors within a certain *feel*.
Let's start with the simple methods first; all they take is the number of
colors to generate, which could, for example, be the player count. They return
a slice of `colorful.Color` values:
```go
pal1, err1 := colorful.WarmPalette(10)
pal2 := colorful.FastWarmPalette(10)
pal3, err3 := colorful.HappyPalette(10)
pal4 := colorful.FastHappyPalette(10)
pal5, err5 := colorful.SoftPalette(10)
```
Note that the non-fast methods *may* fail if you ask for way too many colors.
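So check the error; a small sketch that falls back to the fast variant if the constrained space can't provide that many colors:
```go
pal, err := colorful.WarmPalette(10)
if err != nil {
	// Too many colors requested for the constrained space.
	pal = colorful.FastWarmPalette(10)
}
```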
Let's move on to the advanced one, namely `SoftPaletteEx`. Besides the color
count, this function takes a `SoftPaletteSettings` object as argument. The
interesting part here is its `CheckColor` member, which is a boolean function
taking three floating points as arguments: `l`, `a` and `b`. This function
should return `true` for colors which lie within the region you want and `false`
otherwise. The other members are `Iterations`, which should be within [5..100],
where higher means a slower but more exact palette, and `ManySamples`, which you
should set to `true` in case your `CheckColor` constraint rejects a large part
of the color space.
For example, to create a palette of 10 brownish colors, you'd call it like this:
```go
func isbrowny(l, a, b float64) bool {
h, c, L := colorful.LabToHcl(l, a, b)
return 10.0 < h && h < 50.0 && 0.1 < c && c < 0.5 && L < 0.5
}
// Since the above function is pretty restrictive, we set ManySamples to true.
brownies := colorful.SoftPaletteEx(10, colorful.SoftPaletteSettings{isbrowny, 50, true})
```
The following picture shows the palettes generated by all of these methods
(source code in `doc/palettegens/palettegens.go`), in the order they were presented, i.e.
from top to bottom: `Warm`, `FastWarm`, `Happy`, `FastHappy`, `Soft`,
`SoftEx(isbrowny)`. All of them contain some randomness, so YMMV.
![All example palettes](doc/palettegens/palettegens.png)
### Sorting colors
TODO: Sort using dist fn.
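Until that exists in the library, one possible approach (my own sketch, not a library API) is to sort by perceptual distance to a reference color using `sort.Slice`:
```go
import (
	"sort"

	"github.com/lucasb-eyer/go-colorful"
)

// sortByDistanceTo orders a palette by its perceptual (CIE76) distance
// to a reference color. Just one of many conceivable orderings.
func sortByDistanceTo(cs []colorful.Color, ref colorful.Color) {
	sort.Slice(cs, func(i, j int) bool {
		return cs[i].DistanceLab(ref) < cs[j].DistanceLab(ref)
	})
}
```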
### Using linear RGB for computations
There are two methods for transforming RGB<->Linear RGB: a fast and almost precise one,
and a slow and precise one.
```go
c, _ := colorful.Hex("#FF0000")
r, g, b := c.FastLinearRgb()
```
TODO: describe some more.
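In the meantime, both variants look the same from the caller's side; only the speed/precision trade-off differs (see the FAQ below). The hex value here is just an example:
```go
c, _ := colorful.Hex("#FF0000")
r, g, b := c.LinearRgb()        // slow and precise
fr, fg, fb := c.FastLinearRgb() // fast, roughly 0.5% error, only for values in [0..1]

// And back from linear RGB to sRGB:
c2 := colorful.LinearRgb(r, g, b)
c3 := colorful.FastLinearRgb(fr, fg, fb)
```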
### Want to use some other reference point?
```go
c := colorful.LabWhiteRef(0.507850, 0.040585,-0.370945, colorful.D50)
l, a, b := c.LabWhiteRef(colorful.D50)
```
### Reading and writing colors from databases
The type `HexColor` makes it easy to store colors as strings in a database. It
implements the [database/sql.Scanner](https://godoc.org/database/sql#Scanner)
and [database/sql/driver.Value](https://godoc.org/database/sql/driver.Value)
interfaces which provide automatic type conversion.
Example:
```go
var hc HexColor
err := db.QueryRow("SELECT '#ff0000';").Scan(&hc)
// hc == HexColor{R: 1, G: 0, B: 0}; err == nil
```
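Writing a color works the same way through the `Value` side. A hedged sketch, assuming a hypothetical `colors` table with a text column `hex` (placeholder syntax depends on your driver):
```go
hc := HexColor{R: 0.0, G: 0.5, B: 1.0}
// Pass a pointer: Value() is defined on *HexColor.
_, err := db.Exec("INSERT INTO colors (hex) VALUES ($1);", &hc)
// The driver receives the string "#0080ff"; err == nil on success.
```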
FAQ
===
### Q: I get all f!@#ed up values! Your library sucks!
A: You probably provided values in the wrong range. For example, RGB values are
expected to reside between 0 and 1, *not* between 0 and 255. Normalize your colors.
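For example, to go from 8-bit channel values to what this library expects (assuming `r8`, `g8`, `b8` are your `uint8` values):
```go
c := colorful.Color{
	R: float64(r8) / 255.0,
	G: float64(g8) / 255.0,
	B: float64(b8) / 255.0,
}
```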
### Q: Lab/Luv/HCl seem broken! Your library sucks!
They look like this:
<img height="150" src="https://user-images.githubusercontent.com/3779568/28646900-6548040c-7264-11e7-8f12-81097a97c260.png">
A: You're likely trying to generate and display colors that can't be represented by RGB,
and thus by monitors. When you try to convert, say, `Hcl(190.0, 1.0, 1.0).RGB255()`,
you're asking for RGB values of `(-2105.254 300.680 286.185)`, which clearly don't exist,
and the `RGB255` function just casts these numbers to `uint8`, creating wrap-around and
what looks like a completely broken gradient. What you want to do is either use more
reasonable color values which actually exist in RGB, or clamp the resulting color to
its nearest existing one with `Clamped()`, living with the consequences:
`Hcl(190.0, 1.0, 1.0).Clamped().RGB255()`. It will look something like this:
<img height="150" src="https://user-images.githubusercontent.com/1476029/29596343-9a8c62c6-8771-11e7-9026-b8eb8852cc4a.png">
[Here's an issue going in-depth about this](https://github.com/lucasb-eyer/go-colorful/issues/14),
as well as [my answer](https://github.com/lucasb-eyer/go-colorful/issues/14#issuecomment-324205385),
both with code and pretty pictures. Also note that this was somewhat covered above in the
["Blending colors" section](https://github.com/lucasb-eyer/go-colorful#blending-colors).
### Q: In a tight loop, conversions to Lab/Luv/HCl/... are slooooow!
A: Yes, they are.
This library aims for correctness, readability, and modularity; it wasn't written with speed in mind.
A large part of the slowness comes from these conversions going through `LinearRgb` which uses powers.
I implemented a fast approximation to `LinearRgb` called `FastLinearRgb` by using Taylor approximations.
The approximation is roughly 5x faster and precise up to roughly 0.5%,
the major caveat being that if the input values are outside the range 0-1, accuracy drops dramatically.
You can use these in your conversions as follows:
```go
col, _ := colorful.Hex("#517AB8") // get your color somehow
l, a, b := colorful.XyzToLab(colorful.LinearRgbToXyz(col.FastLinearRgb()))
```
If you need faster versions of `Distance*` and `Blend*` that make use of this fast approximation,
feel free to implement them and open a pull request; I'll happily accept it.
The derivation of these functions can be followed in [this Jupyter notebook](doc/LinearRGB Approximations.ipynb).
Here's the main figure showing the approximation quality:
![approximation quality](doc/approx-quality.png)
More speed could be gained by using SIMD instructions in many places.
You can also get more speed for specific conversions by approximating the full conversion function,
but that is outside the scope of this library.
Thanks to [@ZirconiumX](https://github.com/ZirconiumX) for starting this investigation,
see [issue #18](https://github.com/lucasb-eyer/go-colorful/issues/18) for details.
### Q: `MakeColor` panics when alpha is zero!
A: Because in that case, the conversion is undefined. See
[issue 21](https://github.com/lucasb-eyer/go-colorful/issues/21)
as well as the short caveat discussion in the ["The `color.Color` interface"](README.md#the-colorcolor-interface) section above.
Who?
====
This library has been developed by Lucas Beyer with contributions from
Bastien Dejean (@baskerville) and Phil Kulak (@pkulak).
License: MIT
============
Copyright (c) 2013 Lucas Beyer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

54
vendor/github.com/lucasb-eyer/go-colorful/colorgens.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
// Various ways to generate single random colors
package colorful
import (
"math/rand"
)
// Creates a random dark, "warm" color through a restricted HSV space.
func FastWarmColor() Color {
return Hsv(
rand.Float64() * 360.0,
0.5 + rand.Float64()*0.3,
0.3 + rand.Float64()*0.3)
}
// Creates a random dark, "warm" color through restricted HCL space.
// This is slower than FastWarmColor but will likely give you colors which have
// the same "warmness" if you run it many times.
func WarmColor() (c Color) {
for c = randomWarm() ; !c.IsValid() ; c = randomWarm() {}
return
}
func randomWarm() Color {
return Hcl(
rand.Float64() * 360.0,
0.1 + rand.Float64()*0.3,
0.2 + rand.Float64()*0.3)
}
// Creates a random bright, "pimpy" color through a restricted HSV space.
func FastHappyColor() Color {
return Hsv(
rand.Float64() * 360.0,
0.7 + rand.Float64()*0.3,
0.6 + rand.Float64()*0.3)
}
// Creates a random bright, "pimpy" color through restricted HCL space.
// This is slower than FastHappyColor but will likely give you colors which
// have the same "brightness" if you run it many times.
func HappyColor() (c Color) {
for c = randomPimp() ; !c.IsValid() ; c = randomPimp() {}
return
}
func randomPimp() Color {
return Hcl(
rand.Float64() * 360.0,
0.5 + rand.Float64()*0.3,
0.5 + rand.Float64()*0.3)
}

797
vendor/github.com/lucasb-eyer/go-colorful/colors.go generated vendored Normal file
View File

@ -0,0 +1,797 @@
// The colorful package provides all kinds of functions for working with colors.
package colorful
import(
"fmt"
"math"
"image/color"
)
// A color is stored internally using sRGB (standard RGB) values in the range 0-1
type Color struct {
R, G, B float64
}
// Implement the Go color.Color interface.
func (col Color) RGBA() (r, g, b, a uint32) {
r = uint32(col.R*65535.0+0.5)
g = uint32(col.G*65535.0+0.5)
b = uint32(col.B*65535.0+0.5)
a = 0xFFFF
return
}
// Constructs a colorful.Color from something implementing color.Color
func MakeColor(col color.Color) Color {
r, g, b, a := col.RGBA()
// Since color.Color is alpha pre-multiplied, we need to divide the
// RGB values by alpha again in order to get back the original RGB.
r *= 0xffff
r /= a
g *= 0xffff
g /= a
b *= 0xffff
b /= a
return Color{float64(r)/65535.0, float64(g)/65535.0, float64(b)/65535.0}
}
// Might come in handy sometimes to reduce boilerplate code.
func (col Color) RGB255() (r, g, b uint8) {
r = uint8(col.R*255.0+0.5)
g = uint8(col.G*255.0+0.5)
b = uint8(col.B*255.0+0.5)
return
}
// This is the tolerance used when comparing colors using AlmostEqualRgb.
const Delta = 1.0/255.0
// This is the default reference white point.
var D65 = [3]float64{0.95047, 1.00000, 1.08883}
// And another one.
var D50 = [3]float64{0.96422, 1.00000, 0.82521}
// Checks whether the color exists in RGB space, i.e. all values are in [0..1]
func (c Color) IsValid() bool {
return 0.0 <= c.R && c.R <= 1.0 &&
0.0 <= c.G && c.G <= 1.0 &&
0.0 <= c.B && c.B <= 1.0
}
func clamp01(v float64) float64 {
return math.Max(0.0, math.Min(v, 1.0))
}
// Clamped clamps the color into the valid range, clamping each value to [0..1].
// If the color is valid already, this is a no-op.
func (c Color) Clamped() Color {
return Color{clamp01(c.R), clamp01(c.G), clamp01(c.B)}
}
func sq(v float64) float64 {
return v * v;
}
func cub(v float64) float64 {
return v * v * v;
}
// DistanceRgb computes the distance between two colors in RGB space.
// This is not a good measure! Rather do it in Lab space.
func (c1 Color) DistanceRgb(c2 Color) float64 {
return math.Sqrt(sq(c1.R-c2.R) + sq(c1.G-c2.G) + sq(c1.B-c2.B))
}
// Check for equality between colors within the tolerance Delta (1/255).
func (c1 Color) AlmostEqualRgb(c2 Color) bool {
return math.Abs(c1.R - c2.R) +
math.Abs(c1.G - c2.G) +
math.Abs(c1.B - c2.B) < 3.0*Delta
}
// You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl.
func (c1 Color) BlendRgb(c2 Color, t float64) Color {
return Color{c1.R + t*(c2.R - c1.R),
c1.G + t*(c2.G - c1.G),
c1.B + t*(c2.B - c1.B)}
}
// Utility used by Hxx color-spaces for interpolating between two angles in [0,360].
func interp_angle(a0, a1, t float64) float64 {
// Based on the answer here: http://stackoverflow.com/a/14498790/2366315
// With potential proof that it works here: http://math.stackexchange.com/a/2144499
delta := math.Mod(math.Mod(a1 - a0, 360.0) + 540, 360.0) - 180.0
return math.Mod(a0 + t*delta + 360.0, 360.0)
}
/// HSV ///
///////////
// From http://en.wikipedia.org/wiki/HSL_and_HSV
// Note that h is in [0..360] and s,v in [0..1]
// Hsv returns the Hue [0..360], Saturation and Value [0..1] of the color.
func (col Color) Hsv() (h, s, v float64) {
min := math.Min(math.Min(col.R, col.G), col.B)
v = math.Max(math.Max(col.R, col.G), col.B)
C := v - min
s = 0.0
if v != 0.0 {
s = C / v
}
h = 0.0 // We use 0 instead of undefined as in wp.
if min != v {
if v == col.R { h = math.Mod((col.G - col.B) / C, 6.0) }
if v == col.G { h = (col.B - col.R) / C + 2.0 }
if v == col.B { h = (col.R - col.G) / C + 4.0 }
h *= 60.0
if h < 0.0 { h += 360.0 }
}
return
}
// Hsv creates a new Color given a Hue in [0..360], a Saturation and a Value in [0..1]
func Hsv(H, S, V float64) Color {
Hp := H/60.0
C := V*S
X := C*(1.0-math.Abs(math.Mod(Hp, 2.0)-1.0))
m := V-C;
r, g, b := 0.0, 0.0, 0.0
switch {
case 0.0 <= Hp && Hp < 1.0: r = C; g = X
case 1.0 <= Hp && Hp < 2.0: r = X; g = C
case 2.0 <= Hp && Hp < 3.0: g = C; b = X
case 3.0 <= Hp && Hp < 4.0: g = X; b = C
case 4.0 <= Hp && Hp < 5.0: r = X; b = C
case 5.0 <= Hp && Hp < 6.0: r = C; b = X
}
return Color{m+r, m+g, m+b}
}
// You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl.
func (c1 Color) BlendHsv(c2 Color, t float64) Color {
h1, s1, v1 := c1.Hsv()
h2, s2, v2 := c2.Hsv()
// We know that h are both in [0..360]
return Hsv(interp_angle(h1, h2, t), s1 + t*(s2 - s1), v1 + t*(v2 - v1))
}
/// HSL ///
///////////
// Hsl returns the Hue [0..360], Saturation [0..1], and Luminance (lightness) [0..1] of the color.
func (col Color) Hsl() (h, s, l float64) {
min := math.Min(math.Min(col.R, col.G), col.B)
max := math.Max(math.Max(col.R, col.G), col.B)
l = (max + min) / 2
if min == max {
s = 0
h = 0
} else {
if l < 0.5 {
s = (max - min) / (max + min)
} else {
s = (max - min) / (2.0 - max - min)
}
if max == col.R {
h = (col.G - col.B) / (max - min)
} else if max == col.G {
h = 2.0 + (col.B-col.R)/(max-min)
} else {
h = 4.0 + (col.R-col.G)/(max-min)
}
h *= 60
if h < 0 {
h += 360
}
}
return
}
// Hsl creates a new Color given a Hue in [0..360], a Saturation [0..1], and a Luminance (lightness) in [0..1]
func Hsl(h, s, l float64) Color {
if s == 0 {
return Color{l, l, l}
}
var r, g, b float64
var t1 float64
var t2 float64
var tr float64
var tg float64
var tb float64
if l < 0.5 {
t1 = l * (1.0 + s)
} else {
t1 = l + s - l*s
}
t2 = 2*l - t1
h = h / 360
tr = h + 1.0/3.0
tg = h
tb = h - 1.0/3.0
if tr < 0 {
tr += 1
}
if tr > 1 {
tr -= 1
}
if tg < 0 {
tg += 1
}
if tg > 1 {
tg -= 1
}
if tb < 0 {
tb += 1
}
if tb > 1 {
tb -= 1
}
// Red
if 6*tr < 1 {
r = t2 + (t1-t2)*6*tr
} else if 2*tr < 1 {
r = t1
} else if 3*tr < 2 {
r = t2 + (t1-t2)*(2.0/3.0-tr)*6
} else {
r = t2
}
// Green
if 6*tg < 1 {
g = t2 + (t1-t2)*6*tg
} else if 2*tg < 1 {
g = t1
} else if 3*tg < 2 {
g = t2 + (t1-t2)*(2.0/3.0-tg)*6
} else {
g = t2
}
// Blue
if 6*tb < 1 {
b = t2 + (t1-t2)*6*tb
} else if 2*tb < 1 {
b = t1
} else if 3*tb < 2 {
b = t2 + (t1-t2)*(2.0/3.0-tb)*6
} else {
b = t2
}
return Color{r, g, b}
}
/// Hex ///
///////////
// Hex returns the hex "html" representation of the color, as in #ff0080.
func (col Color) Hex() string {
// Add 0.5 for rounding
return fmt.Sprintf("#%02x%02x%02x", uint8(col.R*255.0+0.5), uint8(col.G*255.0+0.5), uint8(col.B*255.0+0.5))
}
// Hex parses a "html" hex color-string, either in the 3 "#f0c" or 6 "#ff1034" digits form.
func Hex(scol string) (Color, error) {
format := "#%02x%02x%02x"
factor := 1.0/255.0
if len(scol) == 4 {
format = "#%1x%1x%1x"
factor = 1.0/15.0
}
var r, g, b uint8
n, err := fmt.Sscanf(scol, format, &r, &g, &b)
if err != nil {
return Color{}, err
}
if n != 3 {
return Color{}, fmt.Errorf("color: %v is not a hex-color", scol)
}
return Color{float64(r)*factor, float64(g)*factor, float64(b)*factor}, nil
}
/// Linear ///
//////////////
// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/
// http://www.brucelindbloom.com/Eqn_RGB_to_XYZ.html
func linearize(v float64) float64 {
if v <= 0.04045 {
return v / 12.92
}
return math.Pow((v + 0.055)/1.055, 2.4)
}
// LinearRgb converts the color into the linear RGB space (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func (col Color) LinearRgb() (r, g, b float64) {
r = linearize(col.R)
g = linearize(col.G)
b = linearize(col.B)
return
}
// A much faster and still quite precise linearization using a 6th-order Taylor approximation.
// See the accompanying Jupyter notebook for derivation of the constants.
func linearize_fast(v float64) float64 {
v1 := v - 0.5
v2 := v1*v1
v3 := v2*v1
v4 := v2*v2
//v5 := v3*v2
return -0.248750514614486 + 0.925583310193438*v + 1.16740237321695*v2 + 0.280457026598666*v3 - 0.0757991963780179*v4 //+ 0.0437040411548932*v5
}
// FastLinearRgb is much faster than and almost as accurate as LinearRgb.
// BUT it is important to NOTE that they only produce good results for valid colors r,g,b in [0,1].
func (col Color) FastLinearRgb() (r, g, b float64) {
r = linearize_fast(col.R)
g = linearize_fast(col.G)
b = linearize_fast(col.B)
return
}
func delinearize(v float64) float64 {
if v <= 0.0031308 {
return 12.92 * v
}
return 1.055 * math.Pow(v, 1.0/2.4) - 0.055
}
// LinearRgb creates an sRGB color out of the given linear RGB color (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func LinearRgb(r, g, b float64) Color {
return Color{delinearize(r), delinearize(g), delinearize(b)}
}
func delinearize_fast(v float64) float64 {
// This function (fractional root) is much harder to linearize, so we need to split.
if v > 0.2 {
v1 := v - 0.6
v2 := v1*v1
v3 := v2*v1
v4 := v2*v2
v5 := v3*v2
return 0.442430344268235 + 0.592178981271708*v - 0.287864782562636*v2 + 0.253214392068985*v3 - 0.272557158129811*v4 + 0.325554383321718*v5
} else if v > 0.03 {
v1 := v - 0.115
v2 := v1*v1
v3 := v2*v1
v4 := v2*v2
v5 := v3*v2
return 0.194915592891669 + 1.55227076330229*v - 3.93691860257828*v2 + 18.0679839248761*v3 - 101.468750302746*v4 + 632.341487393927*v5
} else {
v1 := v - 0.015
v2 := v1*v1
v3 := v2*v1
v4 := v2*v2
v5 := v3*v2
// You can clearly see from the involved constants that the low-end is highly nonlinear.
return 0.0519565234928877 + 5.09316778537561*v - 99.0338180489702*v2 + 3484.52322764895*v3 - 150028.083412663*v4 + 7168008.42971613*v5
}
}
// FastLinearRgb is much faster than and almost as accurate as LinearRgb.
// BUT it is important to NOTE that they only produce good results for valid inputs r,g,b in [0,1].
func FastLinearRgb(r, g, b float64) Color {
return Color{delinearize_fast(r), delinearize_fast(g), delinearize_fast(b)}
}
// XyzToLinearRgb converts from CIE XYZ-space to Linear RGB space.
func XyzToLinearRgb(x, y, z float64) (r, g, b float64) {
r = 3.2404542*x - 1.5371385*y - 0.4985314*z
g = -0.9692660*x + 1.8760108*y + 0.0415560*z
b = 0.0556434*x - 0.2040259*y + 1.0572252*z
return
}
func LinearRgbToXyz(r, g, b float64) (x, y, z float64) {
x = 0.4124564*r + 0.3575761*g + 0.1804375*b
y = 0.2126729*r + 0.7151522*g + 0.0721750*b
z = 0.0193339*r + 0.1191920*g + 0.9503041*b
return
}
/// XYZ ///
///////////
// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/
func (col Color) Xyz() (x, y, z float64) {
return LinearRgbToXyz(col.LinearRgb())
}
func Xyz(x, y, z float64) Color {
return LinearRgb(XyzToLinearRgb(x, y, z))
}
/// xyY ///
///////////
// http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html
// Well, the name is bad, since it's xyY but Golang needs me to start with a
// capital letter to make the method public.
func XyzToXyy(X, Y, Z float64) (x, y, Yout float64) {
return XyzToXyyWhiteRef(X, Y, Z, D65)
}
func XyzToXyyWhiteRef(X, Y, Z float64, wref [3]float64) (x, y, Yout float64) {
Yout = Y
N := X + Y + Z
if math.Abs(N) < 1e-14 {
// When we have black, Bruce Lindbloom recommends to use
// the reference white's chromaticity for x and y.
x = wref[0] / (wref[0] + wref[1] + wref[2])
y = wref[1] / (wref[0] + wref[1] + wref[2])
} else {
x = X / N
y = Y / N
}
return
}
func XyyToXyz(x, y, Y float64) (X, Yout, Z float64) {
Yout = Y
if -1e-14 < y && y < 1e-14 {
X = 0.0
Z = 0.0
} else {
X = Y / y * x
Z = Y / y * (1.0 - x - y)
}
return
}
// Converts the given color to CIE xyY space using D65 as reference white.
// (Note that the reference white is only used for black input.)
// x, y and Y are in [0..1]
func (col Color) Xyy() (x, y, Y float64) {
return XyzToXyy(col.Xyz())
}
// Converts the given color to CIE xyY space, taking into account
// a given reference white. (i.e. the monitor's white)
// (Note that the reference white is only used for black input.)
// x, y and Y are in [0..1]
func (col Color) XyyWhiteRef(wref [3]float64) (x, y, Y float64) {
X, Y2, Z := col.Xyz()
return XyzToXyyWhiteRef(X, Y2, Z, wref)
}
// Generates a color by using data given in CIE xyY space.
// x, y and Y are in [0..1]
func Xyy(x, y, Y float64) Color {
return Xyz(XyyToXyz(x, y, Y))
}
/// L*a*b* ///
//////////////
// http://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
// For L*a*b*, we need to go L*a*b*<->XYZ<->RGB, and the first conversion is device dependent.
func lab_f(t float64) float64 {
if t > 6.0/29.0 * 6.0/29.0 * 6.0/29.0 {
return math.Cbrt(t)
}
return t/3.0 * 29.0/6.0 * 29.0/6.0 + 4.0/29.0
}
func XyzToLab(x, y, z float64) (l, a, b float64) {
// Use D65 white as reference point by default.
// http://www.fredmiranda.com/forum/topic/1035332
// http://en.wikipedia.org/wiki/Standard_illuminant
return XyzToLabWhiteRef(x, y, z, D65)
}
func XyzToLabWhiteRef(x, y, z float64, wref [3]float64) (l, a, b float64) {
fy := lab_f(y/wref[1])
l = 1.16*fy - 0.16
a = 5.0*(lab_f(x/wref[0]) - fy)
b = 2.0*(fy - lab_f(z/wref[2]))
return
}
func lab_finv(t float64) float64 {
if t > 6.0/29.0 {
return t * t * t
}
return 3.0 * 6.0/29.0 * 6.0/29.0 * (t - 4.0/29.0)
}
func LabToXyz(l, a, b float64) (x, y, z float64) {
// D65 white (see above).
return LabToXyzWhiteRef(l, a, b, D65)
}
func LabToXyzWhiteRef(l, a, b float64, wref [3]float64) (x, y, z float64) {
l2 := (l + 0.16) / 1.16
x = wref[0] * lab_finv(l2 + a/5.0)
y = wref[1] * lab_finv(l2)
z = wref[2] * lab_finv(l2 - b/2.0)
return
}
// Converts the given color to CIE L*a*b* space using D65 as reference white.
func (col Color) Lab() (l, a, b float64) {
return XyzToLab(col.Xyz())
}
// Converts the given color to CIE L*a*b* space, taking into account
// a given reference white. (i.e. the monitor's white)
func (col Color) LabWhiteRef(wref [3]float64) (l, a, b float64) {
x, y, z := col.Xyz()
return XyzToLabWhiteRef(x, y, z, wref)
}
// Generates a color by using data given in CIE L*a*b* space using D65 as reference white.
// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding
// valid RGB values, check the FAQ in the README if you're unsure.
func Lab(l, a, b float64) Color {
return Xyz(LabToXyz(l, a, b))
}
// Generates a color by using data given in CIE L*a*b* space, taking
// into account a given reference white. (i.e. the monitor's white)
func LabWhiteRef(l, a, b float64, wref [3]float64) Color {
return Xyz(LabToXyzWhiteRef(l, a, b, wref))
}
// DistanceLab is a good measure of visual similarity between two colors!
// A result of 0 would mean identical colors, while a result of 1 or higher
// means the colors differ a lot.
func (c1 Color) DistanceLab(c2 Color) float64 {
l1, a1, b1 := c1.Lab()
l2, a2, b2 := c2.Lab()
return math.Sqrt(sq(l1-l2) + sq(a1-a2) + sq(b1-b2))
}
// That's actually the same, but I don't want to break code.
func (c1 Color) DistanceCIE76(c2 Color) float64 {
return c1.DistanceLab(c2)
}
// Uses the CIE94 formula to calculate color distance. More accurate than
// DistanceLab, but also more work.
func (cl Color) DistanceCIE94(cr Color) float64 {
l1, a1, b1 := cl.Lab()
l2, a2, b2 := cr.Lab()
// NOTE: Since all those formulas expect L,a,b values 100x larger than we
// have them in this library, we either need to adjust all constants
// in the formula, or convert the ranges of L,a,b before, and then
// scale the distances down again. The latter is less error-prone.
l1, a1, b1 = l1*100.0, a1*100.0, b1*100.0
l2, a2, b2 = l2*100.0, a2*100.0, b2*100.0
kl := 1.0 // 2.0 for textiles
kc := 1.0
kh := 1.0
k1 := 0.045 // 0.048 for textiles
k2 := 0.015 // 0.014 for textiles.
deltaL := l1 - l2
c1 := math.Sqrt(sq(a1) + sq(b1))
c2 := math.Sqrt(sq(a2) + sq(b2))
deltaCab := c1 - c2
// Not taking Sqrt here for stability, and it's unnecessary.
deltaHab2 := sq(a1-a2) + sq(b1-b2) - sq(deltaCab)
sl := 1.0
sc := 1.0 + k1*c1
sh := 1.0 + k2*c1
vL2 := sq(deltaL/(kl*sl))
vC2 := sq(deltaCab/(kc*sc))
vH2 := deltaHab2/sq(kh*sh)
return math.Sqrt(vL2 + vC2 + vH2)*0.01 // See above.
}
// BlendLab blends two colors in the L*a*b* color-space, which should result in a smoother blend.
// t == 0 results in c1, t == 1 results in c2
func (c1 Color) BlendLab(c2 Color, t float64) Color {
l1, a1, b1 := c1.Lab()
l2, a2, b2 := c2.Lab()
return Lab(l1 + t*(l2 - l1),
a1 + t*(a2 - a1),
b1 + t*(b2 - b1))
}
/// L*u*v* ///
//////////////
// http://en.wikipedia.org/wiki/CIELUV#XYZ_.E2.86.92_CIELUV_and_CIELUV_.E2.86.92_XYZ_conversions
// For L*u*v*, we need to go L*u*v*<->XYZ<->RGB, and the first conversion is device dependent.
func XyzToLuv(x, y, z float64) (l, a, b float64) {
// Use D65 white as reference point by default.
// http://www.fredmiranda.com/forum/topic/1035332
// http://en.wikipedia.org/wiki/Standard_illuminant
return XyzToLuvWhiteRef(x, y, z, D65)
}
func XyzToLuvWhiteRef(x, y, z float64, wref [3]float64) (l, u, v float64) {
if y/wref[1] <= 6.0/29.0 * 6.0/29.0 * 6.0/29.0 {
l = y/wref[1] * 29.0/3.0 * 29.0/3.0 * 29.0/3.0
} else {
l = 1.16 * math.Cbrt(y/wref[1]) - 0.16
}
ubis, vbis := xyz_to_uv(x, y, z)
un, vn := xyz_to_uv(wref[0], wref[1], wref[2])
u = 13.0*l * (ubis - un)
v = 13.0*l * (vbis - vn)
return
}
// For this part, we do as R's graphics.hcl does, not as wikipedia does.
// Or is it the same?
func xyz_to_uv(x, y, z float64) (u, v float64) {
denom := x + 15.0*y + 3.0*z
if denom == 0.0 {
u, v = 0.0, 0.0
} else {
u = 4.0*x/denom
v = 9.0*y/denom
}
return
}
func LuvToXyz(l, u, v float64) (x, y, z float64) {
// D65 white (see above).
return LuvToXyzWhiteRef(l, u, v, D65)
}
func LuvToXyzWhiteRef(l, u, v float64, wref [3]float64) (x, y, z float64) {
//y = wref[1] * lab_finv((l + 0.16) / 1.16)
if l <= 0.08 {
y = wref[1] * l * 100.0 * 3.0/29.0 * 3.0/29.0 * 3.0/29.0
} else {
y = wref[1] * cub((l+0.16)/1.16)
}
un, vn := xyz_to_uv(wref[0], wref[1], wref[2])
if l != 0.0 {
ubis := u/(13.0*l) + un
vbis := v/(13.0*l) + vn
x = y*9.0*ubis/(4.0*vbis)
z = y*(12.0-3.0*ubis-20.0*vbis)/(4.0*vbis)
} else {
x, y = 0.0, 0.0
}
return
}
// Converts the given color to CIE L*u*v* space using D65 as reference white.
// L* is in [0..1] and both u* and v* are in about [-1..1]
func (col Color) Luv() (l, u, v float64) {
return XyzToLuv(col.Xyz())
}
// Converts the given color to CIE L*u*v* space, taking into account
// a given reference white. (i.e. the monitor's white)
// L* is in [0..1] and both u* and v* are in about [-1..1]
func (col Color) LuvWhiteRef(wref [3]float64) (l, u, v float64) {
x, y, z := col.Xyz()
return XyzToLuvWhiteRef(x, y, z, wref)
}
// Generates a color by using data given in CIE L*u*v* space using D65 as reference white.
// L* is in [0..1] and both u* and v* are in about [-1..1]
// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding
// valid RGB values, check the FAQ in the README if you're unsure.
func Luv(l, u, v float64) Color {
return Xyz(LuvToXyz(l, u, v))
}
// Generates a color by using data given in CIE L*u*v* space, taking
// into account a given reference white. (i.e. the monitor's white)
// L* is in [0..1] and both u* and v* are in about [-1..1]
func LuvWhiteRef(l, u, v float64, wref [3]float64) Color {
return Xyz(LuvToXyzWhiteRef(l, u, v, wref))
}
// DistanceLuv is a good measure of visual similarity between two colors!
// A result of 0 would mean identical colors, while a result of 1 or higher
// means the colors differ a lot.
func (c1 Color) DistanceLuv(c2 Color) float64 {
l1, u1, v1 := c1.Luv()
l2, u2, v2 := c2.Luv()
return math.Sqrt(sq(l1-l2) + sq(u1-u2) + sq(v1-v2))
}
// BlendLuv blends two colors in the CIE-L*u*v* color-space, which should result in a smoother blend.
// t == 0 results in c1, t == 1 results in c2
func (c1 Color) BlendLuv(c2 Color, t float64) Color {
l1, u1, v1 := c1.Luv()
l2, u2, v2 := c2.Luv()
return Luv(l1 + t*(l2 - l1),
u1 + t*(u2 - u1),
v1 + t*(v2 - v1))
}
/// HCL ///
///////////
// HCL is nothing else than L*a*b* in cylindrical coordinates!
// (this was wrong on English wikipedia, I fixed it, let's hope the fix stays.)
// But it is widely popular since it is a "correct HSV"
// http://www.hunterlab.com/appnotes/an09_96a.pdf
// Converts the given color to HCL space using D65 as reference white.
// H values are in [0..360], C and L values are in [0..1] although C can overshoot 1.0
func (col Color) Hcl() (h, c, l float64) {
return col.HclWhiteRef(D65)
}
func LabToHcl(L, a, b float64) (h, c, l float64) {
// Oops, floating point workaround necessary if a ~= b and both are very small (i.e. almost zero).
if math.Abs(b - a) > 1e-4 && math.Abs(a) > 1e-4 {
h = math.Mod(57.29577951308232087721*math.Atan2(b, a) + 360.0, 360.0) // Rad2Deg
} else {
h = 0.0
}
c = math.Sqrt(sq(a) + sq(b))
l = L
return
}
// Converts the given color to HCL space, taking into account
// a given reference white. (i.e. the monitor's white)
// H values are in [0..360], C and L values are in [0..1]
func (col Color) HclWhiteRef(wref [3]float64) (h, c, l float64) {
L, a, b := col.LabWhiteRef(wref)
return LabToHcl(L, a, b)
}
// Generates a color by using data given in HCL space using D65 as reference white.
// H values are in [0..360], C and L values are in [0..1]
// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding
// valid RGB values, check the FAQ in the README if you're unsure.
func Hcl(h, c, l float64) Color {
return HclWhiteRef(h, c, l, D65)
}
func HclToLab(h, c, l float64) (L, a, b float64) {
H := 0.01745329251994329576*h // Deg2Rad
a = c*math.Cos(H)
b = c*math.Sin(H)
L = l
return
}
// Generates a color by using data given in HCL space, taking
// into account a given reference white. (i.e. the monitor's white)
// H values are in [0..360], C and L values are in [0..1]
func HclWhiteRef(h, c, l float64, wref [3]float64) Color {
L, a, b := HclToLab(h, c, l)
return LabWhiteRef(L, a, b, wref)
}
// BlendHcl blends two colors in the CIE-L*C*h° color-space, which should result in a smoother blend.
// t == 0 results in c1, t == 1 results in c2
func (col1 Color) BlendHcl(col2 Color, t float64) Color {
h1, c1, l1 := col1.Hcl()
h2, c2, l2 := col2.Hcl()
// We know that h are both in [0..360]
return Hcl(interp_angle(h1, h2, t), c1 + t*(c2 - c1), l1 + t*(l2 - l1))
}

View File

@ -0,0 +1,26 @@
package colorful
import (
"math/rand"
)
// Uses the HSV color space to generate colors with similar S,V but distributed
// evenly along their Hue. This is fast but not always pretty.
// If you've got time to spare, use Lab (the non-fast below).
func FastHappyPalette(colorsCount int) (colors []Color) {
colors = make([]Color, colorsCount)
for i := 0 ; i < colorsCount ; i++ {
colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.8 + rand.Float64()*0.2, 0.65 + rand.Float64()*0.2)
}
return
}
func HappyPalette(colorsCount int) ([]Color, error) {
pimpy := func(l, a, b float64) bool {
_, c, _ := LabToHcl(l, a, b)
return 0.3 <= c && 0.4 <= l && l <= 0.8
}
return SoftPaletteEx(colorsCount, SoftPaletteSettings{pimpy, 50, true})
}

37
vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
package colorful
import (
"database/sql/driver"
"fmt"
"reflect"
)
// A HexColor is a Color stored as a hex string "#rrggbb". It implements the
// database/sql.Scanner and database/sql/driver.Value interfaces.
type HexColor Color
type errUnsupportedType struct {
got interface{}
want reflect.Type
}
func (hc *HexColor) Scan(value interface{}) error {
s, ok := value.(string)
if !ok {
return errUnsupportedType{got: reflect.TypeOf(value), want: reflect.TypeOf("")}
}
c, err := Hex(s)
if err != nil {
return err
}
*hc = HexColor(c)
return nil
}
func (hc *HexColor) Value() (driver.Value, error) {
return Color(*hc).Hex(), nil
}
func (e errUnsupportedType) Error() string {
return fmt.Sprintf("unsupported type: got %v, want a %s", e.got, e.want)
}

View File

@ -0,0 +1,185 @@
// Largely inspired by the descriptions in http://lab.medialab.sciences-po.fr/iwanthue/
// but written from scratch.
package colorful
import (
"fmt"
"math"
"math/rand"
)
// The algorithm works in L*a*b* color space and converts to RGB in the end.
// L* in [0..1], a* and b* in [-1..1]
type lab_t struct {
L, A, B float64
}
type SoftPaletteSettings struct {
// A function which can be used to restrict the allowed color-space.
CheckColor func(l, a, b float64) bool
// The higher, the better quality but the slower. Usually two figures.
Iterations int
// Use up to 160000 or 8000 samples of the L*a*b* space (and thus calls to CheckColor).
// Set this to true only if your CheckColor shapes the Lab space weirdly.
ManySamples bool
}
// Yeah, windows-style Foo, FooEx, screw you golang...
// Uses K-means to cluster the color-space and return the means of the clusters
// as a new palette of distinctive colors. Falls back to K-medoid if the mean
// happens to fall outside of the color-space, which can only happen if you
// specify a CheckColor function.
func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) {
// Checks whether it's a valid RGB and also fulfills the potentially provided constraint.
check := func(col lab_t) bool {
c := Lab(col.L, col.A, col.B)
return c.IsValid() && (settings.CheckColor == nil || settings.CheckColor(col.L, col.A, col.B))
}
// Sample the color space. These will be the points k-means is run on.
dl := 0.05
dab := 0.1
if settings.ManySamples {
dl = 0.01
dab = 0.05
}
samples := make([]lab_t, 0, int(1.0/dl * 2.0/dab * 2.0/dab))
for l := 0.0; l <= 1.0; l += dl {
for a := -1.0; a <= 1.0; a += dab {
for b := -1.0; b <= 1.0; b += dab {
if check(lab_t{l,a,b}) {
samples = append(samples, lab_t{l, a, b})
}
}
}
}
// That would cause some infinite loops down there...
if len(samples) < colorsCount {
return nil, fmt.Errorf("palettegen: more colors requested (%v) than samples available (%v). Your requested color count may be wrong, you might want to use many samples or your constraint function makes the valid color space too small.", colorsCount, len(samples))
} else if len(samples) == colorsCount {
return labs2cols(samples), nil // Oops?
}
// We take the initial means out of the samples, so they are in fact medoids.
// This helps us avoid infinite loops or arbitrary cutoffs with too restrictive constraints.
means := make([]lab_t, colorsCount)
for i := 0; i < colorsCount; i++ {
for means[i] = samples[rand.Intn(len(samples))] ; in(means, i, means[i]) ; means[i] = samples[rand.Intn(len(samples))] {
}
}
clusters := make([]int, len(samples))
samples_used := make([]bool, len(samples))
// The actual k-means/medoid iterations
for i := 0; i < settings.Iterations; i++ {
// Reassign the samples to clusters, i.e. to their closest mean.
// By the way, also check if any sample is used as a medoid and if so, mark that.
for isample, sample := range samples {
samples_used[isample] = false
mindist := math.Inf(+1)
for imean, mean := range means {
dist := lab_dist(sample, mean)
if dist < mindist {
mindist = dist
clusters[isample] = imean
}
// Mark samples which are used as a medoid.
if lab_eq(sample, mean) {
samples_used[isample] = true
}
}
}
// Compute new means according to the samples.
for imean := range means {
// The new mean is the average of all samples belonging to it.
nsamples := 0
newmean := lab_t{0.0, 0.0, 0.0}
for isample, sample := range samples {
if clusters[isample] == imean {
nsamples++
newmean.L += sample.L
newmean.A += sample.A
newmean.B += sample.B
}
}
if nsamples > 0 {
newmean.L /= float64(nsamples)
newmean.A /= float64(nsamples)
newmean.B /= float64(nsamples)
} else {
// That mean doesn't have any samples? Get a new mean from the sample list!
var inewmean int
for inewmean = rand.Intn(len(samples_used)); samples_used[inewmean]; inewmean = rand.Intn(len(samples_used)) {
}
newmean = samples[inewmean]
samples_used[inewmean] = true
}
// But now we still need to check whether the new mean is an allowed color.
if nsamples > 0 && check(newmean) {
// It does, life's good (TM)
means[imean] = newmean
} else {
// New mean isn't an allowed color or doesn't have any samples!
// Switch to medoid mode and pick the closest (unused) sample.
// This should always find something thanks to len(samples) >= colorsCount
mindist := math.Inf(+1)
for isample, sample := range samples {
if !samples_used[isample] {
dist := lab_dist(sample, newmean)
if dist < mindist {
mindist = dist
newmean = sample
}
}
}
}
}
}
return labs2cols(means), nil
}
// A wrapper which uses common parameters.
func SoftPalette(colorsCount int) ([]Color, error) {
return SoftPaletteEx(colorsCount, SoftPaletteSettings{nil, 50, false})
}
func in(haystack []lab_t, upto int, needle lab_t) bool {
for i := 0 ; i < upto && i < len(haystack) ; i++ {
if haystack[i] == needle {
return true
}
}
return false
}
const LAB_DELTA = 1e-6
func lab_eq(lab1, lab2 lab_t) bool {
return math.Abs(lab1.L - lab2.L) < LAB_DELTA &&
math.Abs(lab1.A - lab2.A) < LAB_DELTA &&
math.Abs(lab1.B - lab2.B) < LAB_DELTA
}
// That's faster than using colorful's DistanceLab since we would have to
// convert back and forth for that. Here is no conversion.
func lab_dist(lab1, lab2 lab_t) float64 {
return math.Sqrt(sq(lab1.L-lab2.L) + sq(lab1.A-lab2.A) + sq(lab1.B-lab2.B))
}
func labs2cols(labs []lab_t) (cols []Color) {
cols = make([]Color, len(labs))
for k, v := range labs {
cols[k] = Lab(v.L, v.A, v.B)
}
return cols
}

View File

@ -0,0 +1,26 @@
package colorful
import (
"math/rand"
)
// Uses the HSV color space to generate colors with similar S,V but distributed
// evenly along their Hue. This is fast but not always pretty.
// If you've got time to spare, use Lab (the non-fast below).
func FastWarmPalette(colorsCount int) (colors []Color) {
colors = make([]Color, colorsCount)
for i := 0 ; i < colorsCount ; i++ {
colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.55 + rand.Float64()*0.2, 0.35 + rand.Float64()*0.2)
}
return
}
func WarmPalette(colorsCount int) ([]Color, error) {
warmy := func(l, a, b float64) bool {
_, c, _ := LabToHcl(l, a, b)
return 0.1 <= c && c <= 0.4 && 0.2 <= l && l <= 0.5
}
return SoftPaletteEx(colorsCount, SoftPaletteSettings{warmy, 50, true})
}

8
vendor/github.com/mattn/go-runewidth/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
language: go
go:
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL

21
vendor/github.com/mattn/go-runewidth/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

27
vendor/github.com/mattn/go-runewidth/README.mkd generated vendored Normal file
View File

@ -0,0 +1,27 @@
go-runewidth
============
[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
Provides functions to get the fixed width of a character or string.
Usage
-----
```go
runewidth.StringWidth("つのだ☆HIRO") == 12
```
Author
------
Yasuhiro Matsumoto
License
-------
under the MIT License: http://mattn.mit-license.org/2013

1223
vendor/github.com/mattn/go-runewidth/runewidth.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

8
vendor/github.com/mattn/go-runewidth/runewidth_js.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// +build js
package runewidth
func IsEastAsian() bool {
// TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
return false
}

View File

@ -0,0 +1,77 @@
// +build !windows,!js
package runewidth
import (
"os"
"regexp"
"strings"
)
var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
var mblenTable = map[string]int{
"utf-8": 6,
"utf8": 6,
"jis": 8,
"eucjp": 3,
"euckr": 2,
"euccn": 2,
"sjis": 2,
"cp932": 2,
"cp51932": 2,
"cp936": 2,
"cp949": 2,
"cp950": 2,
"big5": 2,
"gbk": 2,
"gb2312": 2,
}
func isEastAsian(locale string) bool {
charset := strings.ToLower(locale)
r := reLoc.FindStringSubmatch(locale)
if len(r) == 2 {
charset = strings.ToLower(r[1])
}
if strings.HasSuffix(charset, "@cjk_narrow") {
return false
}
for pos, b := range []byte(charset) {
if b == '@' {
charset = charset[:pos]
break
}
}
max := 1
if m, ok := mblenTable[charset]; ok {
max = m
}
if max > 1 && (charset[0] != 'u' ||
strings.HasPrefix(locale, "ja") ||
strings.HasPrefix(locale, "ko") ||
strings.HasPrefix(locale, "zh")) {
return true
}
return false
}
// IsEastAsian return true if the current locale is CJK
func IsEastAsian() bool {
locale := os.Getenv("LC_CTYPE")
if locale == "" {
locale = os.Getenv("LANG")
}
// ignore C locale
if locale == "POSIX" || locale == "C" {
return false
}
if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
return false
}
return isEastAsian(locale)
}

View File

@ -0,0 +1,25 @@
package runewidth
import (
"syscall"
)
var (
kernel32 = syscall.NewLazyDLL("kernel32")
procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
)
// IsEastAsian return true if the current locale is CJK
func IsEastAsian() bool {
r1, _, _ := procGetConsoleOutputCP.Call()
if r1 == 0 {
return false
}
switch int(r1) {
case 932, 51932, 936, 949, 950:
return true
}
return false
}

11
vendor/github.com/nu7hatch/gouuid/.gitignore generated vendored Normal file
View File

@ -0,0 +1,11 @@
_obj
_test
*.6
*.out
_testmain.go
\#*
.\#*
*.log
_cgo*
*.o
*.a

19
vendor/github.com/nu7hatch/gouuid/COPYING generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright (C) 2011 by Krzysztof Kowalik <chris@nu7hat.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

21
vendor/github.com/nu7hatch/gouuid/README.md generated vendored Normal file
View File

@ -0,0 +1,21 @@
# Pure Go UUID implementation
This package provides immutable UUID structs and the functions
NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4
and 5 UUIDs as specified in [RFC 4122](http://www.ietf.org/rfc/rfc4122.txt).
## Installation
Use the `go` tool:
$ go get github.com/nu7hatch/gouuid
## Usage
See [documentation and examples](http://godoc.org/github.com/nu7hatch/gouuid)
for more information.
## Copyright
Copyright (C) 2011 by Krzysztof Kowalik <chris@nu7hat.ch>. See [COPYING](https://github.com/nu7hatch/gouuid/tree/master/COPYING)
file for details.

173
vendor/github.com/nu7hatch/gouuid/uuid.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// This package provides immutable UUID structs and the functions
// NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4
// and 5 UUIDs as specified in RFC 4122.
//
// Copyright (C) 2011 by Krzysztof Kowalik <chris@nu7hat.ch>
package uuid
import (
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"hash"
"regexp"
)
// The UUID reserved variants.
const (
ReservedNCS byte = 0x80
ReservedRFC4122 byte = 0x40
ReservedMicrosoft byte = 0x20
ReservedFuture byte = 0x00
)
// The following standard UUIDs are for use with NewV3() or NewV5().
var (
NamespaceDNS, _ = ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
NamespaceURL, _ = ParseHex("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
NamespaceOID, _ = ParseHex("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
NamespaceX500, _ = ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
)
// Pattern used to parse hex string representation of the UUID.
// FIXME: do something to consider both brackets at one time,
// current one allows to parse string with only one opening
// or closing bracket.
const hexPattern = "^(urn\\:uuid\\:)?\\{?([a-z0-9]{8})-([a-z0-9]{4})-" +
"([1-5][a-z0-9]{3})-([a-z0-9]{4})-([a-z0-9]{12})\\}?$"
var re = regexp.MustCompile(hexPattern)
// A UUID representation compliant with specification in
// RFC 4122 document.
type UUID [16]byte
// ParseHex creates a UUID object from given hex string
// representation. Function accepts UUID string in following
// formats:
//
// uuid.ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
// uuid.ParseHex("{6ba7b814-9dad-11d1-80b4-00c04fd430c8}")
// uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8")
//
func ParseHex(s string) (u *UUID, err error) {
md := re.FindStringSubmatch(s)
if md == nil {
err = errors.New("Invalid UUID string")
return
}
hash := md[2] + md[3] + md[4] + md[5] + md[6]
b, err := hex.DecodeString(hash)
if err != nil {
return
}
u = new(UUID)
copy(u[:], b)
return
}
// Parse creates a UUID object from given bytes slice.
func Parse(b []byte) (u *UUID, err error) {
if len(b) != 16 {
err = errors.New("Given slice is not valid UUID sequence")
return
}
u = new(UUID)
copy(u[:], b)
return
}
// Generate a UUID based on the MD5 hash of a namespace identifier
// and a name.
func NewV3(ns *UUID, name []byte) (u *UUID, err error) {
if ns == nil {
err = errors.New("Invalid namespace UUID")
return
}
u = new(UUID)
// Set all bits to MD5 hash generated from namespace and name.
u.setBytesFromHash(md5.New(), ns[:], name)
u.setVariant(ReservedRFC4122)
u.setVersion(3)
return
}
// Generate a random UUID.
func NewV4() (u *UUID, err error) {
u = new(UUID)
// Set all bits to randomly (or pseudo-randomly) chosen values.
_, err = rand.Read(u[:])
if err != nil {
return
}
u.setVariant(ReservedRFC4122)
u.setVersion(4)
return
}
// Generate a UUID based on the SHA-1 hash of a namespace identifier
// and a name.
func NewV5(ns *UUID, name []byte) (u *UUID, err error) {
u = new(UUID)
// Set all bits to truncated SHA1 hash generated from namespace
// and name.
u.setBytesFromHash(sha1.New(), ns[:], name)
u.setVariant(ReservedRFC4122)
u.setVersion(5)
return
}
// Generate a MD5 hash of a namespace and a name, and copy it to the
// UUID slice.
func (u *UUID) setBytesFromHash(hash hash.Hash, ns, name []byte) {
hash.Write(ns[:])
hash.Write(name)
copy(u[:], hash.Sum([]byte{})[:16])
}
// Set the two most significant bits (bits 6 and 7) of the
// clock_seq_hi_and_reserved to zero and one, respectively.
func (u *UUID) setVariant(v byte) {
switch v {
case ReservedNCS:
u[8] = (u[8] | ReservedNCS) & 0xBF
case ReservedRFC4122:
u[8] = (u[8] | ReservedRFC4122) & 0x7F
case ReservedMicrosoft:
u[8] = (u[8] | ReservedMicrosoft) & 0x3F
}
}
// Variant returns the UUID variant, which determines the internal
// layout of the UUID. This will be one of the constants: ReservedNCS,
// ReservedRFC4122, ReservedMicrosoft, ReservedFuture.
func (u *UUID) Variant() byte {
if u[8]&ReservedNCS == ReservedNCS {
return ReservedNCS
} else if u[8]&ReservedRFC4122 == ReservedRFC4122 {
return ReservedRFC4122
} else if u[8]&ReservedMicrosoft == ReservedMicrosoft {
return ReservedMicrosoft
}
return ReservedFuture
}
// Set the four most significant bits (bits 12 through 15) of the
// time_hi_and_version field to the 4-bit version number.
func (u *UUID) setVersion(v byte) {
u[6] = (u[6] & 0xF) | (v << 4)
}
// Version returns a version number of the algorithm used to
// generate the UUID sequence.
func (u *UUID) Version() uint {
return uint(u[6] >> 4)
}
// String returns the canonical hex string representation of the UUID.
func (u *UUID) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
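
A minimal usage sketch for the vendored gouuid package above (not part of the vendored file); the import path follows the vendor directory, and the error handling and input values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/nu7hatch/gouuid"
)

func main() {
	// Parse an existing UUID from one of the accepted hex formats.
	u, err := uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.Version(), u.Variant() == uuid.ReservedRFC4122, u)

	// Generate a random (version 4) UUID.
	v4, err := uuid.NewV4()
	if err != nil {
		log.Fatal(err)
	}

	// Generate a name-based (version 5) UUID in the DNS namespace.
	v5, err := uuid.NewV5(uuid.NamespaceDNS, []byte("example.org"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v4, v5)
}
```
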

27
vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

772
vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file
View File

@ -0,0 +1,772 @@
// Package difflib is a partial port of the Python difflib module.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// The following class and functions have been ported:
//
// - SequenceMatcher
//
// - unified_diff
//
// - context_diff
//
// Getting unified diffs was the main goal of the port. Keep in mind this code
// is mostly suitable for outputting text differences in a human-friendly way;
// there is no guarantee that generated diffs are consumable by patch(1).
package difflib
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func calculateRatio(matches, length int) float64 {
if length > 0 {
return 2.0 * float64(matches) / float64(length)
}
return 1.0
}
type Match struct {
A int
B int
Size int
}
type OpCode struct {
Tag byte
I1 int
I2 int
J1 int
J2 int
}
// SequenceMatcher compares sequences of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980s by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches peoples' eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
a []string
b []string
b2j map[string][]int
IsJunk func(string) bool
autoJunk bool
bJunk map[string]struct{}
matchingBlocks []Match
fullBCount map[string]int
bPopular map[string]struct{}
opCodes []OpCode
}
func NewMatcher(a, b []string) *SequenceMatcher {
m := SequenceMatcher{autoJunk: true}
m.SetSeqs(a, b)
return &m
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
}
// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
m.SetSeq1(a)
m.SetSeq2(b)
}
// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
if &a == &m.a {
return
}
m.a = a
m.matchingBlocks = nil
m.opCodes = nil
}
// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
if &b == &m.b {
return
}
m.b = b
m.matchingBlocks = nil
m.opCodes = nil
m.fullBCount = nil
m.chainB()
}
func (m *SequenceMatcher) chainB() {
// Populate line -> index mapping
b2j := map[string][]int{}
for i, s := range m.b {
indices := b2j[s]
indices = append(indices, i)
b2j[s] = indices
}
// Purge junk elements
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s, _ := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s, _ := range junk {
delete(b2j, s)
}
}
// Purge remaining popular elements
popular := map[string]struct{}{}
n := len(m.b)
if m.autoJunk && n >= 200 {
ntest := n/100 + 1
for s, indices := range b2j {
if len(indices) > ntest {
popular[s] = struct{}{}
}
}
for s, _ := range popular {
delete(b2j, s)
}
}
m.bPopular = popular
m.b2j = b2j
}
func (m *SequenceMatcher) isBJunk(s string) bool {
_, ok := m.bJunk[s]
return ok
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
// CAUTION: stripping common prefix or suffix would be incorrect.
// E.g.,
// ab
// acab
// Longest matching block is "ab", but if common prefix is
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
// strip, so ends up claiming that ab is changed to acab by
// inserting "ca" in the middle. That's minimal but unintuitive:
// "it's obvious" that someone inserted "ac" at the front.
// Windiff ends up at the same place as diff, but by pairing up
// the unique 'b's and then matching the first two 'a's.
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
// during an iteration of the loop, j2len[j] = length of longest
// junk-free match ending with a[i-1] and b[j]
j2len := map[int]int{}
for i := alo; i != ahi; i++ {
// look at all instances of a[i] in b; note that because
// b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len := map[int]int{}
for _, j := range m.b2j[m.a[i]] {
// a[i] matches b[j]
if j < blo {
continue
}
if j >= bhi {
break
}
k := j2len[j-1] + 1
newj2len[j] = k
if k > bestsize {
besti, bestj, bestsize = i-k+1, j-k+1, k
}
}
j2len = newj2len
}
// Extend the best by non-junk elements on each end. In particular,
// "popular" non-junk elements aren't in b2j, which greatly speeds
// the inner loop above, but also means "the best" match so far
// doesn't contain any junk *or* popular non-junk elements.
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
// Now that we have a wholly interesting match (albeit possibly
// empty!), we may as well suck up the matching junk on each
// side of it too. Can't think of a good reason not to, and it
// saves post-processing the (possibly considerable) expense of
// figuring out what to do with it. In the case of an empty
// interesting match, this is clearly the right thing to do,
// because no other kind of match is possible in the regions.
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
return Match{A: besti, B: bestj, Size: bestsize}
}
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
if m.matchingBlocks != nil {
return m.matchingBlocks
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
match := m.findLongestMatch(alo, ahi, blo, bhi)
i, j, k := match.A, match.B, match.Size
if match.Size > 0 {
if alo < i && blo < j {
matched = matchBlocks(alo, i, blo, j, matched)
}
matched = append(matched, match)
if i+k < ahi && j+k < bhi {
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
}
}
return matched
}
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
// matching_blocks list now.
nonAdjacent := []Match{}
i1, j1, k1 := 0, 0, 0
for _, b := range matched {
// Is this block adjacent to i1, j1, k1?
i2, j2, k2 := b.A, b.B, b.Size
if i1+k1 == i2 && j1+k1 == j2 {
// Yes, so collapse them -- this just increases the length of
// the first block by the length of the second, and the first
// block so lengthened remains the block to compare against.
k1 += k2
} else {
// Not adjacent. Remember the first block (k1==0 means it's
// the dummy we started with), and make the second block the
// new block to compare against.
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
i1, j1, k1 = i2, j2, k2
}
}
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
m.matchingBlocks = nonAdjacent
return m.matchingBlocks
}
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
if m.opCodes != nil {
return m.opCodes
}
i, j := 0, 0
matching := m.GetMatchingBlocks()
opCodes := make([]OpCode, 0, len(matching))
for _, m := range matching {
// invariant: we've pumped out correct diffs to change
// a[:i] into b[:j], and the next matching block is
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
// out a diff to change a[i:ai] into b[j:bj], pump out
// the matching block, and move (i,j) beyond the match
ai, bj, size := m.A, m.B, m.Size
tag := byte(0)
if i < ai && j < bj {
tag = 'r'
} else if i < ai {
tag = 'd'
} else if j < bj {
tag = 'i'
}
if tag > 0 {
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
}
i, j = ai+size, bj+size
// the list of matching blocks is terminated by a
// sentinel with size 0
if size > 0 {
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
}
}
m.opCodes = opCodes
return m.opCodes
}
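
A short sketch (not part of the vendored file) of how the opcode tags documented above can be consumed; the input sequences are made up, and the import path follows the vendor directory:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three"}
	b := []string{"one", "2", "three", "four"}
	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal),
		// with half-open index ranges into a and b as documented above.
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}
```
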
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if n < 0 {
n = 3
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
group := []OpCode{}
for _, c := range codes {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
groups = append(groups, group)
}
return groups
}
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
matches := 0
for _, m := range m.GetMatchingBlocks() {
matches += m.Size
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
// viewing a and b as multisets, set matches to the cardinality
// of their intersection; this counts the number of matches
// without regard to order, so is clearly an upper bound
if m.fullBCount == nil {
m.fullBCount = map[string]int{}
for _, s := range m.b {
m.fullBCount[s] = m.fullBCount[s] + 1
}
}
// avail[x] is the number of times x appears in 'b' less the
// number of times we've seen it in 'a' so far ... kinda
avail := map[string]int{}
matches := 0
for _, s := range m.a {
n, ok := avail[s]
if !ok {
n = m.fullBCount[s]
}
avail[s] = n - 1
if n > 0 {
matches += 1
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
}
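
For illustration only (not part of the vendored file), the three similarity measures on a small pair of sequences; QuickRatio and RealQuickRatio are progressively cheaper upper bounds on Ratio:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	m := difflib.NewMatcher(
		[]string{"a", "b", "c", "d"},
		[]string{"a", "x", "c", "d"},
	)
	fmt.Println(m.Ratio())          // 2*M/T = 2*3/8 = 0.75
	fmt.Println(m.QuickRatio())     // upper bound on Ratio()
	fmt.Println(m.RealQuickRatio()) // upper bound on QuickRatio()
}
```
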
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
// Unified diff parameters
type UnifiedDiff struct {
A []string // First sequence lines
FromFile string // First file name
FromDate string // First file time
B []string // Second sequence lines
ToFile string // Second file name
ToDate string // Second file time
Eol string // Headers end of line, defaults to LF
Context int // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
ws := func(s string) error {
_, err := buf.WriteString(s)
return err
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
if err != nil {
return err
}
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
if err != nil {
return err
}
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
if err := ws(" " + line); err != nil {
return err
}
}
continue
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
if err := ws("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
if err := ws("+" + line); err != nil {
return err
}
}
}
}
}
return nil
}
// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
}
// Convert range to the "ed" format.
func formatRangeContext(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
if length <= 1 {
return fmt.Sprintf("%d", beginning)
}
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
wf := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
ws := func(s string) {
_, err := buf.WriteString(s)
if diffErr == nil && err != nil {
diffErr = err
}
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
prefix := map[byte]string{
'i': "+ ",
'd': "- ",
'r': "! ",
'e': " ",
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
}
}
first, last := g[0], g[len(g)-1]
ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
wf("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
if cc.Tag == 'i' {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
range2 := formatRangeContext(first.J1, last.J2)
wf("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
if cc.Tag == 'd' {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
}
return diffErr
}
// Like WriteContextDiff but returns the diff as a string.
func GetContextDiffString(diff ContextDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteContextDiff(w, diff)
return string(w.Bytes()), err
}
// SplitLines splits a string on "\n" while preserving the newlines. The output
// can be used as input for the UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
lines := strings.SplitAfter(s, "\n")
lines[len(lines)-1] += "\n"
return lines
}
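
A minimal end-to-end sketch (not part of the vendored file) that ties SplitLines, UnifiedDiff and GetUnifiedDiffString together; the file names and contents are made up:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\nfour\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(text)
}
```
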

View File

@ -0,0 +1,16 @@
sudo: false
language: go
go:
- 1.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v -race ./...

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2015 Dmitri Shuralyov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,36 @@
sanitized_anchor_name
=====================
[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name)
Package sanitized_anchor_name provides a func to create sanitized anchor names.
Its logic can be reused by multiple packages to create interoperable anchor names
and links to those anchors.
At this time, it does not try to ensure that generated anchor names
are unique, that responsibility falls on the caller.
Installation
------------
```bash
go get -u github.com/shurcooL/sanitized_anchor_name
```
Example
-------
```Go
anchorName := sanitized_anchor_name.Create("This is a header")
fmt.Println(anchorName)
// Output:
// this-is-a-header
```
License
-------
- [MIT License](LICENSE)

View File

@ -0,0 +1,29 @@
// Package sanitized_anchor_name provides a func to create sanitized anchor names.
//
// Its logic can be reused by multiple packages to create interoperable anchor names
// and links to those anchors.
//
// At this time, it does not try to ensure that generated anchor names
// are unique, that responsibility falls on the caller.
package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name"
import "unicode"
// Create returns a sanitized anchor name for the given text.
func Create(text string) string {
var anchorName []rune
var futureDash = false
for _, r := range text {
switch {
case unicode.IsLetter(r) || unicode.IsNumber(r):
if futureDash && len(anchorName) > 0 {
anchorName = append(anchorName, '-')
}
futureDash = false
anchorName = append(anchorName, unicode.ToLower(r))
default:
futureDash = true
}
}
return string(anchorName)
}
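
A small sketch (not part of the vendored file) of the default branch above: runs of characters that are neither letters nor numbers collapse into a single dash, and trailing punctuation produces no dash at all. The input string is made up; the import path follows the vendor directory:

```go
package main

import (
	"fmt"

	"github.com/shurcooL/sanitized_anchor_name"
)

func main() {
	fmt.Println(sanitized_anchor_name.Create("Hello, World & Friends!"))
	// Output: hello-world-friends
}
```
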

22
vendor/github.com/stretchr/testify/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
Please consider promoting this project if you find it useful.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,349 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Conditionf uses a Comparison to assert a complex condition.
func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
return Condition(t, comp, append([]interface{}{msg}, args...)...)
}
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
return DirExists(t, path, append([]interface{}{msg}, args...)...)
}
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
return Empty(t, object, append([]interface{}{msg}, args...)...)
}
// Equalf asserts that two objects are equal.
//
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
// cannot be determined and will always fail.
func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// Exactlyf asserts that two objects are equal in value and type.
//
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Failf reports a failure through
func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// FailNowf fails test
func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// Falsef asserts that the specified value is false.
//
// assert.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
return False(t, value, append([]interface{}{msg}, args...)...)
}
// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
return FileExists(t, path, append([]interface{}{msg}, args...)...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// Implementsf asserts that an object is implemented by the specified interface.
//
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}
// InDeltaf asserts that the two numerals are within delta of each other.
//
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaSlicef is the same as InDelta, except it compares two slices.
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
}
// JSONEqf asserts that two JSON strings are equivalent.
//
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Lenf asserts that the specified object has a specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
return Len(t, object, length, append([]interface{}{msg}, args...)...)
}
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
return Nil(t, object, append([]interface{}{msg}, args...)...)
}
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
return NoError(t, err, append([]interface{}{msg}, args...)...)
}
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
}
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
return NotNil(t, object, append([]interface{}{msg}, args...)...)
}
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
return NotPanics(t, f, append([]interface{}{msg}, args...)...)
}
// NotRegexpf asserts that a specified regexp does not match a string.
//
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// NotZerof asserts that i is not the zero value for its type.
func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
return NotZero(t, i, append([]interface{}{msg}, args...)...)
}
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
return Panics(t, f, append([]interface{}{msg}, args...)...)
}
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// Truef asserts that the specified value is true.
//
// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
return True(t, value, append([]interface{}{msg}, args...)...)
}
// WithinDurationf asserts that the two times are within duration delta of each other.
//
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
return Zero(t, i, append([]interface{}{msg}, args...)...)
}
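
A minimal sketch (not part of the vendored file) of how these ...f variants are typically called from a test; the test package name and the values asserted are hypothetical, and the import path follows the vendor directory:

```go
package mypkg_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedAsserts(t *testing.T) {
	got := 2 + 2
	// The trailing format string and args become the failure message.
	assert.Equalf(t, 4, got, "unexpected sum for input %d", 2)
	assert.Containsf(t, "Hello World", "World", "missing %q", "World")
}
```
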

View File

@ -0,0 +1,4 @@
{{.CommentFormat}}
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
}
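
For reference (not part of the vendored file), this template is filled in by testify's _codegen tool named in the generated file headers above; for the True assertion it expands to the Truef wrapper already seen in the generated file, roughly:

```go
// Truef asserts that the specified value is true.
//
// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
	return True(t, value, append([]interface{}{msg}, args...)...)
}
```
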

View File

@ -0,0 +1,686 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
return Condition(a.t, comp, msgAndArgs...)
}
// Conditionf uses a Comparison to assert a complex condition.
func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
return Conditionf(a.t, comp, msg, args...)
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// a.Contains("Hello World", "World")
// a.Contains(["Hello", "World"], "World")
// a.Contains({"Hello": "World"}, "Hello")
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return Contains(a.t, s, contains, msgAndArgs...)
}
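
A minimal sketch (not part of the vendored file) of the forwarder style these methods provide; assert.New, which wraps a TestingT into an *Assertions, is assumed here since it is not shown in this excerpt:

```go
package mypkg_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwarders(t *testing.T) {
	// a carries t, so the t argument is dropped from each call.
	a := assert.New(t) // assumed constructor, not shown in this excerpt
	a.Contains("Hello World", "World")
	a.Condition(func() bool { return 1+1 == 2 })
}
```
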
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// a.Containsf("Hello World", "World", "error message %s", "formatted")
// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
return Containsf(a.t, s, contains, msg, args...)
}
// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
return DirExists(a.t, path, msgAndArgs...)
}
// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
return DirExistsf(a.t, path, msg, args...)
}
// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
return ElementsMatch(a.t, listA, listB, msgAndArgs...)
}
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
return ElementsMatchf(a.t, listA, listB, msg, args...)
}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// a.Empty(obj)
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// a.Emptyf(obj, "error message %s", "formatted")
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
return Emptyf(a.t, object, msg, args...)
}
// Equal asserts that two objects are equal.
//
// a.Equal(123, 123)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
// cannot be determined and will always fail.
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString)
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
return EqualErrorf(a.t, theError, errString, msg, args...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return EqualValuesf(a.t, expected, actual, msg, args...)
}
// Equalf asserts that two objects are equal.
//
// a.Equalf(123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
// cannot be determined and will always fail.
func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Equalf(a.t, expected, actual, msg, args...)
}
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if a.Error(err) {
// assert.Equal(t, expectedError, err)
// }
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if a.Errorf(err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
return Errorf(a.t, err, msg, args...)
}
// Exactly asserts that two objects are equal in value and type.
//
// a.Exactly(int32(123), int64(123))
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
// Exactlyf asserts that two objects are equal in value and type.
//
// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Exactlyf(a.t, expected, actual, msg, args...)
}
// Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
}
// FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...)
}
// FailNowf fails test
func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
return FailNowf(a.t, failureMessage, msg, args...)
}
// Failf reports a failure through
func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
return Failf(a.t, failureMessage, msg, args...)
}
// False asserts that the specified value is false.
//
// a.False(myBool)
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
// Falsef asserts that the specified value is false.
//
// a.Falsef(myBool, "error message %s", "formatted")
func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
return Falsef(a.t, value, msg, args...)
}
// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
return FileExists(a.t, path, msgAndArgs...)
}
// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
return FileExistsf(a.t, path, msg, args...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
}
// HTTPError asserts that a specified handler returns an error status code.
//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
}
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
}
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
}
// Implements asserts that the object implements the specified interface.
//
// a.Implements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
// Implementsf asserts that the object implements the specified interface.
//
// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
return Implementsf(a.t, interfaceObject, object, msg, args...)
}
// InDelta asserts that the two numerals are within delta of each other.
//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaSlicef is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
}
// InDeltaf asserts that the two numerals are within delta of each other.
//
// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaf(a.t, expected, actual, delta, msg, args...)
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
}
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
}
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
// IsTypef asserts that the specified objects are of the same type.
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
return IsTypef(a.t, expectedType, object, msg, args...)
}
// JSONEq asserts that two JSON strings are equivalent.
//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
// JSONEqf asserts that two JSON strings are equivalent.
//
// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
return JSONEqf(a.t, expected, actual, msg, args...)
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
// a.Len(mySlice, 3)
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
// a.Lenf(mySlice, 3, "error message %s", "formatted")
func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
return Lenf(a.t, object, length, msg, args...)
}
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
// Nilf asserts that the specified object is nil.
//
// a.Nilf(err, "error message %s", "formatted")
func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
return Nilf(a.t, object, msg, args...)
}
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
// assert.Equal(t, expectedObj, actualObj)
// }
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if a.NoErrorf(err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
return NoErrorf(a.t, err, msg, args...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// a.NotContains("Hello World", "Earth")
// a.NotContains(["Hello", "World"], "Earth")
// a.NotContains({"Hello": "World"}, "Earth")
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
return NotContainsf(a.t, s, contains, msg, args...)
}
// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or a
// slice or channel with len == 0.
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
return NotEmpty(a.t, object, msgAndArgs...)
}
// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or a
// slice or channel with len == 0.
//
// if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
return NotEmptyf(a.t, object, msg, args...)
}
// NotEqual asserts that the specified values are NOT equal.
//
// a.NotEqual(obj1, obj2)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return NotEqualf(a.t, expected, actual, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
// NotNilf asserts that the specified object is not nil.
//
// a.NotNilf(err, "error message %s", "formatted")
func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
return NotNilf(a.t, object, msg, args...)
}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// a.NotPanics(func(){ RemainCalm() })
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
return NotPanicsf(a.t, f, msg, args...)
}
// NotRegexp asserts that a specified regexp does not match a string.
//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
// NotRegexpf asserts that a specified regexp does not match a string.
//
// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return NotRegexpf(a.t, rx, str, msg, args...)
}
// NotSubset asserts that the specified list(array, slice...) does NOT contain all
// elements given in the specified subset(array, slice...).
//
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
return NotSubset(a.t, list, subset, msgAndArgs...)
}
// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
// elements given in the specified subset(array, slice...).
//
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return NotSubsetf(a.t, list, subset, msg, args...)
}
// NotZero asserts that i is not the zero value for its type.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
// NotZerof asserts that i is not the zero value for its type.
func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
return NotZerof(a.t, i, msg, args...)
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
// a.Panics(func(){ GoCrazy() })
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
return PanicsWithValue(a.t, expected, f, msgAndArgs...)
}
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
return PanicsWithValuef(a.t, expected, f, msg, args...)
}
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
return Panicsf(a.t, f, msg, args...)
}
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return Regexp(a.t, rx, str, msgAndArgs...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return Regexpf(a.t, rx, str, msg, args...)
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
return Subset(a.t, list, subset, msgAndArgs...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return Subsetf(a.t, list, subset, msg, args...)
}
// True asserts that the specified value is true.
//
// a.True(myBool)
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
// Truef asserts that the specified value is true.
//
// a.Truef(myBool, "error message %s", "formatted")
func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
return Truef(a.t, value, msg, args...)
}
// WithinDuration asserts that the two times are within duration delta of each other.
//
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
// WithinDurationf asserts that the two times are within duration delta of each other.
//
// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
// Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)
}
// Zerof asserts that i is the zero value for its type.
func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
return Zerof(a.t, i, msg, args...)
}
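Taken together, the forwarders above are reached through an Assertions value created with assert.New. A minimal, hypothetical test sketch (the package name, test name, and values are illustrative, not taken from this repository):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestForwarders shows the method-style API; every call forwards to the
// package-level assertion function with a.t as the first argument.
func TestForwarders(t *testing.T) {
	a := assert.New(t)

	a.Truef(len("hello") == 5, "unexpected length for %q", "hello")
	a.Lenf([]int{1, 2, 3}, 3, "slice should have %d elements", 3)
	a.InDeltaf(3.14159, 22.0/7.0, 0.01, "rough approximation of %s", "pi")
	a.NotContains("Hello World", "Earth")
}
```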


@ -0,0 +1,4 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}

1256
vendor/github.com/stretchr/testify/assert/assertions.go generated vendored Normal file

File diff suppressed because it is too large

45
vendor/github.com/stretchr/testify/assert/doc.go generated vendored Normal file

@ -0,0 +1,45 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// Example Usage
//
// The following is a complete example using assert in a standard test function:
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// If you assert many times, use the format below:
//
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
// assert := assert.New(t)
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(a, b, "The two words should be the same.")
// }
//
// Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
// testing framework. This allows the assertion funcs to write the failings and other details to
// the correct place.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package assert

10
vendor/github.com/stretchr/testify/assert/errors.go generated vendored Normal file

@ -0,0 +1,10 @@
package assert
import (
"errors"
)
// AnError is an error instance useful for testing. If the code does not care
// about error specifics, and only needs to return the error for example, this
// error should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")


@ -0,0 +1,16 @@
package assert
// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
t TestingT
}
// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
return &Assertions{
t: t,
}
}
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs


@ -0,0 +1,127 @@
package assert
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// httpCode is a helper that returns the HTTP status code of the response. It returns -1 and
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return -1, err
}
handler(w, req)
return w.Code, nil
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isSuccessCode
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isRedirectCode
}
// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isErrorCode
}
// HTTPBody is a helper that returns the HTTP body of the response. It returns an
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return ""
}
handler(w, req)
return w.Body.String()
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return contains
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return !contains
}
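As a sketch of how the helpers above are typically driven from a test: the handler and query parameter below are hypothetical stand-ins, not part of testify; the handler is invoked in-process through httptest, so no server is started.

```go
package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler is an illustrative handler used only for this sketch.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello, %s", r.URL.Query().Get("name"))
}

func TestHelloHandler(t *testing.T) {
	values := url.Values{"name": []string{"world"}}

	// Status and body checks; each builds the request URL as url+"?"+values.Encode().
	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", values)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", values, "hello, world")
	assert.HTTPBodyNotContains(t, helloHandler, "GET", "/hello", values, "goodbye")
}
```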

18
vendor/github.com/zyedidia/clipboard/.travis.yml generated vendored Normal file

@ -0,0 +1,18 @@
language: go
go:
- go1.2.2
- go1.3.3
- go1.4.3
- go1.5.3
- go1.6
before_install:
- export DISPLAY=:99.0
- sh -e /etc/init.d/xvfb start
script:
- sudo apt-get install xsel
- go test -v .
- sudo apt-get install xclip
- go test -v .

27
vendor/github.com/zyedidia/clipboard/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2013 Ato Araki. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of @atotto. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

50
vendor/github.com/zyedidia/clipboard/README.md generated vendored Normal file

@ -0,0 +1,50 @@
[![Build Status](https://travis-ci.org/atotto/clipboard.svg?branch=master)](https://travis-ci.org/atotto/clipboard) [![Build Status](https://drone.io/github.com/atotto/clipboard/status.png)](https://drone.io/github.com/atotto/clipboard/latest)
[![GoDoc](https://godoc.org/github.com/atotto/clipboard?status.svg)](http://godoc.org/github.com/atotto/clipboard)
# Clipboard for Go
Provides copying and pasting to the system clipboard for Go.
Download shell commands at https://drone.io/github.com/atotto/clipboard/files
Build:
$ go get github.com/atotto/clipboard
Platforms:
* OSX
* Windows 7 (probably works on other Windows versions)
* Linux, Unix (requires 'xclip' or 'xsel' command to be installed)
Document:
* http://godoc.org/github.com/atotto/clipboard
Notes:
* Text string only
* UTF-8 text encoding only (no conversion)
TODO:
* Clipboard watcher(?)
## Commands:
paste shell command:
$ go get github.com/atotto/clipboard/cmd/gopaste
$ # example:
$ gopaste > document.txt
copy shell command:
$ go get github.com/atotto/clipboard/cmd/gocopy
$ # example:
$ cat document.txt | gocopy

22
vendor/github.com/zyedidia/clipboard/clipboard.go generated vendored Normal file

@ -0,0 +1,22 @@
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package clipboard provides copying and pasting to the system clipboard.
package clipboard
// ReadAll reads a string from the given clipboard register.
func ReadAll(register string) (string, error) {
return readAll(register)
}
// WriteAll writes a string to the given clipboard register.
func WriteAll(text string, register string) error {
return writeAll(text, register)
}
// Unsupported might be set true during clipboard init, to help callers decide
// whether or not to offer clipboard options.
var Unsupported bool
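A brief usage sketch for this API; the register name "clipboard" is the one the platform back ends act on, and the text written here is purely illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/zyedidia/clipboard"
)

func main() {
	if clipboard.Unsupported {
		// On Linux without xclip/xsel the package falls back to an in-process store.
		log.Println("no system clipboard available; using internal fallback")
	}
	if err := clipboard.WriteAll("hello from Go", "clipboard"); err != nil {
		log.Fatal(err)
	}
	text, err := clipboard.ReadAll("clipboard")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(text) // hello from Go
}
```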


@ -0,0 +1,58 @@
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package clipboard
import (
"os/exec"
)
var (
pasteCmdArgs = "pbpaste"
copyCmdArgs = "pbcopy"
)
func getPasteCommand() *exec.Cmd {
return exec.Command(pasteCmdArgs)
}
func getCopyCommand() *exec.Cmd {
return exec.Command(copyCmdArgs)
}
func readAll(register string) (string, error) {
if register != "clipboard" {
return "", nil
}
pasteCmd := getPasteCommand()
out, err := pasteCmd.Output()
if err != nil {
return "", err
}
return string(out), nil
}
func writeAll(text string, register string) error {
if register != "clipboard" {
return nil
}
copyCmd := getCopyCommand()
in, err := copyCmd.StdinPipe()
if err != nil {
return err
}
if err := copyCmd.Start(); err != nil {
return err
}
if _, err := in.Write([]byte(text)); err != nil {
return err
}
if err := in.Close(); err != nil {
return err
}
return copyCmd.Wait()
}

105
vendor/github.com/zyedidia/clipboard/clipboard_unix.go generated vendored Normal file

@ -0,0 +1,105 @@
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd linux netbsd openbsd solaris
package clipboard
import "os/exec"
const (
xsel = "xsel"
xclip = "xclip"
)
var (
internalClipboards map[string]string
)
func init() {
if _, err := exec.LookPath(xclip); err == nil {
if err := exec.Command("xclip", "-o").Run(); err == nil {
return
}
}
if _, err := exec.LookPath(xsel); err == nil {
if err := exec.Command("xsel").Run(); err == nil {
return
}
}
internalClipboards = make(map[string]string)
Unsupported = true
}
func copyCommand(register string) []string {
if _, err := exec.LookPath(xclip); err == nil {
return []string{xclip, "-in", "-selection", register}
}
if _, err := exec.LookPath(xsel); err == nil {
return []string{xsel, "--input", "--" + register}
}
return []string{}
}
func pasteCommand(register string) []string {
if _, err := exec.LookPath(xclip); err == nil {
return []string{xclip, "-out", "-selection", register}
}
if _, err := exec.LookPath(xsel); err == nil {
return []string{xsel, "--output", "--" + register}
}
return []string{}
}
func getPasteCommand(register string) *exec.Cmd {
pasteCmdArgs := pasteCommand(register)
return exec.Command(pasteCmdArgs[0], pasteCmdArgs[1:]...)
}
func getCopyCommand(register string) *exec.Cmd {
copyCmdArgs := copyCommand(register)
return exec.Command(copyCmdArgs[0], copyCmdArgs[1:]...)
}
func readAll(register string) (string, error) {
if Unsupported {
if text, ok := internalClipboards[register]; ok {
return text, nil
}
return "", nil
}
pasteCmd := getPasteCommand(register)
out, err := pasteCmd.Output()
if err != nil {
return "", err
}
return string(out), nil
}
func writeAll(text string, register string) error {
if Unsupported {
internalClipboards[register] = text
return nil
}
copyCmd := getCopyCommand(register)
in, err := copyCmd.StdinPipe()
if err != nil {
return err
}
if err := copyCmd.Start(); err != nil {
return err
}
if _, err := in.Write([]byte(text)); err != nil {
return err
}
if err := in.Close(); err != nil {
return err
}
return copyCmd.Wait()
}


@ -0,0 +1,107 @@
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package clipboard
import (
"syscall"
"unsafe"
)
const (
cfUnicodetext = 13
gmemFixed = 0x0000
)
var (
user32 = syscall.MustLoadDLL("user32")
openClipboard = user32.MustFindProc("OpenClipboard")
closeClipboard = user32.MustFindProc("CloseClipboard")
emptyClipboard = user32.MustFindProc("EmptyClipboard")
getClipboardData = user32.MustFindProc("GetClipboardData")
setClipboardData = user32.MustFindProc("SetClipboardData")
kernel32 = syscall.NewLazyDLL("kernel32")
globalAlloc = kernel32.NewProc("GlobalAlloc")
globalFree = kernel32.NewProc("GlobalFree")
globalLock = kernel32.NewProc("GlobalLock")
globalUnlock = kernel32.NewProc("GlobalUnlock")
lstrcpy = kernel32.NewProc("lstrcpyW")
)
func readAll(register string) (string, error) {
if register != "clipboard" {
return "", nil
}
r, _, err := openClipboard.Call(0)
if r == 0 {
return "", err
}
defer closeClipboard.Call()
h, _, err := getClipboardData.Call(cfUnicodetext)
// Check the handle returned by GetClipboardData, not the stale r value.
if h == 0 {
return "", err
}
l, _, err := globalLock.Call(h)
if l == 0 {
return "", err
}
text := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(l))[:])
r, _, err = globalUnlock.Call(h)
if r == 0 {
return "", err
}
return text, nil
}
func writeAll(text string, register string) error {
if register != "clipboard" {
return nil
}
r, _, err := openClipboard.Call(0)
if r == 0 {
return err
}
defer closeClipboard.Call()
r, _, err = emptyClipboard.Call(0)
if r == 0 {
return err
}
data := syscall.StringToUTF16(text)
h, _, err := globalAlloc.Call(gmemFixed, uintptr(len(data)*int(unsafe.Sizeof(data[0]))))
if h == 0 {
return err
}
l, _, err := globalLock.Call(h)
if l == 0 {
return err
}
r, _, err = lstrcpy.Call(l, uintptr(unsafe.Pointer(&data[0])))
if r == 0 {
return err
}
r, _, err = globalUnlock.Call(h)
if r == 0 {
return err
}
r, _, err = setClipboardData.Call(cfUnicodetext, h)
if r == 0 {
return err
}
return nil
}

22
vendor/github.com/zyedidia/glob/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
Glob is licensed under the MIT "Expat" License:
Copyright (c) 2016: Zachary Yedidia.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

28
vendor/github.com/zyedidia/glob/README.md generated vendored Normal file

@ -0,0 +1,28 @@
# String globbing in Go
[![GoDoc](https://godoc.org/github.com/zyedidia/glob?status.svg)](http://godoc.org/github.com/zyedidia/glob)
This package adds support for globs in Go.
It simply converts glob expressions to regexps. I try to follow the standard defined [here](http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_13).
# Example
```go
package main
import "github.com/zyedidia/glob"
func main() {
glob, err := glob.Compile("{*.go,*.c}")
if err != nil {
// Error
}
glob.Match([]byte("test.c")) // true
glob.Match([]byte("hello.go")) // true
glob.Match([]byte("test.d")) // false
}
```
You can call all the same functions on a glob that you can call on a regexp.

94
vendor/github.com/zyedidia/glob/glob.go generated vendored Normal file

@ -0,0 +1,94 @@
// Package glob provides objects for matching strings with globs
package glob
import "regexp"
// Glob is a wrapper of *regexp.Regexp.
// It should contain a glob expression compiled into a regular expression.
type Glob struct {
*regexp.Regexp
}
// Compile takes a glob expression as a string and transforms it
// into a *Glob object (which is really just a regular expression).
// Compile also returns a possible error.
func Compile(pattern string) (*Glob, error) {
r, err := globToRegex(pattern)
return &Glob{r}, err
}
func globToRegex(glob string) (*regexp.Regexp, error) {
regex := ""
inGroup := 0
inClass := 0
firstIndexInClass := -1
arr := []byte(glob)
for i := 0; i < len(arr); i++ {
ch := arr[i]
switch ch {
case '\\':
i++
if i >= len(arr) {
regex += "\\"
} else {
next := arr[i]
switch next {
case ',':
// Nothing
case 'Q', 'E':
regex += "\\\\"
default:
regex += "\\"
}
regex += string(next)
}
case '*':
if inClass == 0 {
regex += ".*"
} else {
regex += "*"
}
case '?':
if inClass == 0 {
regex += "."
} else {
regex += "?"
}
case '[':
inClass++
firstIndexInClass = i + 1
regex += "["
case ']':
inClass--
regex += "]"
case '.', '(', ')', '+', '|', '^', '$', '@', '%':
if inClass == 0 || (firstIndexInClass == i && ch == '^') {
regex += "\\"
}
regex += string(ch)
case '!':
if firstIndexInClass == i {
regex += "^"
} else {
regex += "!"
}
case '{':
inGroup++
regex += "("
case '}':
inGroup--
regex += ")"
case ',':
if inGroup > 0 {
regex += "|"
} else {
regex += ","
}
default:
regex += string(ch)
}
}
return regexp.Compile("^" + regex + "$")
}
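For illustration, a small sketch of the resulting behaviour; since Glob embeds *regexp.Regexp, the regexp methods (MatchString, FindString, ...) are available directly on the compiled value. The pattern and paths below are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/zyedidia/glob"
)

func main() {
	// "{cmd,pkg}/*_test.go" is converted to the regexp ^(cmd|pkg)/.*_test\.go$
	g, err := glob.Compile("{cmd,pkg}/*_test.go")
	if err != nil {
		panic(err)
	}
	fmt.Println(g.MatchString("pkg/glob_test.go")) // true
	fmt.Println(g.MatchString("docs/readme.md"))   // false
}
```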

3
vendor/golang.org/x/image/AUTHORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/image/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/image/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/image/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

199
vendor/golang.org/x/image/bmp/reader.go generated vendored Normal file

@ -0,0 +1,199 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bmp implements a BMP image decoder and encoder.
//
// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html.
package bmp // import "golang.org/x/image/bmp"
import (
"errors"
"image"
"image/color"
"io"
)
// ErrUnsupported means that the input BMP image uses a valid but unsupported
// feature.
var ErrUnsupported = errors.New("bmp: unsupported BMP image")
func readUint16(b []byte) uint16 {
return uint16(b[0]) | uint16(b[1])<<8
}
func readUint32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
// decodePaletted reads an 8 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodePaletted(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette))
if c.Width == 0 || c.Height == 0 {
return paletted, nil
}
var tmp [4]byte
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
// Each row is 4-byte aligned.
if c.Width%4 != 0 {
_, err := io.ReadFull(r, tmp[:4-c.Width%4])
if err != nil {
return nil, err
}
}
}
return paletted, nil
}
// decodeRGB reads a 24 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
// There are 3 bytes per pixel, and each row is 4-byte aligned.
b := make([]byte, (3*c.Width+3)&^3)
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
if _, err := io.ReadFull(r, b); err != nil {
return nil, err
}
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
for i, j := 0, 0; i < len(p); i, j = i+4, j+3 {
// BMP images are stored in BGR order rather than RGB order.
p[i+0] = b[j+2]
p[i+1] = b[j+1]
p[i+2] = b[j+0]
p[i+3] = 0xFF
}
}
return rgba, nil
}
// decodeNRGBA reads a 32 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
for i := 0; i < len(p); i += 4 {
// BMP images are stored in BGRA order rather than RGBA order.
p[i+0], p[i+2] = p[i+2], p[i+0]
}
}
return rgba, nil
}
// Decode reads a BMP image from r and returns it as an image.Image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func Decode(r io.Reader) (image.Image, error) {
c, bpp, topDown, err := decodeConfig(r)
if err != nil {
return nil, err
}
switch bpp {
case 8:
return decodePaletted(r, c, topDown)
case 24:
return decodeRGB(r, c, topDown)
case 32:
return decodeNRGBA(r, c, topDown)
}
panic("unreachable")
}
// DecodeConfig returns the color model and dimensions of a BMP image without
// decoding the entire image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func DecodeConfig(r io.Reader) (image.Config, error) {
config, _, _, err := decodeConfig(r)
return config, err
}
func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) {
// We only support those BMP images that are a BITMAPFILEHEADER
// immediately followed by a BITMAPINFOHEADER.
const (
fileHeaderLen = 14
infoHeaderLen = 40
)
var b [1024]byte
if _, err := io.ReadFull(r, b[:fileHeaderLen+infoHeaderLen]); err != nil {
return image.Config{}, 0, false, err
}
if string(b[:2]) != "BM" {
return image.Config{}, 0, false, errors.New("bmp: invalid format")
}
offset := readUint32(b[10:14])
if readUint32(b[14:18]) != infoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
}
width := int(int32(readUint32(b[18:22])))
height := int(int32(readUint32(b[22:26])))
if height < 0 {
height, topDown = -height, true
}
if width < 0 || height < 0 {
return image.Config{}, 0, false, ErrUnsupported
}
// We only support 1 plane, 8 or 24 bits per pixel and no compression.
planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
if planes != 1 || compression != 0 {
return image.Config{}, 0, false, ErrUnsupported
}
switch bpp {
case 8:
if offset != fileHeaderLen+infoHeaderLen+256*4 {
return image.Config{}, 0, false, ErrUnsupported
}
_, err = io.ReadFull(r, b[:256*4])
if err != nil {
return image.Config{}, 0, false, err
}
pcm := make(color.Palette, 256)
for i := range pcm {
// BMP images are stored in BGR order rather than RGB order.
// Every 4th byte is padding.
pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
}
return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil
case 24:
if offset != fileHeaderLen+infoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil
case 32:
if offset != fileHeaderLen+infoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil
}
return image.Config{}, 0, false, ErrUnsupported
}
func init() {
image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig)
}

166
vendor/golang.org/x/image/bmp/writer.go generated vendored Normal file

@ -0,0 +1,166 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bmp
import (
"encoding/binary"
"errors"
"image"
"io"
)
type header struct {
sigBM [2]byte
fileSize uint32
resverved [2]uint16
pixOffset uint32
dibHeaderSize uint32
width uint32
height uint32
colorPlane uint16
bpp uint16
compression uint32
imageSize uint32
xPixelsPerMeter uint32
yPixelsPerMeter uint32
colorUse uint32
colorImportant uint32
}
func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error {
var padding []byte
if dx < step {
padding = make([]byte, step-dx)
}
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx
if _, err := w.Write(pix[min:max]); err != nil {
return err
}
if padding != nil {
if _, err := w.Write(padding); err != nil {
return err
}
}
}
return nil
}
func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int) error {
buf := make([]byte, step)
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encode(w io.Writer, m image.Image, step int) error {
b := m.Bounds()
buf := make([]byte, step)
for y := b.Max.Y - 1; y >= b.Min.Y; y-- {
off := 0
for x := b.Min.X; x < b.Max.X; x++ {
r, g, b, _ := m.At(x, y).RGBA()
buf[off+2] = byte(r >> 8)
buf[off+1] = byte(g >> 8)
buf[off+0] = byte(b >> 8)
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
// Encode writes the image m to w in BMP format.
func Encode(w io.Writer, m image.Image) error {
d := m.Bounds().Size()
if d.X < 0 || d.Y < 0 {
return errors.New("bmp: negative bounds")
}
h := &header{
sigBM: [2]byte{'B', 'M'},
fileSize: 14 + 40,
pixOffset: 14 + 40,
dibHeaderSize: 40,
width: uint32(d.X),
height: uint32(d.Y),
colorPlane: 1,
}
var step int
var palette []byte
switch m := m.(type) {
case *image.Gray:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < 256; i++ {
palette[i*4+0] = uint8(i)
palette[i*4+1] = uint8(i)
palette[i*4+2] = uint8(i)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
case *image.Paletted:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < len(m.Palette) && i < 256; i++ {
r, g, b, _ := m.Palette[i].RGBA()
palette[i*4+0] = uint8(b >> 8)
palette[i*4+1] = uint8(g >> 8)
palette[i*4+2] = uint8(r >> 8)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
default:
step = (3*d.X + 3) &^ 3
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
h.bpp = 24
}
if err := binary.Write(w, binary.LittleEndian, h); err != nil {
return err
}
if palette != nil {
if err := binary.Write(w, binary.LittleEndian, palette); err != nil {
return err
}
}
if d.X == 0 || d.Y == 0 {
return nil
}
switch m := m.(type) {
case *image.Gray:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.Paletted:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.RGBA:
return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step)
}
return encode(w, m, step)
}
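For orientation, a hedged round-trip sketch of the Encode and Decode entry points defined in this package; the file name and the generated gradient are invented for the example:

```go
package main

import (
	"image"
	"image/color"
	"log"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	// Build a small RGBA gradient; *image.RGBA is written by encodeRGBA as 24 bpp.
	img := image.NewRGBA(image.Rect(0, 0, 64, 64))
	for y := 0; y < 64; y++ {
		for x := 0; x < 64; x++ {
			img.Set(x, y, color.RGBA{uint8(4 * x), uint8(4 * y), 128, 255})
		}
	}

	f, err := os.Create("gradient.bmp")
	if err != nil {
		log.Fatal(err)
	}
	if err := bmp.Encode(f, img); err != nil {
		log.Fatal(err)
	}
	f.Close()

	// Read it back; Decode accepts 8-, 24- and 32-bit-per-pixel files.
	r, err := os.Open("gradient.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	m, err := bmp.Decode(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded image with bounds %v", m.Bounds())
}
```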

193
vendor/golang.org/x/image/riff/riff.go generated vendored Normal file

@ -0,0 +1,193 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package riff implements the Resource Interchange File Format, used by media
// formats such as AVI, WAVE and WEBP.
//
// A RIFF stream contains a sequence of chunks. Each chunk consists of an 8-byte
// header (containing a 4-byte chunk type and a 4-byte chunk length), the chunk
// data (presented as an io.Reader), and some padding bytes.
//
// A detailed description of the format is at
// http://www.tactilemedia.com/info/MCI_Control_Info.html
package riff // import "golang.org/x/image/riff"
import (
"errors"
"io"
"io/ioutil"
"math"
)
var (
errMissingPaddingByte = errors.New("riff: missing padding byte")
errMissingRIFFChunkHeader = errors.New("riff: missing RIFF chunk header")
errListSubchunkTooLong = errors.New("riff: list subchunk too long")
errShortChunkData = errors.New("riff: short chunk data")
errShortChunkHeader = errors.New("riff: short chunk header")
errStaleReader = errors.New("riff: stale reader")
)
// u32 decodes the first four bytes of b as a little-endian integer.
func u32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
const chunkHeaderSize = 8
// FourCC is a four character code.
type FourCC [4]byte
// LIST is the "LIST" FourCC.
var LIST = FourCC{'L', 'I', 'S', 'T'}
// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and
// its chunks as a *Reader.
func NewReader(r io.Reader) (formType FourCC, data *Reader, err error) {
var buf [chunkHeaderSize]byte
if _, err := io.ReadFull(r, buf[:]); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errMissingRIFFChunkHeader
}
return FourCC{}, nil, err
}
if buf[0] != 'R' || buf[1] != 'I' || buf[2] != 'F' || buf[3] != 'F' {
return FourCC{}, nil, errMissingRIFFChunkHeader
}
return NewListReader(u32(buf[4:]), r)
}
// NewListReader returns a LIST chunk's list type, such as "movi" or "wavl",
// and its chunks as a *Reader.
func NewListReader(chunkLen uint32, chunkData io.Reader) (listType FourCC, data *Reader, err error) {
if chunkLen < 4 {
return FourCC{}, nil, errShortChunkData
}
z := &Reader{r: chunkData}
if _, err := io.ReadFull(chunkData, z.buf[:4]); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = errShortChunkData
}
return FourCC{}, nil, err
}
z.totalLen = chunkLen - 4
return FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]}, z, nil
}
// Reader reads chunks from an underlying io.Reader.
type Reader struct {
r io.Reader
err error
totalLen uint32
chunkLen uint32
chunkReader *chunkReader
buf [chunkHeaderSize]byte
padded bool
}
// Next returns the next chunk's ID, length and data. It returns io.EOF if there
// are no more chunks. The io.Reader returned becomes stale after the next Next
// call, and should no longer be used.
//
// It is valid to call Next even if all of the previous chunk's data has not
// been read.
func (z *Reader) Next() (chunkID FourCC, chunkLen uint32, chunkData io.Reader, err error) {
if z.err != nil {
return FourCC{}, 0, nil, z.err
}
// Drain the rest of the previous chunk.
if z.chunkLen != 0 {
want := z.chunkLen
var got int64
got, z.err = io.Copy(ioutil.Discard, z.chunkReader)
if z.err == nil && uint32(got) != want {
z.err = errShortChunkData
}
if z.err != nil {
return FourCC{}, 0, nil, z.err
}
}
z.chunkReader = nil
if z.padded {
if z.totalLen == 0 {
z.err = errListSubchunkTooLong
return FourCC{}, 0, nil, z.err
}
z.totalLen--
_, z.err = io.ReadFull(z.r, z.buf[:1])
if z.err != nil {
if z.err == io.EOF {
z.err = errMissingPaddingByte
}
return FourCC{}, 0, nil, z.err
}
}
// We are done if we have no more data.
if z.totalLen == 0 {
z.err = io.EOF
return FourCC{}, 0, nil, z.err
}
// Read the next chunk header.
if z.totalLen < chunkHeaderSize {
z.err = errShortChunkHeader
return FourCC{}, 0, nil, z.err
}
z.totalLen -= chunkHeaderSize
if _, z.err = io.ReadFull(z.r, z.buf[:chunkHeaderSize]); z.err != nil {
if z.err == io.EOF || z.err == io.ErrUnexpectedEOF {
z.err = errShortChunkHeader
}
return FourCC{}, 0, nil, z.err
}
chunkID = FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]}
z.chunkLen = u32(z.buf[4:])
if z.chunkLen > z.totalLen {
z.err = errListSubchunkTooLong
return FourCC{}, 0, nil, z.err
}
z.padded = z.chunkLen&1 == 1
z.chunkReader = &chunkReader{z}
return chunkID, z.chunkLen, z.chunkReader, nil
}
type chunkReader struct {
z *Reader
}
func (c *chunkReader) Read(p []byte) (int, error) {
if c != c.z.chunkReader {
return 0, errStaleReader
}
z := c.z
if z.err != nil {
if z.err == io.EOF {
return 0, errStaleReader
}
return 0, z.err
}
n := int(z.chunkLen)
if n == 0 {
return 0, io.EOF
}
if n < 0 {
// Converting uint32 to int overflowed.
n = math.MaxInt32
}
if n > len(p) {
n = len(p)
}
n, err := z.r.Read(p[:n])
z.totalLen -= uint32(n)
z.chunkLen -= uint32(n)
if err != io.EOF {
z.err = err
}
return n, err
}
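A minimal sketch of walking a RIFF container with this reader; the file name is hypothetical, and any RIFF-based format (WAVE, AVI, WEBP) would work the same way:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"golang.org/x/image/riff"
)

func main() {
	f, err := os.Open("sample.webp") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	formType, r, err := riff.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("form type: %q\n", formType[:])

	// It is valid to call Next without draining each chunk's data reader.
	for {
		chunkID, chunkLen, _, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("chunk %q: %d bytes\n", chunkID[:], chunkLen)
	}
}
```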

69
vendor/golang.org/x/image/tiff/buffer.go generated vendored Normal file

@ -0,0 +1,69 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import "io"
// buffer buffers an io.Reader to satisfy io.ReaderAt.
type buffer struct {
r io.Reader
buf []byte
}
// fill reads data from b.r until the buffer contains at least end bytes.
func (b *buffer) fill(end int) error {
m := len(b.buf)
if end > m {
if end > cap(b.buf) {
newcap := 1024
for newcap < end {
newcap *= 2
}
newbuf := make([]byte, end, newcap)
copy(newbuf, b.buf)
b.buf = newbuf
} else {
b.buf = b.buf[:end]
}
if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil {
end = m + n
b.buf = b.buf[:end]
return err
}
}
return nil
}
func (b *buffer) ReadAt(p []byte, off int64) (int, error) {
o := int(off)
end := o + len(p)
if int64(end) != off+int64(len(p)) {
return 0, io.ErrUnexpectedEOF
}
err := b.fill(end)
return copy(p, b.buf[o:end]), err
}
// Slice returns a slice of the underlying buffer. The slice contains
// n bytes starting at offset off.
func (b *buffer) Slice(off, n int) ([]byte, error) {
end := off + n
if err := b.fill(end); err != nil {
return nil, err
}
return b.buf[off:end], nil
}
// newReaderAt converts an io.Reader into an io.ReaderAt.
func newReaderAt(r io.Reader) io.ReaderAt {
if ra, ok := r.(io.ReaderAt); ok {
return ra
}
return &buffer{
r: r,
buf: make([]byte, 0, 1024),
}
}
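The buffer type is unexported, but its contract is simple: ReadAt lazily fills from the wrapped io.Reader and keeps only what has been requested so far, which is what lets the TIFF decoder treat a plain stream as random-access. A hypothetical in-package test sketching that behaviour (not part of the vendored file):

package tiff

import (
	"strings"
	"testing"
)

func TestBufferReadAt(t *testing.T) {
	// Reading at offset 7 forces a fill of the first 12 bytes only.
	b := &buffer{r: strings.NewReader("hello, world"), buf: make([]byte, 0, 1024)}
	p := make([]byte, 5)
	n, err := b.ReadAt(p, 7)
	if err != nil || string(p[:n]) != "world" {
		t.Fatalf("ReadAt = %q, %v; want %q, nil", p[:n], err, "world")
	}
}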

58
vendor/golang.org/x/image/tiff/compress.go generated vendored Normal file

@ -0,0 +1,58 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import (
"bufio"
"io"
)
type byteReader interface {
io.Reader
io.ByteReader
}
// unpackBits decodes the PackBits-compressed data in src and returns the
// uncompressed data.
//
// The PackBits compression format is described in section 9 (p. 42)
// of the TIFF spec.
func unpackBits(r io.Reader) ([]byte, error) {
buf := make([]byte, 128)
dst := make([]byte, 0, 1024)
br, ok := r.(byteReader)
if !ok {
br = bufio.NewReader(r)
}
for {
b, err := br.ReadByte()
if err != nil {
if err == io.EOF {
return dst, nil
}
return nil, err
}
code := int(int8(b))
switch {
case code >= 0:
n, err := io.ReadFull(br, buf[:code+1])
if err != nil {
return nil, err
}
dst = append(dst, buf[:n]...)
case code == -128:
// No-op.
default:
if b, err = br.ReadByte(); err != nil {
return nil, err
}
for j := 0; j < 1-code; j++ {
buf[j] = b
}
dst = append(dst, buf[:1-code]...)
}
}
}
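A short worked example of the PackBits scheme that unpackBits reverses: a code byte n in [0, 127] copies the next n+1 bytes literally, a code in [-127, -1] repeats the following byte 1-n times, and -128 is a no-op. A hypothetical in-package test, not part of the vendored file:

package tiff

import (
	"bytes"
	"testing"
)

func TestUnpackBits(t *testing.T) {
	// 0x02 copies the next 3 bytes literally; 0xFE (-2) repeats the next byte 3 times.
	src := []byte{0x02, 'a', 'b', 'c', 0xFE, 'x'}
	got, err := unpackBits(bytes.NewReader(src))
	if err != nil {
		t.Fatal(err)
	}
	if want := []byte("abcxxx"); !bytes.Equal(got, want) {
		t.Fatalf("unpackBits = %q, want %q", got, want)
	}
}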

133
vendor/golang.org/x/image/tiff/consts.go generated vendored Normal file

@ -0,0 +1,133 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
// A tiff image file contains one or more images. The metadata
// of each image is contained in an Image File Directory (IFD),
// which contains entries of 12 bytes each and is described
// on page 14-16 of the specification. An IFD entry consists of
//
// - a tag, which describes the signification of the entry,
// - the data type and length of the entry,
// - the data itself or a pointer to it if it is more than 4 bytes.
//
// The presence of a length means that each IFD is effectively an array.
const (
leHeader = "II\x2A\x00" // Header for little-endian files.
beHeader = "MM\x00\x2A" // Header for big-endian files.
ifdLen = 12 // Length of an IFD entry in bytes.
)
// Data types (p. 14-16 of the spec).
const (
dtByte = 1
dtASCII = 2
dtShort = 3
dtLong = 4
dtRational = 5
)
// The length of one instance of each data type in bytes.
var lengths = [...]uint32{0, 1, 1, 2, 4, 8}
// Tags (see p. 28-41 of the spec).
const (
tImageWidth = 256
tImageLength = 257
tBitsPerSample = 258
tCompression = 259
tPhotometricInterpretation = 262
tStripOffsets = 273
tSamplesPerPixel = 277
tRowsPerStrip = 278
tStripByteCounts = 279
tTileWidth = 322
tTileLength = 323
tTileOffsets = 324
tTileByteCounts = 325
tXResolution = 282
tYResolution = 283
tResolutionUnit = 296
tPredictor = 317
tColorMap = 320
tExtraSamples = 338
tSampleFormat = 339
)
// Compression types (defined in various places in the spec and supplements).
const (
cNone = 1
cCCITT = 2
cG3 = 3 // Group 3 Fax.
cG4 = 4 // Group 4 Fax.
cLZW = 5
cJPEGOld = 6 // Superseded by cJPEG.
cJPEG = 7
cDeflate = 8 // zlib compression.
cPackBits = 32773
cDeflateOld = 32946 // Superseded by cDeflate.
)
// Photometric interpretation values (see p. 37 of the spec).
const (
pWhiteIsZero = 0
pBlackIsZero = 1
pRGB = 2
pPaletted = 3
pTransMask = 4 // transparency mask
pCMYK = 5
pYCbCr = 6
pCIELab = 8
)
// Values for the tPredictor tag (page 64-65 of the spec).
const (
prNone = 1
prHorizontal = 2
)
// Values for the tResolutionUnit tag (page 18).
const (
resNone = 1
resPerInch = 2 // Dots per inch.
resPerCM = 3 // Dots per centimeter.
)
// imageMode represents the mode of the image.
type imageMode int
const (
mBilevel imageMode = iota
mPaletted
mGray
mGrayInvert
mRGB
mRGBA
mNRGBA
)
// CompressionType describes the type of compression used in Options.
type CompressionType int
const (
Uncompressed CompressionType = iota
Deflate
)
// specValue returns the compression type constant from the TIFF spec that
// is equivalent to c.
func (c CompressionType) specValue() uint32 {
switch c {
case Deflate:
return cDeflate
}
return cNone
}
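To make the IFD layout sketched in the comment at the top of this file concrete, here is how one 12-byte entry would look on the wire in a little-endian ("II") file. The tag, type and value (ImageWidth = 640) are chosen purely for illustration:

package tiff

// exampleIFDEntry is a hypothetical ImageWidth entry: tag 256 (tImageWidth),
// type 3 (dtShort), count 1, value 640. Values occupying fewer than 4 bytes
// are left-justified in the final 4-byte field.
var exampleIFDEntry = [ifdLen]byte{
	0x00, 0x01, // tag:   256, little-endian
	0x03, 0x00, // type:  SHORT
	0x01, 0x00, 0x00, 0x00, // count: 1
	0x80, 0x02, 0x00, 0x00, // value: 640
}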

272
vendor/golang.org/x/image/tiff/lzw/reader.go generated vendored Normal file

@ -0,0 +1,272 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, ``A Technique for High-Performance Data
// Compression'', Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the TIFF file format, including
// an "off by one" algorithmic difference when compared to standard LZW.
package lzw // import "golang.org/x/image/tiff/lzw"
/*
This file was branched from src/pkg/compress/lzw/reader.go in the
standard library. Differences from the original are marked with "NOTE".
The tif_lzw.c file in the libtiff C library has this comment:
----
The 5.0 spec describes a different algorithm than Aldus
implements. Specifically, Aldus does code length transitions
one code earlier than should be done (for real LZW).
Earlier versions of this library implemented the correct
LZW algorithm, but emitted codes in a bit order opposite
to the TIFF spec. Thus, to maintain compatibility w/ Aldus
we interpret MSB-LSB ordered codes to be images written w/
old versions of this library, but otherwise adhere to the
Aldus "off by one" algorithm.
----
The Go code doesn't read (invalid) TIFF files written by old versions of
libtiff, but the LZW algorithm in this package still differs from the one in
Go's standard library package to accommodate this "off by one" in valid TIFFs.
*/
import (
"bufio"
"errors"
"fmt"
"io"
)
// Order specifies the bit ordering in an LZW data stream.
type Order int
const (
// LSB means Least Significant Bits first, as used in the GIF file format.
LSB Order = iota
// MSB means Most Significant Bits first, as used in the TIFF and PDF
// file formats.
MSB
)
const (
maxWidth = 12
decoderInvalidCode = 0xffff
flushBuffer = 1 << maxWidth
)
// decoder is the state from which the readXxx method converts a byte
// stream into a code stream.
type decoder struct {
r io.ByteReader
bits uint32
nBits uint
width uint
read func(*decoder) (uint16, error) // readLSB or readMSB
litWidth int // width in bits of literal codes
err error
// The first 1<<litWidth codes are literal codes.
// The next two codes mean clear and EOF.
// Other valid codes are in the range [lo, hi] where lo := clear + 2,
// with the upper bound incrementing on each code seen.
// overflow is the code at which hi overflows the code width. NOTE: TIFF's LZW is "off by one".
// last is the most recently seen code, or decoderInvalidCode.
clear, eof, hi, overflow, last uint16
// Each code c in [lo, hi] expands to two or more bytes. For c != hi:
// suffix[c] is the last of these bytes.
// prefix[c] is the code for all but the last byte.
// This code can either be a literal code or another code in [lo, c).
// The c == hi case is a special case.
suffix [1 << maxWidth]uint8
prefix [1 << maxWidth]uint16
// output is the temporary output buffer.
// Literal codes are accumulated from the start of the buffer.
// Non-literal codes decode to a sequence of suffixes that are first
// written right-to-left from the end of the buffer before being copied
// to the start of the buffer.
// It is flushed when it contains >= 1<<maxWidth bytes,
// so that there is always room to decode an entire code.
output [2 * 1 << maxWidth]byte
o int // write index into output
toRead []byte // bytes to return from Read
}
// readLSB returns the next code for "Least Significant Bits first" data.
func (d *decoder) readLSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
return 0, err
}
d.bits |= uint32(x) << d.nBits
d.nBits += 8
}
code := uint16(d.bits & (1<<d.width - 1))
d.bits >>= d.width
d.nBits -= d.width
return code, nil
}
// readMSB returns the next code for "Most Significant Bits first" data.
func (d *decoder) readMSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
return 0, err
}
d.bits |= uint32(x) << (24 - d.nBits)
d.nBits += 8
}
code := uint16(d.bits >> (32 - d.width))
d.bits <<= d.width
d.nBits -= d.width
return code, nil
}
func (d *decoder) Read(b []byte) (int, error) {
for {
if len(d.toRead) > 0 {
n := copy(b, d.toRead)
d.toRead = d.toRead[n:]
return n, nil
}
if d.err != nil {
return 0, d.err
}
d.decode()
}
}
// decode decompresses bytes from r and leaves them in d.toRead.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func (d *decoder) decode() {
// Loop over the code stream, converting codes into decompressed bytes.
loop:
for {
code, err := d.read(d)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
d.err = err
break
}
switch {
case code < d.clear:
// We have a literal code.
d.output[d.o] = uint8(code)
d.o++
if d.last != decoderInvalidCode {
// Save what the hi code expands to.
d.suffix[d.hi] = uint8(code)
d.prefix[d.hi] = d.last
}
case code == d.clear:
d.width = 1 + uint(d.litWidth)
d.hi = d.eof
d.overflow = 1 << d.width
d.last = decoderInvalidCode
continue
case code == d.eof:
d.err = io.EOF
break loop
case code <= d.hi:
c, i := code, len(d.output)-1
if code == d.hi {
// code == hi is a special case which expands to the last expansion
// followed by the head of the last expansion. To find the head, we walk
// the prefix chain until we find a literal code.
c = d.last
for c >= d.clear {
c = d.prefix[c]
}
d.output[i] = uint8(c)
i--
c = d.last
}
// Copy the suffix chain into output and then write that to w.
for c >= d.clear {
d.output[i] = d.suffix[c]
i--
c = d.prefix[c]
}
d.output[i] = uint8(c)
d.o += copy(d.output[d.o:], d.output[i:])
if d.last != decoderInvalidCode {
// Save what the hi code expands to.
d.suffix[d.hi] = uint8(c)
d.prefix[d.hi] = d.last
}
default:
d.err = errors.New("lzw: invalid code")
break loop
}
d.last, d.hi = code, d.hi+1
if d.hi+1 >= d.overflow { // NOTE: the "+1" is where TIFF's LZW differs from the standard algorithm.
if d.width == maxWidth {
d.last = decoderInvalidCode
} else {
d.width++
d.overflow <<= 1
}
}
if d.o >= flushBuffer {
break
}
}
// Flush pending output.
d.toRead = d.output[:d.o]
d.o = 0
}
var errClosed = errors.New("lzw: reader/writer is closed")
func (d *decoder) Close() error {
d.err = errClosed // in case any Reads come along
return nil
}
// NewReader creates a new io.ReadCloser.
// Reads from the returned io.ReadCloser read and decompress data from r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when
// finished reading.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8. It must equal the litWidth
// used during compression.
func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
d := new(decoder)
switch order {
case LSB:
d.read = (*decoder).readLSB
case MSB:
d.read = (*decoder).readMSB
default:
d.err = errors.New("lzw: unknown order")
return d
}
if litWidth < 2 || 8 < litWidth {
d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
return d
}
if br, ok := r.(io.ByteReader); ok {
d.r = br
} else {
d.r = bufio.NewReader(r)
}
d.litWidth = litWidth
d.width = 1 + uint(litWidth)
d.clear = uint16(1) << uint(litWidth)
d.eof, d.hi = d.clear+1, d.clear+1
d.overflow = uint16(1) << d.width
d.last = decoderInvalidCode
return d
}
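A minimal sketch of how the TIFF decoder later in this commit uses the package: wrap the compressed strip or tile in a reader with MSB bit order and 8-bit literal codes, then read until EOF. The input path is hypothetical:

package main

import (
	"io/ioutil"
	"log"
	"os"

	"golang.org/x/image/tiff/lzw"
)

func main() {
	f, err := os.Open("strip.lzw") // hypothetical: the raw LZW data of one TIFF strip
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := lzw.NewReader(f, lzw.MSB, 8) // TIFF uses MSB-first codes with litWidth 8
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", len(data))
}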

684
vendor/golang.org/x/image/tiff/reader.go generated vendored Normal file

@ -0,0 +1,684 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tiff implements a TIFF image decoder and encoder.
//
// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
package tiff // import "golang.org/x/image/tiff"
import (
"compress/zlib"
"encoding/binary"
"fmt"
"image"
"image/color"
"io"
"io/ioutil"
"math"
"golang.org/x/image/tiff/lzw"
)
// A FormatError reports that the input is not a valid TIFF image.
type FormatError string
func (e FormatError) Error() string {
return "tiff: invalid format: " + string(e)
}
// An UnsupportedError reports that the input uses a valid but
// unimplemented feature.
type UnsupportedError string
func (e UnsupportedError) Error() string {
return "tiff: unsupported feature: " + string(e)
}
var errNoPixels = FormatError("not enough pixel data")
type decoder struct {
r io.ReaderAt
byteOrder binary.ByteOrder
config image.Config
mode imageMode
bpp uint
features map[int][]uint
palette []color.Color
buf []byte
off int // Current offset in buf.
v uint32 // Buffer value for reading with arbitrary bit depths.
nbits uint // Remaining number of bits in v.
}
// firstVal returns the first uint of the features entry with the given tag,
// or 0 if the tag does not exist.
func (d *decoder) firstVal(tag int) uint {
f := d.features[tag]
if len(f) == 0 {
return 0
}
return f[0]
}
// ifdUint decodes the IFD entry in p, which must be of the Byte, Short
// or Long type, and returns the decoded uint values.
func (d *decoder) ifdUint(p []byte) (u []uint, err error) {
var raw []byte
if len(p) < ifdLen {
return nil, FormatError("bad IFD entry")
}
datatype := d.byteOrder.Uint16(p[2:4])
if dt := int(datatype); dt <= 0 || dt >= len(lengths) {
return nil, UnsupportedError("IFD entry datatype")
}
count := d.byteOrder.Uint32(p[4:8])
if count > math.MaxInt32/lengths[datatype] {
return nil, FormatError("IFD data too large")
}
if datalen := lengths[datatype] * count; datalen > 4 {
// The IFD contains a pointer to the real value.
raw = make([]byte, datalen)
_, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12])))
} else {
raw = p[8 : 8+datalen]
}
if err != nil {
return nil, err
}
u = make([]uint, count)
switch datatype {
case dtByte:
for i := uint32(0); i < count; i++ {
u[i] = uint(raw[i])
}
case dtShort:
for i := uint32(0); i < count; i++ {
u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)]))
}
case dtLong:
for i := uint32(0); i < count; i++ {
u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)]))
}
default:
return nil, UnsupportedError("data type")
}
return u, nil
}
// parseIFD decides whether the IFD entry in p is "interesting" and
// stows away the data in the decoder. It returns the tag number of the
// entry and an error, if any.
func (d *decoder) parseIFD(p []byte) (int, error) {
tag := d.byteOrder.Uint16(p[0:2])
switch tag {
case tBitsPerSample,
tExtraSamples,
tPhotometricInterpretation,
tCompression,
tPredictor,
tStripOffsets,
tStripByteCounts,
tRowsPerStrip,
tTileWidth,
tTileLength,
tTileOffsets,
tTileByteCounts,
tImageLength,
tImageWidth:
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
d.features[int(tag)] = val
case tColorMap:
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
numcolors := len(val) / 3
if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
return 0, FormatError("bad ColorMap length")
}
d.palette = make([]color.Color, numcolors)
for i := 0; i < numcolors; i++ {
d.palette[i] = color.RGBA64{
uint16(val[i]),
uint16(val[i+numcolors]),
uint16(val[i+2*numcolors]),
0xffff,
}
}
case tSampleFormat:
// Page 27 of the spec: If the SampleFormat is present and
// the value is not 1 [= unsigned integer data], a Baseline
// TIFF reader that cannot handle the SampleFormat value
// must terminate the import process gracefully.
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
for _, v := range val {
if v != 1 {
return 0, UnsupportedError("sample format")
}
}
}
return int(tag), nil
}
// readBits reads n bits from the internal buffer starting at the current offset.
func (d *decoder) readBits(n uint) (v uint32, ok bool) {
for d.nbits < n {
d.v <<= 8
if d.off >= len(d.buf) {
return 0, false
}
d.v |= uint32(d.buf[d.off])
d.off++
d.nbits += 8
}
d.nbits -= n
rv := d.v >> d.nbits
d.v &^= rv << d.nbits
return rv, true
}
// flushBits discards the unread bits in the buffer used by readBits.
// It is used at the end of a line.
func (d *decoder) flushBits() {
d.v = 0
d.nbits = 0
}
// minInt returns the smaller of x or y.
func minInt(a, b int) int {
if a <= b {
return a
}
return b
}
// decode decodes the raw data of an image.
// It reads from d.buf and writes the strip or tile into dst.
func (d *decoder) decode(dst image.Image, xmin, ymin, xmax, ymax int) error {
d.off = 0
// Apply horizontal predictor if necessary.
// In this case, p contains the color difference to the preceding pixel.
// See page 64-65 of the spec.
if d.firstVal(tPredictor) == prHorizontal {
switch d.bpp {
case 16:
var off int
n := 2 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel
for y := ymin; y < ymax; y++ {
off += n
for x := 0; x < (xmax-xmin-1)*n; x += 2 {
if off+2 > len(d.buf) {
return errNoPixels
}
v0 := d.byteOrder.Uint16(d.buf[off-n : off-n+2])
v1 := d.byteOrder.Uint16(d.buf[off : off+2])
d.byteOrder.PutUint16(d.buf[off:off+2], v1+v0)
off += 2
}
}
case 8:
var off int
n := 1 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel
for y := ymin; y < ymax; y++ {
off += n
for x := 0; x < (xmax-xmin-1)*n; x++ {
if off >= len(d.buf) {
return errNoPixels
}
d.buf[off] += d.buf[off-n]
off++
}
}
case 1:
return UnsupportedError("horizontal predictor with 1 BitsPerSample")
}
}
rMaxX := minInt(xmax, dst.Bounds().Max.X)
rMaxY := minInt(ymax, dst.Bounds().Max.Y)
switch d.mode {
case mGray, mGrayInvert:
if d.bpp == 16 {
img := dst.(*image.Gray16)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+2 > len(d.buf) {
return errNoPixels
}
v := d.byteOrder.Uint16(d.buf[d.off : d.off+2])
d.off += 2
if d.mode == mGrayInvert {
v = 0xffff - v
}
img.SetGray16(x, y, color.Gray16{v})
}
if rMaxX == img.Bounds().Max.X {
d.off += 2 * (xmax - img.Bounds().Max.X)
}
}
} else {
img := dst.(*image.Gray)
max := uint32((1 << d.bpp) - 1)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
v, ok := d.readBits(d.bpp)
if !ok {
return errNoPixels
}
v = v * 0xff / max
if d.mode == mGrayInvert {
v = 0xff - v
}
img.SetGray(x, y, color.Gray{uint8(v)})
}
d.flushBits()
}
}
case mPaletted:
img := dst.(*image.Paletted)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
v, ok := d.readBits(d.bpp)
if !ok {
return errNoPixels
}
img.SetColorIndex(x, y, uint8(v))
}
d.flushBits()
}
case mRGB:
if d.bpp == 16 {
img := dst.(*image.RGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+6 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
d.off += 6
img.SetRGBA64(x, y, color.RGBA64{r, g, b, 0xffff})
}
}
} else {
img := dst.(*image.RGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
off := (y - ymin) * (xmax - xmin) * 3
for i := min; i < max; i += 4 {
if off+3 > len(d.buf) {
return errNoPixels
}
img.Pix[i+0] = d.buf[off+0]
img.Pix[i+1] = d.buf[off+1]
img.Pix[i+2] = d.buf[off+2]
img.Pix[i+3] = 0xff
off += 3
}
}
}
case mNRGBA:
if d.bpp == 16 {
img := dst.(*image.NRGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+8 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8])
d.off += 8
img.SetNRGBA64(x, y, color.NRGBA64{r, g, b, a})
}
}
} else {
img := dst.(*image.NRGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4
if i1 > len(d.buf) {
return errNoPixels
}
copy(img.Pix[min:max], d.buf[i0:i1])
}
}
case mRGBA:
if d.bpp == 16 {
img := dst.(*image.RGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+8 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8])
d.off += 8
img.SetRGBA64(x, y, color.RGBA64{r, g, b, a})
}
}
} else {
img := dst.(*image.RGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4
if i1 > len(d.buf) {
return errNoPixels
}
copy(img.Pix[min:max], d.buf[i0:i1])
}
}
}
return nil
}
func newDecoder(r io.Reader) (*decoder, error) {
d := &decoder{
r: newReaderAt(r),
features: make(map[int][]uint),
}
p := make([]byte, 8)
if _, err := d.r.ReadAt(p, 0); err != nil {
return nil, err
}
switch string(p[0:4]) {
case leHeader:
d.byteOrder = binary.LittleEndian
case beHeader:
d.byteOrder = binary.BigEndian
default:
return nil, FormatError("malformed header")
}
ifdOffset := int64(d.byteOrder.Uint32(p[4:8]))
// The first two bytes contain the number of entries (12 bytes each).
if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil {
return nil, err
}
numItems := int(d.byteOrder.Uint16(p[0:2]))
// All IFD entries are read in one chunk.
p = make([]byte, ifdLen*numItems)
if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil {
return nil, err
}
prevTag := -1
for i := 0; i < len(p); i += ifdLen {
tag, err := d.parseIFD(p[i : i+ifdLen])
if err != nil {
return nil, err
}
if tag <= prevTag {
return nil, FormatError("tags are not sorted in ascending order")
}
prevTag = tag
}
d.config.Width = int(d.firstVal(tImageWidth))
d.config.Height = int(d.firstVal(tImageLength))
if _, ok := d.features[tBitsPerSample]; !ok {
return nil, FormatError("BitsPerSample tag missing")
}
d.bpp = d.firstVal(tBitsPerSample)
switch d.bpp {
case 0:
return nil, FormatError("BitsPerSample must not be 0")
case 1, 8, 16:
// Nothing to do, these are accepted by this implementation.
default:
return nil, UnsupportedError(fmt.Sprintf("BitsPerSample of %v", d.bpp))
}
// Determine the image mode.
switch d.firstVal(tPhotometricInterpretation) {
case pRGB:
if d.bpp == 16 {
for _, b := range d.features[tBitsPerSample] {
if b != 16 {
return nil, FormatError("wrong number of samples for 16bit RGB")
}
}
} else {
for _, b := range d.features[tBitsPerSample] {
if b != 8 {
return nil, FormatError("wrong number of samples for 8bit RGB")
}
}
}
// RGB images normally have 3 samples per pixel.
// If there are more, ExtraSamples (p. 31-32 of the spec)
// gives their meaning (usually an alpha channel).
//
// This implementation does not support extra samples
// of an unspecified type.
switch len(d.features[tBitsPerSample]) {
case 3:
d.mode = mRGB
if d.bpp == 16 {
d.config.ColorModel = color.RGBA64Model
} else {
d.config.ColorModel = color.RGBAModel
}
case 4:
switch d.firstVal(tExtraSamples) {
case 1:
d.mode = mRGBA
if d.bpp == 16 {
d.config.ColorModel = color.RGBA64Model
} else {
d.config.ColorModel = color.RGBAModel
}
case 2:
d.mode = mNRGBA
if d.bpp == 16 {
d.config.ColorModel = color.NRGBA64Model
} else {
d.config.ColorModel = color.NRGBAModel
}
default:
return nil, FormatError("wrong number of samples for RGB")
}
default:
return nil, FormatError("wrong number of samples for RGB")
}
case pPaletted:
d.mode = mPaletted
d.config.ColorModel = color.Palette(d.palette)
case pWhiteIsZero:
d.mode = mGrayInvert
if d.bpp == 16 {
d.config.ColorModel = color.Gray16Model
} else {
d.config.ColorModel = color.GrayModel
}
case pBlackIsZero:
d.mode = mGray
if d.bpp == 16 {
d.config.ColorModel = color.Gray16Model
} else {
d.config.ColorModel = color.GrayModel
}
default:
return nil, UnsupportedError("color model")
}
return d, nil
}
// DecodeConfig returns the color model and dimensions of a TIFF image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
d, err := newDecoder(r)
if err != nil {
return image.Config{}, err
}
return d.config, nil
}
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err error) {
d, err := newDecoder(r)
if err != nil {
return
}
blockPadding := false
blockWidth := d.config.Width
blockHeight := d.config.Height
blocksAcross := 1
blocksDown := 1
if d.config.Width == 0 {
blocksAcross = 0
}
if d.config.Height == 0 {
blocksDown = 0
}
var blockOffsets, blockCounts []uint
if int(d.firstVal(tTileWidth)) != 0 {
blockPadding = true
blockWidth = int(d.firstVal(tTileWidth))
blockHeight = int(d.firstVal(tTileLength))
if blockWidth != 0 {
blocksAcross = (d.config.Width + blockWidth - 1) / blockWidth
}
if blockHeight != 0 {
blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
}
blockCounts = d.features[tTileByteCounts]
blockOffsets = d.features[tTileOffsets]
} else {
if int(d.firstVal(tRowsPerStrip)) != 0 {
blockHeight = int(d.firstVal(tRowsPerStrip))
}
if blockHeight != 0 {
blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
}
blockOffsets = d.features[tStripOffsets]
blockCounts = d.features[tStripByteCounts]
}
// Check if we have the right number of strips/tiles, offsets and counts.
if n := blocksAcross * blocksDown; len(blockOffsets) < n || len(blockCounts) < n {
return nil, FormatError("inconsistent header")
}
imgRect := image.Rect(0, 0, d.config.Width, d.config.Height)
switch d.mode {
case mGray, mGrayInvert:
if d.bpp == 16 {
img = image.NewGray16(imgRect)
} else {
img = image.NewGray(imgRect)
}
case mPaletted:
img = image.NewPaletted(imgRect, d.palette)
case mNRGBA:
if d.bpp == 16 {
img = image.NewNRGBA64(imgRect)
} else {
img = image.NewNRGBA(imgRect)
}
case mRGB, mRGBA:
if d.bpp == 16 {
img = image.NewRGBA64(imgRect)
} else {
img = image.NewRGBA(imgRect)
}
}
for i := 0; i < blocksAcross; i++ {
blkW := blockWidth
if !blockPadding && i == blocksAcross-1 && d.config.Width%blockWidth != 0 {
blkW = d.config.Width % blockWidth
}
for j := 0; j < blocksDown; j++ {
blkH := blockHeight
if !blockPadding && j == blocksDown-1 && d.config.Height%blockHeight != 0 {
blkH = d.config.Height % blockHeight
}
offset := int64(blockOffsets[j*blocksAcross+i])
n := int64(blockCounts[j*blocksAcross+i])
switch d.firstVal(tCompression) {
// According to the spec, Compression does not have a default value,
// but some tools interpret a missing Compression value as none so we do
// the same.
case cNone, 0:
if b, ok := d.r.(*buffer); ok {
d.buf, err = b.Slice(int(offset), int(n))
} else {
d.buf = make([]byte, n)
_, err = d.r.ReadAt(d.buf, offset)
}
case cLZW:
r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
d.buf, err = ioutil.ReadAll(r)
r.Close()
case cDeflate, cDeflateOld:
var r io.ReadCloser
r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
if err != nil {
return nil, err
}
d.buf, err = ioutil.ReadAll(r)
r.Close()
case cPackBits:
d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
default:
err = UnsupportedError(fmt.Sprintf("compression value %d", d.firstVal(tCompression)))
}
if err != nil {
return nil, err
}
xmin := i * blockWidth
ymin := j * blockHeight
xmax := xmin + blkW
ymax := ymin + blkH
err = d.decode(img, xmin, ymin, xmax, ymax)
if err != nil {
return nil, err
}
}
}
return
}
func init() {
image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig)
image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig)
}
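Since init registers the format, image.Decode recognizes TIFF automatically once this package is imported; the direct API is sketched below with a hypothetical input path:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	f, err := os.Open("input.tiff") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	cfg, err := tiff.DecodeConfig(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%dx%d\n", cfg.Width, cfg.Height)

	// Rewind so Decode sees the stream from the start (only strictly needed
	// when the reader does not also implement io.ReaderAt).
	if _, err := f.Seek(0, 0); err != nil {
		log.Fatal(err)
	}
	img, err := tiff.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bounds:", img.Bounds())
}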

438
vendor/golang.org/x/image/tiff/writer.go generated vendored Normal file

@ -0,0 +1,438 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import (
"bytes"
"compress/zlib"
"encoding/binary"
"image"
"io"
"sort"
)
// The TIFF format allows the order of its different elements to be chosen freely.
// The basic structure of a TIFF file written by this package is:
//
// 1. Header (8 bytes).
// 2. Image data.
// 3. Image File Directory (IFD).
// 4. "Pointer area" for larger entries in the IFD.
// We only write little-endian TIFF files.
var enc = binary.LittleEndian
// An ifdEntry is a single entry in an Image File Directory.
// A value of type dtRational is composed of two 32-bit values,
// thus data contains two uints (numerator and denominator) for a single number.
type ifdEntry struct {
tag int
datatype int
data []uint32
}
func (e ifdEntry) putData(p []byte) {
for _, d := range e.data {
switch e.datatype {
case dtByte, dtASCII:
p[0] = byte(d)
p = p[1:]
case dtShort:
enc.PutUint16(p, uint16(d))
p = p[2:]
case dtLong, dtRational:
enc.PutUint32(p, uint32(d))
p = p[4:]
}
}
}
type byTag []ifdEntry
func (d byTag) Len() int { return len(d) }
func (d byTag) Less(i, j int) bool { return d[i].tag < d[j].tag }
func (d byTag) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func encodeGray(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
if !predictor {
return writePix(w, pix, dy, dx, stride)
}
buf := make([]byte, dx)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx
off := 0
var v0 uint8
for i := min; i < max; i++ {
v1 := pix[i]
buf[off] = v1 - v0
v0 = v1
off++
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeGray16(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
buf := make([]byte, dx*2)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*2
off := 0
var v0 uint16
for i := min; i < max; i += 2 {
// An image.Gray16's Pix is in big-endian order.
v1 := uint16(pix[i])<<8 | uint16(pix[i+1])
if predictor {
v0, v1 = v1, v1-v0
}
// We only write little-endian TIFF files.
buf[off+0] = byte(v1)
buf[off+1] = byte(v1 >> 8)
off += 2
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
if !predictor {
return writePix(w, pix, dy, dx*4, stride)
}
buf := make([]byte, dx*4)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*4
off := 0
var r0, g0, b0, a0 uint8
for i := min; i < max; i += 4 {
r1, g1, b1, a1 := pix[i+0], pix[i+1], pix[i+2], pix[i+3]
buf[off+0] = r1 - r0
buf[off+1] = g1 - g0
buf[off+2] = b1 - b0
buf[off+3] = a1 - a0
off += 4
r0, g0, b0, a0 = r1, g1, b1, a1
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeRGBA64(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
buf := make([]byte, dx*8)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*8
off := 0
var r0, g0, b0, a0 uint16
for i := min; i < max; i += 8 {
// An image.RGBA64's Pix is in big-endian order.
r1 := uint16(pix[i+0])<<8 | uint16(pix[i+1])
g1 := uint16(pix[i+2])<<8 | uint16(pix[i+3])
b1 := uint16(pix[i+4])<<8 | uint16(pix[i+5])
a1 := uint16(pix[i+6])<<8 | uint16(pix[i+7])
if predictor {
r0, r1 = r1, r1-r0
g0, g1 = g1, g1-g0
b0, b1 = b1, b1-b0
a0, a1 = a1, a1-a0
}
// We only write little-endian TIFF files.
buf[off+0] = byte(r1)
buf[off+1] = byte(r1 >> 8)
buf[off+2] = byte(g1)
buf[off+3] = byte(g1 >> 8)
buf[off+4] = byte(b1)
buf[off+5] = byte(b1 >> 8)
buf[off+6] = byte(a1)
buf[off+7] = byte(a1 >> 8)
off += 8
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encode(w io.Writer, m image.Image, predictor bool) error {
bounds := m.Bounds()
buf := make([]byte, 4*bounds.Dx())
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
off := 0
if predictor {
var r0, g0, b0, a0 uint8
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r, g, b, a := m.At(x, y).RGBA()
r1 := uint8(r >> 8)
g1 := uint8(g >> 8)
b1 := uint8(b >> 8)
a1 := uint8(a >> 8)
buf[off+0] = r1 - r0
buf[off+1] = g1 - g0
buf[off+2] = b1 - b0
buf[off+3] = a1 - a0
off += 4
r0, g0, b0, a0 = r1, g1, b1, a1
}
} else {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r, g, b, a := m.At(x, y).RGBA()
buf[off+0] = uint8(r >> 8)
buf[off+1] = uint8(g >> 8)
buf[off+2] = uint8(b >> 8)
buf[off+3] = uint8(a >> 8)
off += 4
}
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
// writePix writes the internal byte array of an image to w. It is less general
// but much faster than encode. writePix is used when pix directly
// corresponds to one of the TIFF image types.
func writePix(w io.Writer, pix []byte, nrows, length, stride int) error {
if length == stride {
_, err := w.Write(pix[:nrows*length])
return err
}
for ; nrows > 0; nrows-- {
if _, err := w.Write(pix[:length]); err != nil {
return err
}
pix = pix[stride:]
}
return nil
}
func writeIFD(w io.Writer, ifdOffset int, d []ifdEntry) error {
var buf [ifdLen]byte
// Make space for "pointer area" containing IFD entry data
// longer than 4 bytes.
parea := make([]byte, 1024)
pstart := ifdOffset + ifdLen*len(d) + 6
var o int // Current offset in parea.
// The IFD has to be written with the tags in ascending order.
sort.Sort(byTag(d))
// Write the number of entries in this IFD.
if err := binary.Write(w, enc, uint16(len(d))); err != nil {
return err
}
for _, ent := range d {
enc.PutUint16(buf[0:2], uint16(ent.tag))
enc.PutUint16(buf[2:4], uint16(ent.datatype))
count := uint32(len(ent.data))
if ent.datatype == dtRational {
count /= 2
}
enc.PutUint32(buf[4:8], count)
datalen := int(count * lengths[ent.datatype])
if datalen <= 4 {
ent.putData(buf[8:12])
} else {
if (o + datalen) > len(parea) {
newlen := len(parea) + 1024
for (o + datalen) > newlen {
newlen += 1024
}
newarea := make([]byte, newlen)
copy(newarea, parea)
parea = newarea
}
ent.putData(parea[o : o+datalen])
enc.PutUint32(buf[8:12], uint32(pstart+o))
o += datalen
}
if _, err := w.Write(buf[:]); err != nil {
return err
}
}
// The IFD ends with the offset of the next IFD in the file,
// or zero if it is the last one (page 14).
if err := binary.Write(w, enc, uint32(0)); err != nil {
return err
}
_, err := w.Write(parea[:o])
return err
}
// Options are the encoding parameters.
type Options struct {
// Compression is the type of compression used.
Compression CompressionType
// Predictor determines whether a differencing predictor is used;
// if true, instead of each pixel's color, the color difference to the
// preceding one is saved. This improves the compression for certain
// types of images and compressors. For example, it works well for
// photos with Deflate compression.
Predictor bool
}
// Encode writes the image m to w. opt determines the options used for
// encoding, such as the compression type. If opt is nil, an uncompressed
// image is written.
func Encode(w io.Writer, m image.Image, opt *Options) error {
d := m.Bounds().Size()
compression := uint32(cNone)
predictor := false
if opt != nil {
compression = opt.Compression.specValue()
// The predictor field is only used with LZW. See page 64 of the spec.
predictor = opt.Predictor && compression == cLZW
}
_, err := io.WriteString(w, leHeader)
if err != nil {
return err
}
// Compressed data is written into a buffer first, so that we
// know the compressed size.
var buf bytes.Buffer
// dst holds the destination for the pixel data of the image --
// either w or a writer to buf.
var dst io.Writer
// imageLen is the length of the pixel data in bytes.
// The offset of the IFD is imageLen + 8 header bytes.
var imageLen int
switch compression {
case cNone:
dst = w
// Write IFD offset before outputting pixel data.
switch m.(type) {
case *image.Paletted:
imageLen = d.X * d.Y * 1
case *image.Gray:
imageLen = d.X * d.Y * 1
case *image.Gray16:
imageLen = d.X * d.Y * 2
case *image.RGBA64:
imageLen = d.X * d.Y * 8
case *image.NRGBA64:
imageLen = d.X * d.Y * 8
default:
imageLen = d.X * d.Y * 4
}
err = binary.Write(w, enc, uint32(imageLen+8))
if err != nil {
return err
}
case cDeflate:
dst = zlib.NewWriter(&buf)
}
pr := uint32(prNone)
photometricInterpretation := uint32(pRGB)
samplesPerPixel := uint32(4)
bitsPerSample := []uint32{8, 8, 8, 8}
extraSamples := uint32(0)
colorMap := []uint32{}
if predictor {
pr = prHorizontal
}
switch m := m.(type) {
case *image.Paletted:
photometricInterpretation = pPaletted
samplesPerPixel = 1
bitsPerSample = []uint32{8}
colorMap = make([]uint32, 256*3)
for i := 0; i < 256 && i < len(m.Palette); i++ {
r, g, b, _ := m.Palette[i].RGBA()
colorMap[i+0*256] = uint32(r)
colorMap[i+1*256] = uint32(g)
colorMap[i+2*256] = uint32(b)
}
err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.Gray:
photometricInterpretation = pBlackIsZero
samplesPerPixel = 1
bitsPerSample = []uint32{8}
err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.Gray16:
photometricInterpretation = pBlackIsZero
samplesPerPixel = 1
bitsPerSample = []uint32{16}
err = encodeGray16(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.NRGBA:
extraSamples = 2 // Unassociated alpha.
err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.NRGBA64:
extraSamples = 2 // Unassociated alpha.
bitsPerSample = []uint32{16, 16, 16, 16}
err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.RGBA:
extraSamples = 1 // Associated alpha.
err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.RGBA64:
extraSamples = 1 // Associated alpha.
bitsPerSample = []uint32{16, 16, 16, 16}
err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
default:
extraSamples = 1 // Associated alpha.
err = encode(dst, m, predictor)
}
if err != nil {
return err
}
if compression != cNone {
if err = dst.(io.Closer).Close(); err != nil {
return err
}
imageLen = buf.Len()
if err = binary.Write(w, enc, uint32(imageLen+8)); err != nil {
return err
}
if _, err = buf.WriteTo(w); err != nil {
return err
}
}
ifd := []ifdEntry{
{tImageWidth, dtShort, []uint32{uint32(d.X)}},
{tImageLength, dtShort, []uint32{uint32(d.Y)}},
{tBitsPerSample, dtShort, bitsPerSample},
{tCompression, dtShort, []uint32{compression}},
{tPhotometricInterpretation, dtShort, []uint32{photometricInterpretation}},
{tStripOffsets, dtLong, []uint32{8}},
{tSamplesPerPixel, dtShort, []uint32{samplesPerPixel}},
{tRowsPerStrip, dtShort, []uint32{uint32(d.Y)}},
{tStripByteCounts, dtLong, []uint32{uint32(imageLen)}},
// There is currently no support for storing the image
// resolution, so give a bogus value of 72x72 dpi.
{tXResolution, dtRational, []uint32{72, 1}},
{tYResolution, dtRational, []uint32{72, 1}},
{tResolutionUnit, dtShort, []uint32{resPerInch}},
}
if pr != prNone {
ifd = append(ifd, ifdEntry{tPredictor, dtShort, []uint32{pr}})
}
if len(colorMap) != 0 {
ifd = append(ifd, ifdEntry{tColorMap, dtShort, colorMap})
}
if extraSamples > 0 {
ifd = append(ifd, ifdEntry{tExtraSamples, dtShort, []uint32{extraSamples}})
}
return writeIFD(w, imageLen+8, ifd)
}
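A small sketch of the encoder's API: build an image and call Encode, optionally selecting Deflate, the one compressed CompressionType this writer offers. The output path and gradient data are made up for illustration:

package main

import (
	"image"
	"image/color"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	// A tiny grayscale gradient stands in for real pixel data.
	m := image.NewGray(image.Rect(0, 0, 16, 16))
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			m.SetGray(x, y, color.Gray{Y: uint8(16 * x)})
		}
	}

	f, err := os.Create("out.tiff") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := tiff.Encode(f, m, &tiff.Options{Compression: tiff.Deflate}); err != nil {
		log.Fatal(err)
	}
}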

403
vendor/golang.org/x/image/vp8/decode.go generated vendored Normal file

@ -0,0 +1,403 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package vp8 implements a decoder for the VP8 lossy image format.
//
// The VP8 specification is RFC 6386.
package vp8 // import "golang.org/x/image/vp8"
// This file implements the top-level decoding algorithm.
import (
"errors"
"image"
"io"
)
// limitReader wraps an io.Reader to read at most n bytes from it.
type limitReader struct {
r io.Reader
n int
}
// ReadFull reads exactly len(p) bytes into p.
func (r *limitReader) ReadFull(p []byte) error {
if len(p) > r.n {
return io.ErrUnexpectedEOF
}
n, err := io.ReadFull(r.r, p)
r.n -= n
return err
}
// FrameHeader is a frame header, as specified in section 9.1.
type FrameHeader struct {
KeyFrame bool
VersionNumber uint8
ShowFrame bool
FirstPartitionLen uint32
Width int
Height int
XScale uint8
YScale uint8
}
const (
nSegment = 4
nSegmentProb = 3
)
// segmentHeader holds segment-related header information.
type segmentHeader struct {
useSegment bool
updateMap bool
relativeDelta bool
quantizer [nSegment]int8
filterStrength [nSegment]int8
prob [nSegmentProb]uint8
}
const (
nRefLFDelta = 4
nModeLFDelta = 4
)
// filterHeader holds filter-related header information.
type filterHeader struct {
simple bool
level int8
sharpness uint8
useLFDelta bool
refLFDelta [nRefLFDelta]int8
modeLFDelta [nModeLFDelta]int8
perSegmentLevel [nSegment]int8
}
// mb is the per-macroblock decode state. A decoder maintains mbw+1 of these
// as it is decoding macroblocks left-to-right and top-to-bottom: mbw for the
// macroblocks in the row above, and one for the macroblock to the left.
type mb struct {
// pred is the predictor mode for the 4 bottom or right 4x4 luma regions.
pred [4]uint8
// nzMask is a mask of 8 bits: 4 for the bottom or right 4x4 luma regions,
// and 2 + 2 for the bottom or right 4x4 chroma regions. A 1 bit indicates
// that that region has non-zero coefficients.
nzMask uint8
// nzY16 is a 0/1 value that is 1 if the macroblock used Y16 prediction and
// had non-zero coefficients.
nzY16 uint8
}
// Decoder decodes VP8 bitstreams into frames. Decoding one frame consists of
// calling Init, DecodeFrameHeader and then DecodeFrame in that order.
// A Decoder can be re-used to decode multiple frames.
type Decoder struct {
// r is the input bitstream.
r limitReader
// scratch is a scratch buffer.
scratch [8]byte
// img is the YCbCr image to decode into.
img *image.YCbCr
// mbw and mbh are the number of 16x16 macroblocks wide and high the image is.
mbw, mbh int
// frameHeader is the frame header. When decoding multiple frames,
// frames that aren't key frames will inherit the Width, Height,
// XScale and YScale of the most recent key frame.
frameHeader FrameHeader
// Other headers.
segmentHeader segmentHeader
filterHeader filterHeader
// The image data is divided into a number of independent partitions.
// There is 1 "first partition" and between 1 and 8 "other partitions"
// for coefficient data.
fp partition
op [8]partition
nOP int
// Quantization factors.
quant [nSegment]quant
// DCT/WHT coefficient decoding probabilities.
tokenProb [nPlane][nBand][nContext][nProb]uint8
useSkipProb bool
skipProb uint8
// Loop filter parameters.
filterParams [nSegment][2]filterParam
perMBFilterParams []filterParam
// The eight fields below relate to the current macroblock being decoded.
//
// Segment-based adjustments.
segment int
// Per-macroblock state for the macroblock immediately left of and those
// macroblocks immediately above the current macroblock.
leftMB mb
upMB []mb
// Bitmasks for which 4x4 regions of coeff contain non-zero coefficients.
nzDCMask, nzACMask uint32
// Predictor modes.
usePredY16 bool // The libwebp C code calls this !is_i4x4_.
predY16 uint8
predC8 uint8
predY4 [4][4]uint8
// The two fields below form a workspace for reconstructing a macroblock.
// Their specific sizes are documented in reconstruct.go.
coeff [1*16*16 + 2*8*8 + 1*4*4]int16
ybr [1 + 16 + 1 + 8][32]uint8
}
// NewDecoder returns a new Decoder.
func NewDecoder() *Decoder {
return &Decoder{}
}
// Init initializes the decoder to read at most n bytes from r.
func (d *Decoder) Init(r io.Reader, n int) {
d.r = limitReader{r, n}
}
// DecodeFrameHeader decodes the frame header.
func (d *Decoder) DecodeFrameHeader() (fh FrameHeader, err error) {
// All frame headers are at least 3 bytes long.
b := d.scratch[:3]
if err = d.r.ReadFull(b); err != nil {
return
}
d.frameHeader.KeyFrame = (b[0] & 1) == 0
d.frameHeader.VersionNumber = (b[0] >> 1) & 7
d.frameHeader.ShowFrame = (b[0]>>4)&1 == 1
d.frameHeader.FirstPartitionLen = uint32(b[0])>>5 | uint32(b[1])<<3 | uint32(b[2])<<11
if !d.frameHeader.KeyFrame {
return d.frameHeader, nil
}
// Frame headers for key frames are an additional 7 bytes long.
b = d.scratch[:7]
if err = d.r.ReadFull(b); err != nil {
return
}
// Check the magic sync code.
if b[0] != 0x9d || b[1] != 0x01 || b[2] != 0x2a {
err = errors.New("vp8: invalid format")
return
}
d.frameHeader.Width = int(b[4]&0x3f)<<8 | int(b[3])
d.frameHeader.Height = int(b[6]&0x3f)<<8 | int(b[5])
d.frameHeader.XScale = b[4] >> 6
d.frameHeader.YScale = b[6] >> 6
d.mbw = (d.frameHeader.Width + 0x0f) >> 4
d.mbh = (d.frameHeader.Height + 0x0f) >> 4
d.segmentHeader = segmentHeader{
prob: [3]uint8{0xff, 0xff, 0xff},
}
d.tokenProb = defaultTokenProb
d.segment = 0
return d.frameHeader, nil
}
// ensureImg ensures that d.img is large enough to hold the decoded frame.
func (d *Decoder) ensureImg() {
if d.img != nil {
p0, p1 := d.img.Rect.Min, d.img.Rect.Max
if p0.X == 0 && p0.Y == 0 && p1.X >= 16*d.mbw && p1.Y >= 16*d.mbh {
return
}
}
m := image.NewYCbCr(image.Rect(0, 0, 16*d.mbw, 16*d.mbh), image.YCbCrSubsampleRatio420)
d.img = m.SubImage(image.Rect(0, 0, d.frameHeader.Width, d.frameHeader.Height)).(*image.YCbCr)
d.perMBFilterParams = make([]filterParam, d.mbw*d.mbh)
d.upMB = make([]mb, d.mbw)
}
// parseSegmentHeader parses the segment header, as specified in section 9.3.
func (d *Decoder) parseSegmentHeader() {
d.segmentHeader.useSegment = d.fp.readBit(uniformProb)
if !d.segmentHeader.useSegment {
d.segmentHeader.updateMap = false
return
}
d.segmentHeader.updateMap = d.fp.readBit(uniformProb)
if d.fp.readBit(uniformProb) {
d.segmentHeader.relativeDelta = !d.fp.readBit(uniformProb)
for i := range d.segmentHeader.quantizer {
d.segmentHeader.quantizer[i] = int8(d.fp.readOptionalInt(uniformProb, 7))
}
for i := range d.segmentHeader.filterStrength {
d.segmentHeader.filterStrength[i] = int8(d.fp.readOptionalInt(uniformProb, 6))
}
}
if !d.segmentHeader.updateMap {
return
}
for i := range d.segmentHeader.prob {
if d.fp.readBit(uniformProb) {
d.segmentHeader.prob[i] = uint8(d.fp.readUint(uniformProb, 8))
} else {
d.segmentHeader.prob[i] = 0xff
}
}
}
// parseFilterHeader parses the filter header, as specified in section 9.4.
func (d *Decoder) parseFilterHeader() {
d.filterHeader.simple = d.fp.readBit(uniformProb)
d.filterHeader.level = int8(d.fp.readUint(uniformProb, 6))
d.filterHeader.sharpness = uint8(d.fp.readUint(uniformProb, 3))
d.filterHeader.useLFDelta = d.fp.readBit(uniformProb)
if d.filterHeader.useLFDelta && d.fp.readBit(uniformProb) {
for i := range d.filterHeader.refLFDelta {
d.filterHeader.refLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6))
}
for i := range d.filterHeader.modeLFDelta {
d.filterHeader.modeLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6))
}
}
if d.filterHeader.level == 0 {
return
}
if d.segmentHeader.useSegment {
for i := range d.filterHeader.perSegmentLevel {
strength := d.segmentHeader.filterStrength[i]
if d.segmentHeader.relativeDelta {
strength += d.filterHeader.level
}
d.filterHeader.perSegmentLevel[i] = strength
}
} else {
d.filterHeader.perSegmentLevel[0] = d.filterHeader.level
}
d.computeFilterParams()
}
// parseOtherPartitions parses the other partitions, as specified in section 9.5.
func (d *Decoder) parseOtherPartitions() error {
const maxNOP = 1 << 3
var partLens [maxNOP]int
d.nOP = 1 << d.fp.readUint(uniformProb, 2)
// The final partition length is implied by the remaining chunk data
// (d.r.n) and the other d.nOP-1 partition lengths. Those d.nOP-1 partition
// lengths are stored as 24-bit uints, i.e. up to 16 MiB per partition.
n := 3 * (d.nOP - 1)
partLens[d.nOP-1] = d.r.n - n
if partLens[d.nOP-1] < 0 {
return io.ErrUnexpectedEOF
}
if n > 0 {
buf := make([]byte, n)
if err := d.r.ReadFull(buf); err != nil {
return err
}
for i := 0; i < d.nOP-1; i++ {
pl := int(buf[3*i+0]) | int(buf[3*i+1])<<8 | int(buf[3*i+2])<<16
if pl > partLens[d.nOP-1] {
return io.ErrUnexpectedEOF
}
partLens[i] = pl
partLens[d.nOP-1] -= pl
}
}
// We check if the final partition length can also fit into a 24-bit uint.
// Strictly speaking, this isn't part of the spec, but it guards against a
// malicious WEBP image that is too large to ReadFull the encoded DCT
// coefficients into memory, whether that's because the actual WEBP file is
// too large, or whether its RIFF metadata lists too large a chunk.
if 1<<24 <= partLens[d.nOP-1] {
return errors.New("vp8: too much data to decode")
}
buf := make([]byte, d.r.n)
if err := d.r.ReadFull(buf); err != nil {
return err
}
for i, pl := range partLens {
if i == d.nOP {
break
}
d.op[i].init(buf[:pl])
buf = buf[pl:]
}
return nil
}
// parseOtherHeaders parses header information other than the frame header.
func (d *Decoder) parseOtherHeaders() error {
// Initialize and parse the first partition.
firstPartition := make([]byte, d.frameHeader.FirstPartitionLen)
if err := d.r.ReadFull(firstPartition); err != nil {
return err
}
d.fp.init(firstPartition)
if d.frameHeader.KeyFrame {
// Read and ignore the color space and pixel clamp values. They are
// specified in section 9.2, but are unimplemented.
d.fp.readBit(uniformProb)
d.fp.readBit(uniformProb)
}
d.parseSegmentHeader()
d.parseFilterHeader()
if err := d.parseOtherPartitions(); err != nil {
return err
}
d.parseQuant()
if !d.frameHeader.KeyFrame {
// Golden and AltRef frames are specified in section 9.7.
// TODO(nigeltao): implement. Note that they are only used for video, not still images.
return errors.New("vp8: Golden / AltRef frames are not implemented")
}
// Read and ignore the refreshLastFrameBuffer bit, specified in section 9.8.
// It applies only to video, and not still images.
d.fp.readBit(uniformProb)
d.parseTokenProb()
d.useSkipProb = d.fp.readBit(uniformProb)
if d.useSkipProb {
d.skipProb = uint8(d.fp.readUint(uniformProb, 8))
}
if d.fp.unexpectedEOF {
return io.ErrUnexpectedEOF
}
return nil
}
// DecodeFrame decodes the frame and returns it as an YCbCr image.
// The image's contents are valid up until the next call to Decoder.Init.
func (d *Decoder) DecodeFrame() (*image.YCbCr, error) {
d.ensureImg()
if err := d.parseOtherHeaders(); err != nil {
return nil, err
}
// Reconstruct the rows.
for mbx := 0; mbx < d.mbw; mbx++ {
d.upMB[mbx] = mb{}
}
for mby := 0; mby < d.mbh; mby++ {
d.leftMB = mb{}
for mbx := 0; mbx < d.mbw; mbx++ {
skip := d.reconstruct(mbx, mby)
fs := d.filterParams[d.segment][btou(!d.usePredY16)]
fs.inner = fs.inner || !skip
d.perMBFilterParams[d.mbw*mby+mbx] = fs
}
}
if d.fp.unexpectedEOF {
return nil, io.ErrUnexpectedEOF
}
for i := 0; i < d.nOP; i++ {
if d.op[i].unexpectedEOF {
return nil, io.ErrUnexpectedEOF
}
}
// Apply the loop filter.
//
// Even if we are using per-segment levels, section 15 says that "loop
// filtering must be skipped entirely if loop_filter_level at either the
// frame header level or macroblock override level is 0".
if d.filterHeader.level != 0 {
if d.filterHeader.simple {
d.simpleFilter()
} else {
d.normalFilter()
}
}
return d.img, nil
}
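As the Decoder doc comment above states, decoding one frame means calling Init, DecodeFrameHeader and DecodeFrame in that order. A rough sketch, assuming a file that holds a bare VP8 key-frame bitstream (the path is hypothetical; in practice the webp package in this same vendor drop drives this decoder):

package main

import (
	"log"
	"os"

	"golang.org/x/image/vp8"
)

func main() {
	f, err := os.Open("frame.vp8") // hypothetical: a raw VP8 key-frame bitstream
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	d := vp8.NewDecoder()
	d.Init(f, int(fi.Size()))
	if _, err := d.DecodeFrameHeader(); err != nil {
		log.Fatal(err)
	}
	img, err := d.DecodeFrame()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("decoded YCbCr frame, bounds:", img.Bounds())
}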

273
vendor/golang.org/x/image/vp8/filter.go generated vendored Normal file

@ -0,0 +1,273 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vp8
// filter2 modifies a 2-pixel wide or 2-pixel high band along an edge.
func filter2(pix []byte, level, index, iStep, jStep int) {
for n := 16; n > 0; n, index = n-1, index+iStep {
p1 := int(pix[index-2*jStep])
p0 := int(pix[index-1*jStep])
q0 := int(pix[index+0*jStep])
q1 := int(pix[index+1*jStep])
if abs(p0-q0)<<1+abs(p1-q1)>>1 > level {
continue
}
a := 3*(q0-p0) + clamp127(p1-q1)
a1 := clamp15((a + 4) >> 3)
a2 := clamp15((a + 3) >> 3)
pix[index-1*jStep] = clamp255(p0 + a2)
pix[index+0*jStep] = clamp255(q0 - a1)
}
}
// filter246 modifies a 2-, 4- or 6-pixel wide or high band along an edge.
func filter246(pix []byte, n, level, ilevel, hlevel, index, iStep, jStep int, fourNotSix bool) {
for ; n > 0; n, index = n-1, index+iStep {
p3 := int(pix[index-4*jStep])
p2 := int(pix[index-3*jStep])
p1 := int(pix[index-2*jStep])
p0 := int(pix[index-1*jStep])
q0 := int(pix[index+0*jStep])
q1 := int(pix[index+1*jStep])
q2 := int(pix[index+2*jStep])
q3 := int(pix[index+3*jStep])
if abs(p0-q0)<<1+abs(p1-q1)>>1 > level {
continue
}
if abs(p3-p2) > ilevel ||
abs(p2-p1) > ilevel ||
abs(p1-p0) > ilevel ||
abs(q1-q0) > ilevel ||
abs(q2-q1) > ilevel ||
abs(q3-q2) > ilevel {
continue
}
if abs(p1-p0) > hlevel || abs(q1-q0) > hlevel {
// Filter 2 pixels.
a := 3*(q0-p0) + clamp127(p1-q1)
a1 := clamp15((a + 4) >> 3)
a2 := clamp15((a + 3) >> 3)
pix[index-1*jStep] = clamp255(p0 + a2)
pix[index+0*jStep] = clamp255(q0 - a1)
} else if fourNotSix {
// Filter 4 pixels.
a := 3 * (q0 - p0)
a1 := clamp15((a + 4) >> 3)
a2 := clamp15((a + 3) >> 3)
a3 := (a1 + 1) >> 1
pix[index-2*jStep] = clamp255(p1 + a3)
pix[index-1*jStep] = clamp255(p0 + a2)
pix[index+0*jStep] = clamp255(q0 - a1)
pix[index+1*jStep] = clamp255(q1 - a3)
} else {
// Filter 6 pixels.
a := clamp127(3*(q0-p0) + clamp127(p1-q1))
a1 := (27*a + 63) >> 7
a2 := (18*a + 63) >> 7
a3 := (9*a + 63) >> 7
pix[index-3*jStep] = clamp255(p2 + a3)
pix[index-2*jStep] = clamp255(p1 + a2)
pix[index-1*jStep] = clamp255(p0 + a1)
pix[index+0*jStep] = clamp255(q0 - a1)
pix[index+1*jStep] = clamp255(q1 - a2)
pix[index+2*jStep] = clamp255(q2 - a3)
}
}
}
// simpleFilter implements the simple filter, as specified in section 15.2.
func (d *Decoder) simpleFilter() {
for mby := 0; mby < d.mbh; mby++ {
for mbx := 0; mbx < d.mbw; mbx++ {
f := d.perMBFilterParams[d.mbw*mby+mbx]
if f.level == 0 {
continue
}
l := int(f.level)
yIndex := (mby*d.img.YStride + mbx) * 16
if mbx > 0 {
filter2(d.img.Y, l+4, yIndex, d.img.YStride, 1)
}
if f.inner {
filter2(d.img.Y, l, yIndex+0x4, d.img.YStride, 1)
filter2(d.img.Y, l, yIndex+0x8, d.img.YStride, 1)
filter2(d.img.Y, l, yIndex+0xc, d.img.YStride, 1)
}
if mby > 0 {
filter2(d.img.Y, l+4, yIndex, 1, d.img.YStride)
}
if f.inner {
filter2(d.img.Y, l, yIndex+d.img.YStride*0x4, 1, d.img.YStride)
filter2(d.img.Y, l, yIndex+d.img.YStride*0x8, 1, d.img.YStride)
filter2(d.img.Y, l, yIndex+d.img.YStride*0xc, 1, d.img.YStride)
}
}
}
}
// normalFilter implements the normal filter, as specified in section 15.3.
func (d *Decoder) normalFilter() {
for mby := 0; mby < d.mbh; mby++ {
for mbx := 0; mbx < d.mbw; mbx++ {
f := d.perMBFilterParams[d.mbw*mby+mbx]
if f.level == 0 {
continue
}
l, il, hl := int(f.level), int(f.ilevel), int(f.hlevel)
yIndex := (mby*d.img.YStride + mbx) * 16
cIndex := (mby*d.img.CStride + mbx) * 8
if mbx > 0 {
filter246(d.img.Y, 16, l+4, il, hl, yIndex, d.img.YStride, 1, false)
filter246(d.img.Cb, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false)
filter246(d.img.Cr, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false)
}
if f.inner {
filter246(d.img.Y, 16, l, il, hl, yIndex+0x4, d.img.YStride, 1, true)
filter246(d.img.Y, 16, l, il, hl, yIndex+0x8, d.img.YStride, 1, true)
filter246(d.img.Y, 16, l, il, hl, yIndex+0xc, d.img.YStride, 1, true)
filter246(d.img.Cb, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true)
filter246(d.img.Cr, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true)
}
if mby > 0 {
filter246(d.img.Y, 16, l+4, il, hl, yIndex, 1, d.img.YStride, false)
filter246(d.img.Cb, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false)
filter246(d.img.Cr, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false)
}
if f.inner {
filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x4, 1, d.img.YStride, true)
filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x8, 1, d.img.YStride, true)
filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0xc, 1, d.img.YStride, true)
filter246(d.img.Cb, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true)
filter246(d.img.Cr, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true)
}
}
}
}
// filterParam holds the loop filter parameters for a macroblock.
type filterParam struct {
// The first three fields are thresholds used by the loop filter to smooth
// over the edges and interior of a macroblock. level is used by both the
// simple and normal filters. The inner level and high edge variance level
// are only used by the normal filter.
level, ilevel, hlevel uint8
// inner is whether the inner loop filter cannot be optimized out as a
// no-op for this particular macroblock.
inner bool
}
// computeFilterParams computes the loop filter parameters, as specified in
// section 15.4.
func (d *Decoder) computeFilterParams() {
for i := range d.filterParams {
baseLevel := d.filterHeader.level
if d.segmentHeader.useSegment {
baseLevel = d.segmentHeader.filterStrength[i]
if d.segmentHeader.relativeDelta {
baseLevel += d.filterHeader.level
}
}
for j := range d.filterParams[i] {
p := &d.filterParams[i][j]
p.inner = j != 0
level := baseLevel
if d.filterHeader.useLFDelta {
// The libwebp C code has a "TODO: only CURRENT is handled for now."
level += d.filterHeader.refLFDelta[0]
if j != 0 {
level += d.filterHeader.modeLFDelta[0]
}
}
if level <= 0 {
p.level = 0
continue
}
if level > 63 {
level = 63
}
ilevel := level
if d.filterHeader.sharpness > 0 {
if d.filterHeader.sharpness > 4 {
ilevel >>= 2
} else {
ilevel >>= 1
}
if x := int8(9 - d.filterHeader.sharpness); ilevel > x {
ilevel = x
}
}
if ilevel < 1 {
ilevel = 1
}
p.ilevel = uint8(ilevel)
p.level = uint8(2*level + ilevel)
if d.frameHeader.KeyFrame {
if level < 15 {
p.hlevel = 0
} else if level < 40 {
p.hlevel = 1
} else {
p.hlevel = 2
}
} else {
if level < 15 {
p.hlevel = 0
} else if level < 20 {
p.hlevel = 1
} else if level < 40 {
p.hlevel = 2
} else {
p.hlevel = 3
}
}
}
}
}
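// As a worked example of the mapping above, assume a key frame with no
// segment overrides, no loop-filter deltas, a frame-header filter level of
// 32 and sharpness 0: ilevel stays 32, level becomes 2*32+32 = 96 and hlevel
// is 1 (since 15 <= 32 < 40). Raising sharpness to 5 shrinks ilevel to
// min(32>>2, 9-5) = 4, so level becomes 2*32+4 = 68 while hlevel is
// unchanged. A hypothetical helper mirroring that arithmetic for an in-range
// level (1..63); it is a sketch only, not part of the decoder:
func exampleFilterLevels(level, sharpness int8) (outLevel, outILevel uint8) {
	ilevel := level
	if sharpness > 0 {
		if sharpness > 4 {
			ilevel >>= 2
		} else {
			ilevel >>= 1
		}
		if x := 9 - sharpness; ilevel > x {
			ilevel = x
		}
	}
	if ilevel < 1 {
		ilevel = 1
	}
	return uint8(2*int(level) + int(ilevel)), uint8(ilevel)
}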
// intSize is either 32 or 64.
const intSize = 32 << (^uint(0) >> 63)
func abs(x int) int {
// m := -1 if x < 0. m := 0 otherwise.
m := x >> (intSize - 1)
	// In two's complement representation, the negation of any number
	// (except the smallest one) can be computed by flipping all the
	// bits and adding 1. This is faster than code with a branch.
// See Hacker's Delight, section 2-4.
return (x ^ m) - m
}
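// A quick worked example, assuming a 64-bit int: for x = -5, m = x >> 63 is
// -1 (all bits set), x ^ m = ^(-5) = 4 and 4 - (-1) = 5; for x = 7, m = 0
// and the expression is simply 7 - 0 = 7. Illustrative helper, not called
// by the decoder:
func absWorkedExample() (int, int) {
	return abs(-5), abs(7) // 5 and 7
}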
func clamp15(x int) int {
if x < -16 {
return -16
}
if x > 15 {
return 15
}
return x
}
func clamp127(x int) int {
if x < -128 {
return -128
}
if x > 127 {
return 127
}
return x
}
func clamp255(x int) uint8 {
if x < 0 {
return 0
}
if x > 255 {
return 255
}
return uint8(x)
}
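// As a concrete example of the "filter 4 pixels" arithmetic earlier in this
// file, take edge pixels p1, p0, q0, q1 = 70, 68, 80, 82: a = 3*(80-68) = 36,
// a1 = clamp15(40>>3) = 5, a2 = clamp15(39>>3) = 4, a3 = (5+1)>>1 = 3, and
// the pixels become 73, 72, 75, 79, smoothing the 12-level step across the
// edge. The clamp15 calls cap the per-pixel adjustment at 15 even for
// extreme edges (p0 = 0, q0 = 255 gives a = 765, yet a1 is still 15).
// Hypothetical helper that mirrors that branch; illustrative only:
func filter4Example(p1, p0, q0, q1 int) (np1, np0, nq0, nq1 uint8) {
	a := 3 * (q0 - p0)
	a1 := clamp15((a + 4) >> 3)
	a2 := clamp15((a + 3) >> 3)
	a3 := (a1 + 1) >> 1
	return clamp255(p1 + a3), clamp255(p0 + a2), clamp255(q0 - a1), clamp255(q1 - a3)
}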

98
vendor/golang.org/x/image/vp8/idct.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vp8
// This file implements the inverse Discrete Cosine Transform and the inverse
// Walsh Hadamard Transform (WHT), as specified in sections 14.3 and 14.4.
func clip8(i int32) uint8 {
if i < 0 {
return 0
}
if i > 255 {
return 255
}
return uint8(i)
}
func (z *Decoder) inverseDCT4(y, x, coeffBase int) {
const (
c1 = 85627 // 65536 * cos(pi/8) * sqrt(2).
c2 = 35468 // 65536 * sin(pi/8) * sqrt(2).
)
var m [4][4]int32
for i := 0; i < 4; i++ {
a := int32(z.coeff[coeffBase+0]) + int32(z.coeff[coeffBase+8])
b := int32(z.coeff[coeffBase+0]) - int32(z.coeff[coeffBase+8])
c := (int32(z.coeff[coeffBase+4])*c2)>>16 - (int32(z.coeff[coeffBase+12])*c1)>>16
d := (int32(z.coeff[coeffBase+4])*c1)>>16 + (int32(z.coeff[coeffBase+12])*c2)>>16
m[i][0] = a + d
m[i][1] = b + c
m[i][2] = b - c
m[i][3] = a - d
coeffBase++
}
for j := 0; j < 4; j++ {
dc := m[0][j] + 4
a := dc + m[2][j]
b := dc - m[2][j]
c := (m[1][j]*c2)>>16 - (m[3][j]*c1)>>16
d := (m[1][j]*c1)>>16 + (m[3][j]*c2)>>16
z.ybr[y+j][x+0] = clip8(int32(z.ybr[y+j][x+0]) + (a+d)>>3)
z.ybr[y+j][x+1] = clip8(int32(z.ybr[y+j][x+1]) + (b+c)>>3)
z.ybr[y+j][x+2] = clip8(int32(z.ybr[y+j][x+2]) + (b-c)>>3)
z.ybr[y+j][x+3] = clip8(int32(z.ybr[y+j][x+3]) + (a-d)>>3)
}
}
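// The fixed-point constants follow directly from the DCT basis:
// c1 = 65536*cos(pi/8)*sqrt(2) ≈ 65536*0.92388*1.41421 ≈ 85627 and
// c2 = 65536*sin(pi/8)*sqrt(2) ≈ 65536*0.38268*1.41421 ≈ 35468, so each
// (x*c)>>16 above is a 16.16 fixed-point multiplication by cos(pi/8)*sqrt(2)
// or sin(pi/8)*sqrt(2). The "+ 4" on the DC term and the final ">> 3" round
// the result of the two 1-D passes before it is added to the prediction.
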
func (z *Decoder) inverseDCT4DCOnly(y, x, coeffBase int) {
dc := (int32(z.coeff[coeffBase+0]) + 4) >> 3
for j := 0; j < 4; j++ {
for i := 0; i < 4; i++ {
z.ybr[y+j][x+i] = clip8(int32(z.ybr[y+j][x+i]) + dc)
}
}
}
func (z *Decoder) inverseDCT8(y, x, coeffBase int) {
z.inverseDCT4(y+0, x+0, coeffBase+0*16)
z.inverseDCT4(y+0, x+4, coeffBase+1*16)
z.inverseDCT4(y+4, x+0, coeffBase+2*16)
z.inverseDCT4(y+4, x+4, coeffBase+3*16)
}
func (z *Decoder) inverseDCT8DCOnly(y, x, coeffBase int) {
z.inverseDCT4DCOnly(y+0, x+0, coeffBase+0*16)
z.inverseDCT4DCOnly(y+0, x+4, coeffBase+1*16)
z.inverseDCT4DCOnly(y+4, x+0, coeffBase+2*16)
z.inverseDCT4DCOnly(y+4, x+4, coeffBase+3*16)
}
func (d *Decoder) inverseWHT16() {
var m [16]int32
for i := 0; i < 4; i++ {
a0 := int32(d.coeff[384+0+i]) + int32(d.coeff[384+12+i])
a1 := int32(d.coeff[384+4+i]) + int32(d.coeff[384+8+i])
a2 := int32(d.coeff[384+4+i]) - int32(d.coeff[384+8+i])
a3 := int32(d.coeff[384+0+i]) - int32(d.coeff[384+12+i])
m[0+i] = a0 + a1
m[8+i] = a0 - a1
m[4+i] = a3 + a2
m[12+i] = a3 - a2
}
out := 0
for i := 0; i < 4; i++ {
dc := m[0+i*4] + 3
a0 := dc + m[3+i*4]
a1 := m[1+i*4] + m[2+i*4]
a2 := m[1+i*4] - m[2+i*4]
a3 := dc - m[3+i*4]
d.coeff[out+0] = int16((a0 + a1) >> 3)
d.coeff[out+16] = int16((a3 + a2) >> 3)
d.coeff[out+32] = int16((a0 - a1) >> 3)
d.coeff[out+48] = int16((a3 - a2) >> 3)
out += 64
}
}
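// In the loop above, out steps through 0, 64, 128 and 192 and each pass
// writes offsets 0, 16, 32 and 48, so the sixteen WHT outputs land at
// d.coeff[16*k] for k = 0..15, i.e. one value per 16-coefficient luma block,
// where it becomes that block's DC term (the WHT inputs themselves live at
// d.coeff[384:400], after the 16 luma and 8 chroma blocks). This reading of
// the coefficient layout is inferred from the indices used here and in
// inverseDCT8; the sketch below just reproduces the output index stepping:
func wht16OutputIndices() []int {
	var idx []int
	for out := 0; out < 256; out += 64 {
		for off := 0; off < 64; off += 16 {
			idx = append(idx, out+off) // 0, 16, 32, ..., 240
		}
	}
	return idx
}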

Some files were not shown because too many files have changed in this diff.