*/
+ nil,
+ /* 47 timeSecfrac <- <('.' digit+)> */
+ nil,
+ /* 48 timeNumoffset <- <(('-' / '+') timeHour ':' timeMinute)> */
+ nil,
+ /* 49 timeOffset <- <('Z' / timeNumoffset)> */
+ nil,
+ /* 50 partialTime <- <(timeHour ':' timeMinute ':' timeSecond timeSecfrac?)> */
+ func() bool {
+ position289, tokenIndex289 := position, tokenIndex
+ {
+ position290 := position
+ if !_rules[ruletimeHour]() {
+ goto l289
+ }
+ if buffer[position] != rune(':') {
+ goto l289
+ }
+ position++
+ if !_rules[ruletimeMinute]() {
+ goto l289
+ }
+ if buffer[position] != rune(':') {
+ goto l289
+ }
+ position++
+ {
+ position291 := position
+ if !_rules[ruledigitDual]() {
+ goto l289
+ }
+ add(ruletimeSecond, position291)
+ }
+ {
+ position292, tokenIndex292 := position, tokenIndex
+ {
+ position294 := position
+ if buffer[position] != rune('.') {
+ goto l292
+ }
+ position++
+ if !_rules[ruledigit]() {
+ goto l292
+ }
+ l295:
+ {
+ position296, tokenIndex296 := position, tokenIndex
+ if !_rules[ruledigit]() {
+ goto l296
+ }
+ goto l295
+ l296:
+ position, tokenIndex = position296, tokenIndex296
+ }
+ add(ruletimeSecfrac, position294)
+ }
+ goto l293
+ l292:
+ position, tokenIndex = position292, tokenIndex292
+ }
+ l293:
+ add(rulepartialTime, position290)
+ }
+ return true
+ l289:
+ position, tokenIndex = position289, tokenIndex289
+ return false
+ },
+ /* 51 fullDate <- <(dateFullYear '-' dateMonth '-' dateMDay)> */
+ nil,
+ /* 52 fullTime <- <(partialTime timeOffset?)> */
+ nil,
+ /* 53 datetime <- <((fullDate ('T' fullTime)?) / partialTime)> */
+ nil,
+ /* 54 digit <- <[0-9]> */
+ func() bool {
+ position300, tokenIndex300 := position, tokenIndex
+ {
+ position301 := position
+ if c := buffer[position]; c < rune('0') || c > rune('9') {
+ goto l300
+ }
+ position++
+ add(ruledigit, position301)
+ }
+ return true
+ l300:
+ position, tokenIndex = position300, tokenIndex300
+ return false
+ },
+ /* 55 digitDual <- <(digit digit)> */
+ func() bool {
+ position302, tokenIndex302 := position, tokenIndex
+ {
+ position303 := position
+ if !_rules[ruledigit]() {
+ goto l302
+ }
+ if !_rules[ruledigit]() {
+ goto l302
+ }
+ add(ruledigitDual, position303)
+ }
+ return true
+ l302:
+ position, tokenIndex = position302, tokenIndex302
+ return false
+ },
+ /* 56 digitQuad <- <(digitDual digitDual)> */
+ nil,
+ /* 57 array <- <('[' Action23 wsnl arrayValues? wsnl ']')> */
+ nil,
+ /* 58 arrayValues <- <(val Action24 (wsnl comment? wsnl arraySep wsnl comment? wsnl val Action25)* wsnl arraySep? wsnl comment?)> */
+ nil,
+ /* 59 arraySep <- <','> */
+ func() bool {
+ position307, tokenIndex307 := position, tokenIndex
+ {
+ position308 := position
+ if buffer[position] != rune(',') {
+ goto l307
+ }
+ position++
+ add(rulearraySep, position308)
+ }
+ return true
+ l307:
+ position, tokenIndex = position307, tokenIndex307
+ return false
+ },
+ /* 61 Action0 <- <{ _ = buffer }> */
+ nil,
+ nil,
+ /* 63 Action1 <- <{ p.SetTableString(begin, end) }> */
+ nil,
+ /* 64 Action2 <- <{ p.AddLineCount(end - begin) }> */
+ nil,
+ /* 65 Action3 <- <{ p.AddLineCount(end - begin) }> */
+ nil,
+ /* 66 Action4 <- <{ p.AddKeyValue() }> */
+ nil,
+ /* 67 Action5 <- <{ p.SetKey(p.buffer, begin, end) }> */
+ nil,
+ /* 68 Action6 <- <{ p.SetKey(p.buffer, begin, end) }> */
+ nil,
+ /* 69 Action7 <- <{ p.SetTime(begin, end) }> */
+ nil,
+ /* 70 Action8 <- <{ p.SetFloat64(begin, end) }> */
+ nil,
+ /* 71 Action9 <- <{ p.SetInt64(begin, end) }> */
+ nil,
+ /* 72 Action10 <- <{ p.SetString(begin, end) }> */
+ nil,
+ /* 73 Action11 <- <{ p.SetBool(begin, end) }> */
+ nil,
+ /* 74 Action12 <- <{ p.SetArray(begin, end) }> */
+ nil,
+ /* 75 Action13 <- <{ p.SetTable(p.buffer, begin, end) }> */
+ nil,
+ /* 76 Action14 <- <{ p.SetArrayTable(p.buffer, begin, end) }> */
+ nil,
+ /* 77 Action15 <- <{ p.StartInlineTable() }> */
+ nil,
+ /* 78 Action16 <- <{ p.EndInlineTable() }> */
+ nil,
+ /* 79 Action17 <- <{ p.AddTableKey() }> */
+ nil,
+ /* 80 Action18 <- <{ p.SetBasicString(p.buffer, begin, end) }> */
+ nil,
+ /* 81 Action19 <- <{ p.SetMultilineString() }> */
+ nil,
+ /* 82 Action20 <- <{ p.AddMultilineBasicBody(p.buffer, begin, end) }> */
+ nil,
+ /* 83 Action21 <- <{ p.SetLiteralString(p.buffer, begin, end) }> */
+ nil,
+ /* 84 Action22 <- <{ p.SetMultilineLiteralString(p.buffer, begin, end) }> */
+ nil,
+ /* 85 Action23 <- <{ p.StartArray() }> */
+ nil,
+ /* 86 Action24 <- <{ p.AddArrayVal() }> */
+ nil,
+ /* 87 Action25 <- <{ p.AddArrayVal() }> */
+ nil,
+ }
+ p.rules = _rules
+}
diff --git a/vendor/github.com/naoina/toml/util.go b/vendor/github.com/naoina/toml/util.go
new file mode 100644
index 00000000..f882f4e5
--- /dev/null
+++ b/vendor/github.com/naoina/toml/util.go
@@ -0,0 +1,65 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+const fieldTagName = "toml"
+
+// fieldCache maps normalized field names to their position in a struct.
+type fieldCache struct {
+ named map[string]fieldInfo // fields with an explicit name in tag
+ auto map[string]fieldInfo // fields with auto-assigned normalized names
+}
+
+type fieldInfo struct {
+ index []int
+ name string
+ ignored bool
+}
+
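+// makeFieldCache scans the exported fields of rt: fields explicitly named in
+// their `toml` tag go into named under that name; fields without a tag name
+// (or marked "-") go into auto under their normalized Go field name.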
+func makeFieldCache(cfg *Config, rt reflect.Type) fieldCache {
+ named, auto := make(map[string]fieldInfo), make(map[string]fieldInfo)
+ for i := 0; i < rt.NumField(); i++ {
+ ft := rt.Field(i)
+ // skip unexported fields
+ if ft.PkgPath != "" && !ft.Anonymous {
+ continue
+ }
+ col, _ := extractTag(ft.Tag.Get(fieldTagName))
+ info := fieldInfo{index: ft.Index, name: ft.Name, ignored: col == "-"}
+ if col == "" || col == "-" {
+ auto[cfg.NormFieldName(rt, ft.Name)] = info
+ } else {
+ named[col] = info
+ }
+ }
+ return fieldCache{named, auto}
+}
+
+func (fc fieldCache) findField(cfg *Config, rv reflect.Value, name string) (reflect.Value, string, error) {
+ info, found := fc.named[name]
+ if !found {
+ info, found = fc.auto[cfg.NormFieldName(rv.Type(), name)]
+ }
+ if !found {
+ if cfg.MissingField == nil {
+ return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' is not defined in %v", name, rv.Type())
+ } else {
+ return reflect.Value{}, "", cfg.MissingField(rv.Type(), name)
+ }
+ } else if info.ignored {
+ return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' in %v cannot be set through TOML", name, rv.Type())
+ }
+ return rv.FieldByIndex(info.index), info.name, nil
+}
+
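+// extractTag splits a `toml` struct tag value such as "name,omitempty" into
+// its name and any trailing options, trimming surrounding spaces.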
+func extractTag(tag string) (col, rest string) {
+ tags := strings.SplitN(tag, ",", 2)
+ if len(tags) == 2 {
+ return strings.TrimSpace(tags[0]), strings.TrimSpace(tags[1])
+ }
+ return strings.TrimSpace(tags[0]), ""
+}
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
new file mode 100644
index 00000000..583bdae6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..041cdc4a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,5 @@
+**Issue:** add link to pelletier/go-toml issue here
+
+Explanation of what this pull request does.
+
+More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
new file mode 100644
index 00000000..f003827f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -0,0 +1,133 @@
+# go-toml
+
+Go library for the [TOML](https://github.com/mojombo/toml) format.
+
+This library supports TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md).
+
+[GoDoc](http://godoc.org/github.com/pelletier/go-toml)
+[License](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[Build Status](https://travis-ci.org/pelletier/go-toml)
+[Windows Build Status](https://ci.appveyor.com/project/pelletier/go-toml/branch/master)
+[Coverage Status](https://codecov.io/gh/pelletier/go-toml)
+[Go Report Card](https://goreportcard.com/report/github.com/pelletier/go-toml)
+[FOSSA Status](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+ User string
+ Password string
+}
+type Config struct {
+ Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+ fmt.Println("Query result %d: %v", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[godoc.org](http://godoc.org/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides two handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomll
+ tomll --help
+ ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomljson
+ tomljson --help
+ ```
+
+## Contribute
+
+Feel free to report bugs and patches using GitHub's pull requests system on
+[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
+much appreciated!
+
+### Run tests
+
+You have to make sure two kinds of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/appveyor.yml b/vendor/github.com/pelletier/go-toml/appveyor.yml
new file mode 100644
index 00000000..f31f8594
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/appveyor.yml
@@ -0,0 +1,36 @@
+version: "{build}"
+
+# Source Config
+clone_folder: c:\gopath\src\github.com\pelletier\go-toml
+
+# Build host
+environment:
+ GOPATH: c:\gopath
+ DEPTESTBYPASS501: 1
+ GOVERSION: 1.11
+
+init:
+ - git config --global core.autocrlf input
+
+# Build
+install:
+ # Install the specific Go version.
+ - rmdir c:\go /s /q
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
+ - msiexec /i go%GOVERSION%.windows-amd64.msi /q
+ - choco install bzr
+  - set Path=c:\go\bin;c:\gopath\bin;C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\;%Path%
+ - go version
+ - go env
+
+build: false
+deploy: false
+
+test_script:
+ - go get github.com/davecgh/go-spew/spew
+ - go get gopkg.in/yaml.v2
+ - go get github.com/BurntSushi/toml
+ - go build github.com/pelletier/go-toml
+ - go test github.com/pelletier/go-toml
+ - go test github.com/pelletier/go-toml/cmd/tomljson
+ - go test github.com/pelletier/go-toml/query
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json
new file mode 100644
index 00000000..86f99c6a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.json
@@ -0,0 +1,164 @@
+{
+ "array": {
+ "key1": [
+ 1,
+ 2,
+ 3
+ ],
+ "key2": [
+ "red",
+ "yellow",
+ "green"
+ ],
+ "key3": [
+ [
+ 1,
+ 2
+ ],
+ [
+ 3,
+ 4,
+ 5
+ ]
+ ],
+ "key4": [
+ [
+ 1,
+ 2
+ ],
+ [
+ "a",
+ "b",
+ "c"
+ ]
+ ],
+ "key5": [
+ 1,
+ 2,
+ 3
+ ],
+ "key6": [
+ 1,
+ 2
+ ]
+ },
+ "boolean": {
+ "False": false,
+ "True": true
+ },
+ "datetime": {
+ "key1": "1979-05-27T07:32:00Z",
+ "key2": "1979-05-27T00:32:00-07:00",
+ "key3": "1979-05-27T00:32:00.999999-07:00"
+ },
+ "float": {
+ "both": {
+ "key": 6.626e-34
+ },
+ "exponent": {
+ "key1": 5e+22,
+ "key2": 1000000,
+ "key3": -0.02
+ },
+ "fractional": {
+ "key1": 1,
+ "key2": 3.1415,
+ "key3": -0.01
+ },
+ "underscores": {
+ "key1": 9224617.445991227,
+ "key2": 1e+100
+ }
+ },
+ "fruit": [{
+ "name": "apple",
+ "physical": {
+ "color": "red",
+ "shape": "round"
+ },
+ "variety": [{
+ "name": "red delicious"
+ },
+ {
+ "name": "granny smith"
+ }
+ ]
+ },
+ {
+ "name": "banana",
+ "variety": [{
+ "name": "plantain"
+ }]
+ }
+ ],
+ "integer": {
+ "key1": 99,
+ "key2": 42,
+ "key3": 0,
+ "key4": -17,
+ "underscores": {
+ "key1": 1000,
+ "key2": 5349221,
+ "key3": 12345
+ }
+ },
+ "products": [{
+ "name": "Hammer",
+ "sku": 738594937
+ },
+ {},
+ {
+ "color": "gray",
+ "name": "Nail",
+ "sku": 284758393
+ }
+ ],
+ "string": {
+ "basic": {
+ "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+ },
+ "literal": {
+ "multiline": {
+ "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n",
+ "regex2": "I [dw]on't need \\d{2} apples"
+ },
+ "quoted": "Tom \"Dubs\" Preston-Werner",
+ "regex": "\u003c\\i\\c*\\s*\u003e",
+ "winpath": "C:\\Users\\nodejs\\templates",
+ "winpath2": "\\\\ServerX\\admin$\\system32\\"
+ },
+ "multiline": {
+ "continued": {
+ "key1": "The quick brown fox jumps over the lazy dog.",
+ "key2": "The quick brown fox jumps over the lazy dog.",
+ "key3": "The quick brown fox jumps over the lazy dog."
+ },
+ "key1": "One\nTwo",
+ "key2": "One\nTwo",
+ "key3": "One\nTwo"
+ }
+ },
+ "table": {
+ "inline": {
+ "name": {
+ "first": "Tom",
+ "last": "Preston-Werner"
+ },
+ "point": {
+ "x": 1,
+ "y": 2
+ }
+ },
+ "key": "value",
+ "subtable": {
+ "key": "another value"
+ }
+ },
+ "x": {
+ "y": {
+ "z": {
+ "w": {}
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100755
index 00000000..8b8bb528
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! `hash benchstat 2>/dev/null`; then
+ echo "Installing benchstat"
+ go get golang.org/x/perf/cmd/benchstat
+ go install golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem | tee ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml
new file mode 100644
index 00000000..dfd77e09
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.toml
@@ -0,0 +1,244 @@
+################################################################################
+## Comment
+
+# Speak your mind with the hash symbol. They go from the symbol to the end of
+# the line.
+
+
+################################################################################
+## Table
+
+# Tables (also known as hash tables or dictionaries) are collections of
+# key/value pairs. They appear in square brackets on a line by themselves.
+
+[table]
+
+key = "value" # Yeah, you can do this.
+
+# Nested tables are denoted by table names with dots in them. Name your tables
+# whatever crap you please, just don't use #, ., [ or ].
+
+[table.subtable]
+
+key = "another value"
+
+# You don't need to specify all the super-tables if you don't want to. TOML
+# knows how to do it for you.
+
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
+
+
+################################################################################
+## Inline Table
+
+# Inline tables provide a more compact syntax for expressing tables. They are
+# especially useful for grouped data that can otherwise quickly become verbose.
+# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
+# allowed between the curly braces unless they are valid within a value.
+
+[table.inline]
+
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+
+
+################################################################################
+## String
+
+# There are four ways to express strings: basic, multi-line basic, literal, and
+# multi-line literal. All strings must contain only valid UTF-8 characters.
+
+[string.basic]
+
+basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+[string.multiline]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "One\nTwo"
+key2 = """One\nTwo"""
+key3 = """
+One
+Two"""
+
+[string.multiline.continued]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "The quick brown fox jumps over the lazy dog."
+
+key2 = """
+The quick brown \
+
+
+ fox jumps over \
+ the lazy dog."""
+
+key3 = """\
+ The quick brown \
+ fox jumps over \
+ the lazy dog.\
+ """
+
+[string.literal]
+
+# What you see is what you get.
+winpath = 'C:\Users\nodejs\templates'
+winpath2 = '\\ServerX\admin$\system32\'
+quoted = 'Tom "Dubs" Preston-Werner'
+regex = '<\i\c*\s*>'
+
+
+[string.literal.multiline]
+
+regex2 = '''I [dw]on't need \d{2} apples'''
+lines = '''
+The first newline is
+trimmed in raw strings.
+ All other whitespace
+ is preserved.
+'''
+
+
+################################################################################
+## Integer
+
+# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
+# Negative numbers are prefixed with a minus sign.
+
+[integer]
+
+key1 = +99
+key2 = 42
+key3 = 0
+key4 = -17
+
+[integer.underscores]
+
+# For large numbers, you may use underscores to enhance readability. Each
+# underscore must be surrounded by at least one digit.
+key1 = 1_000
+key2 = 5_349_221
+key3 = 1_2_3_4_5 # valid but inadvisable
+
+
+################################################################################
+## Float
+
+# A float consists of an integer part (which may be prefixed with a plus or
+# minus sign) followed by a fractional part and/or an exponent part.
+
+[float.fractional]
+
+key1 = +1.0
+key2 = 3.1415
+key3 = -0.01
+
+[float.exponent]
+
+key1 = 5e+22
+key2 = 1e6
+key3 = -2E-2
+
+[float.both]
+
+key = 6.626e-34
+
+[float.underscores]
+
+key1 = 9_224_617.445_991_228_313
+key2 = 1e1_00
+
+
+################################################################################
+## Boolean
+
+# Booleans are just the tokens you're used to. Always lowercase.
+
+[boolean]
+
+True = true
+False = false
+
+
+################################################################################
+## Datetime
+
+# Datetimes are RFC 3339 dates.
+
+[datetime]
+
+key1 = 1979-05-27T07:32:00Z
+key2 = 1979-05-27T00:32:00-07:00
+key3 = 1979-05-27T00:32:00.999999-07:00
+
+
+################################################################################
+## Array
+
+# Arrays are square brackets with other primitives inside. Whitespace is
+# ignored. Elements are separated by commas. Data types may not be mixed.
+
+[array]
+
+key1 = [ 1, 2, 3 ]
+key2 = [ "red", "yellow", "green" ]
+key3 = [ [ 1, 2 ], [3, 4, 5] ]
+#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
+
+# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
+# also ignore newlines between the brackets. Terminating commas are ok before
+# the closing bracket.
+
+key5 = [
+ 1, 2, 3
+]
+key6 = [
+ 1,
+ 2, # this is ok
+]
+
+
+################################################################################
+## Array of Tables
+
+# These can be expressed by using a table name in double brackets. Each table
+# with the same double bracketed name will be an element in the array. The
+# tables are inserted in the order encountered.
+
+[[products]]
+
+name = "Hammer"
+sku = 738594937
+
+[[products]]
+
+[[products]]
+
+name = "Nail"
+sku = 284758393
+color = "gray"
+
+
+# You can create nested arrays of tables as well.
+
+[[fruit]]
+ name = "apple"
+
+ [fruit.physical]
+ color = "red"
+ shape = "round"
+
+ [[fruit.variety]]
+ name = "red delicious"
+
+ [[fruit.variety]]
+ name = "granny smith"
+
+[[fruit]]
+ name = "banana"
+
+ [[fruit.variety]]
+ name = "plantain"
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml
new file mode 100644
index 00000000..0bd19f08
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.yml
@@ -0,0 +1,121 @@
+---
+array:
+ key1:
+ - 1
+ - 2
+ - 3
+ key2:
+ - red
+ - yellow
+ - green
+ key3:
+ - - 1
+ - 2
+ - - 3
+ - 4
+ - 5
+ key4:
+ - - 1
+ - 2
+ - - a
+ - b
+ - c
+ key5:
+ - 1
+ - 2
+ - 3
+ key6:
+ - 1
+ - 2
+boolean:
+ 'False': false
+ 'True': true
+datetime:
+ key1: '1979-05-27T07:32:00Z'
+ key2: '1979-05-27T00:32:00-07:00'
+ key3: '1979-05-27T00:32:00.999999-07:00'
+float:
+ both:
+ key: 6.626e-34
+ exponent:
+ key1: 5.0e+22
+ key2: 1000000
+ key3: -0.02
+ fractional:
+ key1: 1
+ key2: 3.1415
+ key3: -0.01
+ underscores:
+ key1: 9224617.445991227
+ key2: 1.0e+100
+fruit:
+- name: apple
+ physical:
+ color: red
+ shape: round
+ variety:
+ - name: red delicious
+ - name: granny smith
+- name: banana
+ variety:
+ - name: plantain
+integer:
+ key1: 99
+ key2: 42
+ key3: 0
+ key4: -17
+ underscores:
+ key1: 1000
+ key2: 5349221
+ key3: 12345
+products:
+- name: Hammer
+ sku: 738594937
+- {}
+- color: gray
+ name: Nail
+ sku: 284758393
+string:
+ basic:
+ basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+ literal:
+ multiline:
+ lines: |
+ The first newline is
+ trimmed in raw strings.
+ All other whitespace
+ is preserved.
+ regex2: I [dw]on't need \d{2} apples
+ quoted: Tom "Dubs" Preston-Werner
+ regex: "<\\i\\c*\\s*>"
+ winpath: C:\Users\nodejs\templates
+ winpath2: "\\\\ServerX\\admin$\\system32\\"
+ multiline:
+ continued:
+ key1: The quick brown fox jumps over the lazy dog.
+ key2: The quick brown fox jumps over the lazy dog.
+ key3: The quick brown fox jumps over the lazy dog.
+ key1: |-
+ One
+ Two
+ key2: |-
+ One
+ Two
+ key3: |-
+ One
+ Two
+table:
+ inline:
+ name:
+ first: Tom
+ last: Preston-Werner
+ point:
+ x: 1
+ y: 2
+ key: value
+ subtable:
+ key: another value
+x:
+ y:
+ z:
+ w: {}
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
new file mode 100644
index 00000000..d5fd98c0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
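+//
+// For example (a minimal sketch, error handling omitted):
+//
+//	tree, _ := toml.Load("[server]\nport = 8080")
+//	port := tree.Get("server.port").(int64)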
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
new file mode 100644
index 00000000..12950a16
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -0,0 +1,29 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml
new file mode 100644
index 00000000..3d902f28
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example.toml
@@ -0,0 +1,29 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go
new file mode 100644
index 00000000..14570c8d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package toml
+
+func Fuzz(data []byte) int {
+ tree, err := LoadBytes(data)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ str, err := tree.ToTomlString()
+ if err != nil {
+ if str != "" {
+ panic(`str must be "" if there is an error`)
+ }
+ panic(err)
+ }
+
+ tree, err = Load(str)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh
new file mode 100755
index 00000000..3204b4c4
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.sh
@@ -0,0 +1,15 @@
+#! /bin/sh
+set -eu
+
+go get github.com/dvyukov/go-fuzz/go-fuzz
+go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+if [ ! -e toml-fuzz.zip ]; then
+ go-fuzz-build github.com/pelletier/go-toml
+fi
+
+rm -fr fuzz
+mkdir -p fuzz/corpus
+cp *.toml fuzz/corpus
+
+go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
new file mode 100644
index 00000000..284db646
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -0,0 +1,85 @@
+// Parsing keys handling both bare and quoted keys.
+
+package toml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "unicode"
+)
+
+// Convert the bare key group string to an array.
+// The input supports double quotation to allow "." inside the key name,
+// but escape sequences are not supported. Lexers must unescape them beforehand.
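+// For example, parseKey(`a."b.c".d`) returns []string{"a", "b.c", "d"}.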
+func parseKey(key string) ([]string, error) {
+ groups := []string{}
+ var buffer bytes.Buffer
+ inQuotes := false
+ wasInQuotes := false
+ ignoreSpace := true
+ expectDot := false
+
+ for _, char := range key {
+ if ignoreSpace {
+ if char == ' ' {
+ continue
+ }
+ ignoreSpace = false
+ }
+ switch char {
+ case '"':
+ if inQuotes {
+ groups = append(groups, buffer.String())
+ buffer.Reset()
+ wasInQuotes = true
+ }
+ inQuotes = !inQuotes
+ expectDot = false
+ case '.':
+ if inQuotes {
+ buffer.WriteRune(char)
+ } else {
+ if !wasInQuotes {
+ if buffer.Len() == 0 {
+ return nil, errors.New("empty table key")
+ }
+ groups = append(groups, buffer.String())
+ buffer.Reset()
+ }
+ ignoreSpace = true
+ expectDot = false
+ wasInQuotes = false
+ }
+ case ' ':
+ if inQuotes {
+ buffer.WriteRune(char)
+ } else {
+ expectDot = true
+ }
+ default:
+ if !inQuotes && !isValidBareChar(char) {
+ return nil, fmt.Errorf("invalid bare character: %c", char)
+ }
+ if !inQuotes && expectDot {
+ return nil, errors.New("what?")
+ }
+ buffer.WriteRune(char)
+ expectDot = false
+ }
+ }
+ if inQuotes {
+ return nil, errors.New("mismatched quotes")
+ }
+ if buffer.Len() > 0 {
+ groups = append(groups, buffer.String())
+ }
+ if len(groups) == 0 {
+ return nil, errors.New("empty key")
+ }
+ return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+ return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
+}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
new file mode 100644
index 00000000..d11de428
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -0,0 +1,750 @@
+// TOML lexer.
+//
+// Written using the principles developed by Rob Pike in
+// http://www.youtube.com/watch?v=HxaD_trXwRE
+
+package toml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var dateRegexp *regexp.Regexp
+
+// Define state functions
+type tomlLexStateFn func() tomlLexStateFn
+
+// Define lexer
+type tomlLexer struct {
+ inputIdx int
+ input []rune // Textual source
+ currentTokenStart int
+ currentTokenStop int
+ tokens []token
+ depth int
+ line int
+ col int
+ endbufferLine int
+ endbufferCol int
+}
+
+// Basic read operations on input
+
+func (l *tomlLexer) read() rune {
+ r := l.peek()
+ if r == '\n' {
+ l.endbufferLine++
+ l.endbufferCol = 1
+ } else {
+ l.endbufferCol++
+ }
+ l.inputIdx++
+ return r
+}
+
+func (l *tomlLexer) next() rune {
+ r := l.read()
+
+ if r != eof {
+ l.currentTokenStop++
+ }
+ return r
+}
+
+func (l *tomlLexer) ignore() {
+ l.currentTokenStart = l.currentTokenStop
+ l.line = l.endbufferLine
+ l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+ l.next()
+ l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+ for i := 0; i < n; i++ {
+ l.next()
+ }
+}
+
+func (l *tomlLexer) emitWithValue(t tokenType, value string) {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: t,
+ val: value,
+ })
+ l.ignore()
+}
+
+func (l *tomlLexer) emit(t tokenType) {
+ l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
+}
+
+func (l *tomlLexer) peek() rune {
+ if l.inputIdx >= len(l.input) {
+ return eof
+ }
+ return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+ maxIdx := len(l.input)
+ upperIdx := l.inputIdx + size // FIXME: potential overflow
+ if upperIdx > maxIdx {
+ upperIdx = maxIdx
+ }
+ return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+ return next == l.peekString(len(next))
+}
+
+// Error management
+
+func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: tokenError,
+ val: fmt.Sprintf(format, args...),
+ })
+ return nil
+}
+
+// State functions
+
+func (l *tomlLexer) lexVoid() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '[':
+ return l.lexTableKey
+ case '#':
+ return l.lexComment(l.lexVoid)
+ case '=':
+ return l.lexEqual
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ continue
+ }
+
+ if isSpace(next) {
+ l.skip()
+ }
+
+ if l.depth > 0 {
+ return l.lexRvalue
+ }
+
+ if isKeyStartChar(next) {
+ return l.lexKey
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexRvalue() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '.':
+ return l.errorf("cannot start float with a dot")
+ case '=':
+ return l.lexEqual
+ case '[':
+ l.depth++
+ return l.lexLeftBracket
+ case ']':
+ l.depth--
+ return l.lexRightBracket
+ case '{':
+ return l.lexLeftCurlyBrace
+ case '}':
+ return l.lexRightCurlyBrace
+ case '#':
+ return l.lexComment(l.lexRvalue)
+ case '"':
+ return l.lexString
+ case '\'':
+ return l.lexLiteralString
+ case ',':
+ return l.lexComma
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ if l.depth == 0 {
+ return l.lexVoid
+ }
+ return l.lexRvalue
+ case '_':
+ return l.errorf("cannot start number with underscore")
+ }
+
+ if l.follow("true") {
+ return l.lexTrue
+ }
+
+ if l.follow("false") {
+ return l.lexFalse
+ }
+
+ if l.follow("inf") {
+ return l.lexInf
+ }
+
+ if l.follow("nan") {
+ return l.lexNan
+ }
+
+ if isSpace(next) {
+ l.skip()
+ continue
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+
+ possibleDate := l.peekString(35)
+ dateMatch := dateRegexp.FindString(possibleDate)
+ if dateMatch != "" {
+ l.fastForward(len(dateMatch))
+ return l.lexDate
+ }
+
+ if next == '+' || next == '-' || isDigit(next) {
+ return l.lexNumber
+ }
+
+ if isAlphanumeric(next) {
+ return l.lexKey
+ }
+
+ return l.errorf("no value can start with %c", next)
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftCurlyBrace)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightCurlyBrace)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexDate() tomlLexStateFn {
+ l.emit(tokenDate)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTrue() tomlLexStateFn {
+ l.fastForward(4)
+ l.emit(tokenTrue)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexFalse() tomlLexStateFn {
+ l.fastForward(5)
+ l.emit(tokenFalse)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexInf() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenInf)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNan() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenNan)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexEqual() tomlLexStateFn {
+ l.next()
+ l.emit(tokenEqual)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexComma() tomlLexStateFn {
+ l.next()
+ l.emit(tokenComma)
+ return l.lexRvalue
+}
+
+// Parse the key and emit its value without escape sequences.
+// Bare keys, basic string keys, and literal string keys are supported.
+func (l *tomlLexer) lexKey() tomlLexStateFn {
+ growingString := ""
+
+ for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
+ if r == '"' {
+ l.next()
+ str, err := l.lexStringAsString(`"`, false, true)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ growingString += str
+ l.next()
+ continue
+ } else if r == '\'' {
+ l.next()
+ str, err := l.lexLiteralStringAsString(`'`, false)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ growingString += str
+ l.next()
+ continue
+ } else if r == '\n' {
+ return l.errorf("keys cannot contain new lines")
+ } else if isSpace(r) {
+ break
+ } else if !isValidBareChar(r) {
+ return l.errorf("keys cannot contain %c character", r)
+ }
+ growingString += string(r)
+ l.next()
+ }
+ l.emitWithValue(tokenKey, growingString)
+ return l.lexVoid
+}
+
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+ return func() tomlLexStateFn {
+ for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+ if next == '\r' && l.follow("\r\n") {
+ break
+ }
+ l.next()
+ }
+ l.ignore()
+ return previousState
+ }
+}
+
+func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftBracket)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
+ growingString := ""
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ // find end of string
+ for {
+ if l.follow(terminator) {
+ return growingString, nil
+ }
+
+ next := l.peek()
+ if next == eof {
+ break
+ }
+ growingString += string(l.next())
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := "'"
+ discardLeadingNewLine := false
+ if l.follow("''") {
+ l.skip()
+ l.skip()
+ terminator = "'''"
+ discardLeadingNewLine = true
+ }
+
+ str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+// Lex a string and return the results as a string.
+// Terminator is the substring indicating the end of the token.
+// The resulting string does not include the terminator.
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+ growingString := ""
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ for {
+ if l.follow(terminator) {
+ return growingString, nil
+ }
+
+ if l.follow("\\") {
+ l.next()
+ switch l.peek() {
+ case '\r':
+ fallthrough
+ case '\n':
+ fallthrough
+ case '\t':
+ fallthrough
+ case ' ':
+ // skip all whitespace chars following backslash
+ for strings.ContainsRune("\r\n\t ", l.peek()) {
+ l.next()
+ }
+ case '"':
+ growingString += "\""
+ l.next()
+ case 'n':
+ growingString += "\n"
+ l.next()
+ case 'b':
+ growingString += "\b"
+ l.next()
+ case 'f':
+ growingString += "\f"
+ l.next()
+ case '/':
+ growingString += "/"
+ l.next()
+ case 't':
+ growingString += "\t"
+ l.next()
+ case 'r':
+ growingString += "\r"
+ l.next()
+ case '\\':
+ growingString += "\\"
+ l.next()
+ case 'u':
+ l.next()
+ code := ""
+ for i := 0; i < 4; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code = code + string(c)
+ }
+ intcode, err := strconv.ParseInt(code, 16, 32)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\u" + code)
+ }
+ growingString += string(rune(intcode))
+ case 'U':
+ l.next()
+ code := ""
+ for i := 0; i < 8; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code = code + string(c)
+ }
+ intcode, err := strconv.ParseInt(code, 16, 64)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\U" + code)
+ }
+ growingString += string(rune(intcode))
+ default:
+ return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+ }
+ } else {
+ r := l.peek()
+
+ if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
+ return "", fmt.Errorf("unescaped control character %U", r)
+ }
+ l.next()
+ growingString += string(r)
+ }
+
+ if l.peek() == eof {
+ break
+ }
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := `"`
+ discardLeadingNewLine := false
+ acceptNewLines := false
+ if l.follow(`""`) {
+ l.skip()
+ l.skip()
+ terminator = `"""`
+ discardLeadingNewLine = true
+ acceptNewLines = true
+ }
+
+ str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
+ l.next()
+
+ if l.peek() == '[' {
+ // token '[[' signifies an array of tables
+ l.next()
+ l.emit(tokenDoubleLeftBracket)
+ return l.lexInsideTableArrayKey
+ }
+ // vanilla table key
+ l.emit(tokenLeftBracket)
+ return l.lexInsideTableKey
+}
+
+// Parse the key till "]]", but only bare keys are supported
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroupArray)
+ }
+ l.next()
+ if l.peek() != ']' {
+ break
+ }
+ l.next()
+ l.emit(tokenDoubleRightBracket)
+ return l.lexVoid
+ case '[':
+ return l.errorf("table array key cannot contain ']'")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table array key")
+}
+
+// Parse the key till "]" but only bare keys are supported
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroup)
+ }
+ l.next()
+ l.emit(tokenRightBracket)
+ return l.lexVoid
+ case '[':
+ return l.errorf("table key cannot contain ']'")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table key")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightBracket)
+ return l.lexRvalue
+}
+
+type validRuneFn func(r rune) bool
+
+func isValidHexRune(r rune) bool {
+ return r >= 'a' && r <= 'f' ||
+ r >= 'A' && r <= 'F' ||
+ r >= '0' && r <= '9' ||
+ r == '_'
+}
+
+func isValidOctalRune(r rune) bool {
+ return r >= '0' && r <= '7' || r == '_'
+}
+
+func isValidBinaryRune(r rune) bool {
+ return r == '0' || r == '1' || r == '_'
+}
+
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+ r := l.peek()
+
+ if r == '0' {
+ follow := l.peekString(2)
+ if len(follow) == 2 {
+ var isValidRune validRuneFn
+ switch follow[1] {
+ case 'x':
+ isValidRune = isValidHexRune
+ case 'o':
+ isValidRune = isValidOctalRune
+ case 'b':
+ isValidRune = isValidBinaryRune
+ default:
+ if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' {
+ return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1]))
+ }
+ }
+
+ if isValidRune != nil {
+ l.next()
+ l.next()
+ digitSeen := false
+ for {
+ next := l.peek()
+ if !isValidRune(next) {
+ break
+ }
+ digitSeen = true
+ l.next()
+ }
+
+ if !digitSeen {
+ return l.errorf("number needs at least one digit")
+ }
+
+ l.emit(tokenInteger)
+
+ return l.lexRvalue
+ }
+ }
+ }
+
+ if r == '+' || r == '-' {
+ l.next()
+ if l.follow("inf") {
+ return l.lexInf
+ }
+ if l.follow("nan") {
+ return l.lexNan
+ }
+ }
+
+ pointSeen := false
+ expSeen := false
+ digitSeen := false
+ for {
+ next := l.peek()
+ if next == '.' {
+ if pointSeen {
+ return l.errorf("cannot have two dots in one float")
+ }
+ l.next()
+ if !isDigit(l.peek()) {
+ return l.errorf("float cannot end with a dot")
+ }
+ pointSeen = true
+ } else if next == 'e' || next == 'E' {
+ expSeen = true
+ l.next()
+ r := l.peek()
+ if r == '+' || r == '-' {
+ l.next()
+ }
+ } else if isDigit(next) {
+ digitSeen = true
+ l.next()
+ } else if next == '_' {
+ l.next()
+ } else {
+ break
+ }
+ if pointSeen && !digitSeen {
+ return l.errorf("cannot start float with a dot")
+ }
+ }
+
+ if !digitSeen {
+ return l.errorf("no digit in that number")
+ }
+ if pointSeen || expSeen {
+ l.emit(tokenFloat)
+ } else {
+ l.emit(tokenInteger)
+ }
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) run() {
+ for state := l.lexVoid; state != nil; {
+ state = state()
+ }
+}
+
+func init() {
+ dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
+}
+
+// Entry point
+func lexToml(inputBytes []byte) []token {
+ runes := bytes.Runes(inputBytes)
+ l := &tomlLexer{
+ input: runes,
+ tokens: make([]token, 0, 256),
+ line: 1,
+ col: 1,
+ endbufferLine: 1,
+ endbufferCol: 1,
+ }
+ l.run()
+ return l.tokens
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 00000000..e7ef802b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,687 @@
+package toml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ tagFieldName = "toml"
+ tagFieldComment = "comment"
+ tagCommented = "commented"
+ tagMultiline = "multiline"
+)
+
+type tomlOpts struct {
+ name string
+ comment string
+ commented bool
+ multiline bool
+ include bool
+ omitempty bool
+}
+
+type encOpts struct {
+ quoteMapKeys bool
+ arraysOneElementPerLine bool
+}
+
+var encOptsDefaults = encOpts{
+ quoteMapKeys: false,
+}
+
+type annotation struct {
+ tag string
+ comment string
+ commented string
+ multiline string
+}
+
+var annotationDefault = annotation{
+ tag: tagFieldName,
+ comment: tagFieldComment,
+ commented: tagCommented,
+ multiline: tagMultiline,
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isPrimitive(mtype.Elem())
+ case reflect.Bool:
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Struct:
+ return mtype == timeType || isCustomMarshaler(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a Tree slice
+func isTreeSlice(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Slice:
+ return !isOtherSlice(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a non-Tree slice
+func isOtherSlice(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isOtherSlice(mtype.Elem())
+ case reflect.Slice:
+ return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Map:
+ return true
+ case reflect.Struct:
+ return !isPrimitive(mtype)
+ default:
+ return false
+ }
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+ toml:"Field" Overrides the field's name to output.
+ omitempty When set, empty values and groups are not emitted.
+ comment:"comment" Emits a # comment on the same line. This supports new lines.
+ commented:"true" Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+ *Tree (*)struct, (*)map[string]interface{}
+ []*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
+ []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
+ interface{} (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+ uint64 uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+ float64 float32, float64, pointers to same
+ string string, pointers to same
+ bool bool, pointers to same
+ time.Time time.Time{}, pointers to same
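+
+A short sketch of these annotations (the Server type and its fields are
+illustrative only, not part of this package):
+
+  type Server struct {
+      Host string `toml:"host" comment:"listen address"`
+      Port int    `toml:"port"`
+      TLS  bool   `toml:"tls" commented:"true"`
+  }
+
+  out, err := toml.Marshal(Server{Host: "127.0.0.1", Port: 8080})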
+*/
+func Marshal(v interface{}) ([]byte, error) {
+ return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+ w io.Writer
+ encOpts
+ annotation
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ encOpts: encOptsDefaults,
+ annotation: annotationDefault,
+ }
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+ b, err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+ if _, err := e.w.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// QuoteMapKeys sets up the encoder to encode
+// maps with string-typed keys using quoted TOML keys.
+//
+// This relieves the character limitations on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+ e.quoteMapKeys = v
+ return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to encode arrays
+// with more than one element on multiple lines instead of one.
+//
+// For example:
+//
+// A = [1,2,3]
+//
+// Becomes
+//
+// A = [
+// 1,
+// 2,
+// 3,
+// ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+ e.arraysOneElementPerLine = v
+ return e
+}
+
+// SetTagName allows changing default tag "toml"
+func (e *Encoder) SetTagName(v string) *Encoder {
+ e.tag = v
+ return e
+}
+
+// SetTagComment allows changing default tag "comment"
+func (e *Encoder) SetTagComment(v string) *Encoder {
+ e.comment = v
+ return e
+}
+
+// SetTagCommented allows changing default tag "commented"
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+ e.commented = v
+ return e
+}
+
+// SetTagMultiline allows changing default tag "multiline"
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+ e.multiline = v
+ return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+ mtype := reflect.TypeOf(v)
+
+ switch mtype.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Ptr:
+ if mtype.Elem().Kind() != reflect.Struct {
+ return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+ }
+ default:
+ return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+ }
+
+ sval := reflect.ValueOf(v)
+ if isCustomMarshaler(mtype) {
+ return callCustomMarshaler(sval)
+ }
+ t, err := e.valueToTree(mtype, sval)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ var buf bytes.Buffer
+ _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine)
+
+ return buf.Bytes(), err
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return e.valueToTree(mtype.Elem(), mval.Elem())
+ }
+ tval := newTree()
+ switch mtype.Kind() {
+ case reflect.Struct:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef, mvalf := mtype.Field(i), mval.Field(i)
+ opts := tomlOptions(mtypef, e.annotation)
+ if opts.include && (!opts.omitempty || !isZero(mvalf)) {
+ val, err := e.valueToToml(mtypef.Type, mvalf)
+ if err != nil {
+ return nil, err
+ }
+
+ tval.SetWithOptions(opts.name, SetOptions{
+ Comment: opts.comment,
+ Commented: opts.commented,
+ Multiline: opts.multiline,
+ }, val)
+ }
+ }
+ case reflect.Map:
+ for _, key := range mval.MapKeys() {
+ mvalf := mval.MapIndex(key)
+ val, err := e.valueToToml(mtype.Elem(), mvalf)
+ if err != nil {
+ return nil, err
+ }
+ if e.quoteMapKeys {
+ keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine)
+ if err != nil {
+ return nil, err
+ }
+ tval.SetPath([]string{keyStr}, val)
+ } else {
+ tval.Set(key.String(), val)
+ }
+ }
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+ tval := make([]*Tree, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ tval := make([]interface{}, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal value to toml value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return e.valueToToml(mtype.Elem(), mval.Elem())
+ }
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTree(mtype):
+ return e.valueToTree(mtype, mval)
+ case isTreeSlice(mtype):
+ return e.valueToTreeSlice(mtype, mval)
+ case isOtherSlice(mtype):
+ return e.valueToOtherSlice(mtype, mval)
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool:
+ return mval.Bool(), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
+ return fmt.Sprint(mval), nil
+ }
+ return mval.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return mval.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return mval.Float(), nil
+ case reflect.String:
+ return mval.String(), nil
+ case reflect.Struct:
+ return mval.Interface().(time.Time), nil
+ default:
+ return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+ }
+ }
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+ d := Decoder{tval: t, tagName: tagFieldName}
+ return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of Tree.
+// See Marshal() documentation for types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+ var buf bytes.Buffer
+ err := NewEncoder(&buf).Encode(t)
+ return buf.Bytes(), err
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+// toml:"Field" Overrides the field's name to map to.
+//
+// See Marshal() documentation for types mapping table.
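+//
+// A minimal sketch (the Config type here is illustrative, not part of this
+// package):
+//
+//	type Config struct {
+//		Port int64 `toml:"port"`
+//	}
+//	var cfg Config
+//	err := toml.Unmarshal([]byte("port = 8080"), &cfg)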
+func Unmarshal(data []byte, v interface{}) error {
+ t, err := LoadReader(bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+ r io.Reader
+ tval *Tree
+ encOpts
+ tagName string
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ encOpts: encOptsDefaults,
+ tagName: tagFieldName,
+ }
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it in the value pointed at by v.
+//
+// See the documentation for Marshal for details.
+func (d *Decoder) Decode(v interface{}) error {
+ var err error
+ d.tval, err = LoadReader(d.r)
+ if err != nil {
+ return err
+ }
+ return d.unmarshal(v)
+}
+
+// SetTagName allows changing the default struct tag name ("toml") used to match fields.
+func (d *Decoder) SetTagName(v string) *Decoder {
+ d.tagName = v
+ return d
+}
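
Because SetTagName returns the receiver, it chains naturally between NewDecoder and Decode. An editorial sketch, with an assumed custom tag name "cfg":

```go
package main

import (
	"fmt"
	"log"
	"strings"

	toml "github.com/pelletier/go-toml"
)

type limits struct {
	MaxConns int64 `cfg:"max_conns"`
}

func main() {
	r := strings.NewReader("max_conns = 64\n")

	var l limits
	// SetTagName switches the struct tag consulted by the decoder
	// from the default "toml" to "cfg".
	if err := toml.NewDecoder(r).SetTagName("cfg").Decode(&l); err != nil {
		log.Fatal(err)
	}
	fmt.Println(l.MaxConns) // 64
}
```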
+
+func (d *Decoder) unmarshal(v interface{}) error {
+ mtype := reflect.TypeOf(v)
+ if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+ return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+ }
+
+ sval, err := d.valueFromTree(mtype.Elem(), d.tval)
+ if err != nil {
+ return err
+ }
+ reflect.ValueOf(v).Elem().Set(sval)
+ return nil
+}
+
+// Convert toml tree to marshal struct or map, using marshal type
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval)
+ }
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Struct:
+ mval = reflect.New(mtype).Elem()
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef := mtype.Field(i)
+ an := annotation{tag: d.tagName}
+ opts := tomlOptions(mtypef, an)
+ if opts.include {
+ baseKey := opts.name
+ keysToTry := []string{
+ baseKey,
+ strings.ToLower(baseKey),
+ strings.ToTitle(baseKey),
+ strings.ToLower(string(baseKey[0])) + baseKey[1:],
+ }
+ for _, key := range keysToTry {
+ exists := tval.Has(key)
+ if !exists {
+ continue
+ }
+ val := tval.Get(key)
+ mvalf, err := d.valueFromToml(mtypef.Type, val)
+ if err != nil {
+ return mval, formatError(err, tval.GetPosition(key))
+ }
+ mval.Field(i).Set(mvalf)
+ break
+ }
+ }
+ }
+ case reflect.Map:
+ mval = reflect.MakeMap(mtype)
+ for _, key := range tval.Keys() {
+ // TODO: path splits key
+ val := tval.GetPath([]string{key})
+ mvalf, err := d.valueFromToml(mtype.Elem(), val)
+ if err != nil {
+ return mval, formatError(err, tval.GetPosition(key))
+ }
+ mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+ mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+ for i := 0; i < len(tval); i++ {
+ val, err := d.valueFromTree(mtype.Elem(), tval[i])
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+ mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+ for i := 0; i < len(tval); i++ {
+ val, err := d.valueFromToml(mtype.Elem(), tval[i])
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval)
+ }
+
+ switch t := tval.(type) {
+ case *Tree:
+ if isTree(mtype) {
+ return d.valueFromTree(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
+ case []*Tree:
+ if isTreeSlice(mtype) {
+ return d.valueFromTreeSlice(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
+ case []interface{}:
+ if isOtherSlice(mtype) {
+ return d.valueFromOtherSlice(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool, reflect.Struct:
+ val := reflect.ValueOf(tval)
+ // when mtype is reflect.Struct and this check passes, tval must be a time.Time
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.String:
+ val := reflect.ValueOf(tval)
+ // int64 is technically convertible to string (it becomes a rune), which is never wanted here, so special-case it.
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val := reflect.ValueOf(tval)
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String {
+ d, err := time.ParseDuration(val.String())
+ if err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err)
+ }
+ return reflect.ValueOf(d), nil
+ }
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Float32, reflect.Float64:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ default:
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+ }
+ }
+}
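
The integer branch above special-cases time.Duration: when the target type is time.Duration and the TOML value is a string, it is run through time.ParseDuration. A short illustration of what that accepts (an editorial sketch, not upstream code):

```go
package main

import (
	"fmt"
	"log"
	"time"

	toml "github.com/pelletier/go-toml"
)

type timeouts struct {
	Dial time.Duration `toml:"dial"`
	Read time.Duration `toml:"read"`
}

func main() {
	// Duration fields may be written as strings and are parsed with
	// time.ParseDuration; plain integers are still accepted as nanoseconds.
	doc := []byte("dial = \"5s\"\nread = \"1m30s\"\n")

	var t timeouts
	if err := toml.Unmarshal(doc, &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Dial, t.Read) // 5s 1m30s
}
```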
+
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ val, err := d.valueFromToml(mtype.Elem(), tval)
+ if err != nil {
+ return reflect.ValueOf(nil), err
+ }
+ mval := reflect.New(mtype.Elem())
+ mval.Elem().Set(val)
+ return mval, nil
+}
+
+func tomlOptions(vf reflect.StructField, an annotation) tomlOpts {
+ tag := vf.Tag.Get(an.tag)
+ parse := strings.Split(tag, ",")
+ var comment string
+ if c := vf.Tag.Get(an.comment); c != "" {
+ comment = c
+ }
+ commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented))
+ multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline))
+ result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false}
+ if parse[0] != "" {
+ if parse[0] == "-" && len(parse) == 1 {
+ result.include = false
+ } else {
+ result.name = strings.Trim(parse[0], " ")
+ }
+ }
+ if vf.PkgPath != "" {
+ result.include = false
+ }
+ if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+ result.omitempty = true
+ }
+ if vf.Type.Kind() == reflect.Ptr {
+ result.omitempty = true
+ }
+ return result
+}
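
tomlOptions is where the struct tag grammar lives: "-" excludes a field, a leading name renames it, a trailing "omitempty" (or a pointer type) drops empty values, and unexported fields are skipped entirely. A hedged sketch of these tag forms on the encoding side, assuming the package-level Marshal defined earlier in this file and the default auxiliary tag names such as "comment":

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

type profile struct {
	Secret   string  `toml:"-"`              // "-" excludes the field entirely
	Nickname string  `toml:"nick,omitempty"` // rename the key and drop zero values
	Bio      *string `toml:"bio"`            // pointer fields imply omitempty
	Email    string  `toml:"email" comment:"contact address"`
}

func main() {
	bio := "gopher"
	out, err := toml.Marshal(profile{
		Secret:   "hunter2",
		Nickname: "rob",
		Bio:      &bio,
		Email:    "rob@example.com",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Secret never appears; nick, bio and email (with its # comment) do.
	fmt.Print(string(out))
}
```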
+
+func isZero(val reflect.Value) bool {
+ switch val.Type().Kind() {
+ case reflect.Map:
+ fallthrough
+ case reflect.Array:
+ fallthrough
+ case reflect.Slice:
+ return val.Len() == 0
+ default:
+ return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+ }
+}
+
+func formatError(err error, pos Position) error {
+ if err.Error()[0] == '(' { // Error already contains position information
+ return err
+ }
+ return fmt.Errorf("%s: %s", pos, err)
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 00000000..1c5f98e7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,38 @@
+title = "TOML Marshal Testing"
+
+[basic]
+ bool = true
+ date = 1979-05-27T07:32:00Z
+ float = 123.4
+ int = 5000
+ string = "Bite me"
+ uint = 5001
+
+[basic_lists]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ floats = [12.3,45.6,78.9]
+ ints = [8001,8001,8002]
+ strings = ["One","Two","Three"]
+ uints = [5002,5003]
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.first]
+ name = "First"
+
+ [subdoc.second]
+ name = "Second"
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
+
+[[subdocptrs]]
+ name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
new file mode 100644
index 00000000..776353b2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -0,0 +1,430 @@
+// TOML Parser.
+
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlParser struct {
+ flowIdx int
+ flow []token
+ tree *Tree
+ currentTable []string
+ seenTableKeys []string
+}
+
+type tomlParserStateFn func() tomlParserStateFn
+
+// raiseError formats an error message based on a token and panics.
+func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
+ panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *tomlParser) run() {
+ for state := p.parseStart; state != nil; {
+ state = state()
+ }
+}
+
+func (p *tomlParser) peek() *token {
+ if p.flowIdx >= len(p.flow) {
+ return nil
+ }
+ return &p.flow[p.flowIdx]
+}
+
+func (p *tomlParser) assume(typ tokenType) {
+ tok := p.getToken()
+ if tok == nil {
+ p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
+ }
+ if tok.typ != typ {
+ p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
+ }
+}
+
+func (p *tomlParser) getToken() *token {
+ tok := p.peek()
+ if tok == nil {
+ return nil
+ }
+ p.flowIdx++
+ return tok
+}
+
+func (p *tomlParser) parseStart() tomlParserStateFn {
+ tok := p.peek()
+
+ // end of stream, parsing is finished
+ if tok == nil {
+ return nil
+ }
+
+ switch tok.typ {
+ case tokenDoubleLeftBracket:
+ return p.parseGroupArray
+ case tokenLeftBracket:
+ return p.parseGroup
+ case tokenKey:
+ return p.parseAssign
+ case tokenEOF:
+ return nil
+ default:
+ p.raiseError(tok, "unexpected token")
+ }
+ return nil
+}
+
+func (p *tomlParser) parseGroupArray() tomlParserStateFn {
+ startToken := p.getToken() // discard the [[
+ key := p.getToken()
+ if key.typ != tokenKeyGroupArray {
+ p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
+ }
+
+ // get or create table array element at the indicated part in the path
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
+ destTree := p.tree.GetPath(keys)
+ var array []*Tree
+ if destTree == nil {
+ array = make([]*Tree, 0)
+ } else if target, ok := destTree.([]*Tree); ok && target != nil {
+ array = destTree.([]*Tree)
+ } else {
+ p.raiseError(key, "key %s is already assigned and not of type table array", key)
+ }
+ p.currentTable = keys
+
+ // add a new tree to the end of the table array
+ newTree := newTree()
+ newTree.position = startToken.Position
+ array = append(array, newTree)
+ p.tree.SetPath(p.currentTable, array)
+
+ // remove all keys that were children of this table array
+ prefix := key.val + "."
+ found := false
+ for ii := 0; ii < len(p.seenTableKeys); {
+ tableKey := p.seenTableKeys[ii]
+ if strings.HasPrefix(tableKey, prefix) {
+ p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
+ } else {
+ found = (tableKey == key.val)
+ ii++
+ }
+ }
+
+ // keep this key name from use by other kinds of assignments
+ if !found {
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ }
+
+ // move to next parser state
+ p.assume(tokenDoubleRightBracket)
+ return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+ startToken := p.getToken() // discard the [
+ key := p.getToken()
+ if key.typ != tokenKeyGroup {
+ p.raiseError(key, "unexpected token %s, was expecting a table key", key)
+ }
+ for _, item := range p.seenTableKeys {
+ if item == key.val {
+ p.raiseError(key, "duplicated tables")
+ }
+ }
+
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+ p.raiseError(key, "%s", err)
+ }
+ p.assume(tokenRightBracket)
+ p.currentTable = keys
+ return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+ key := p.getToken()
+ p.assume(tokenEqual)
+
+ value := p.parseRvalue()
+ var tableKey []string
+ if len(p.currentTable) > 0 {
+ tableKey = p.currentTable
+ } else {
+ tableKey = []string{}
+ }
+
+ // find the table to assign, looking out for arrays of tables
+ var targetNode *Tree
+ switch node := p.tree.GetPath(tableKey).(type) {
+ case []*Tree:
+ targetNode = node[len(node)-1]
+ case *Tree:
+ targetNode = node
+ default:
+ p.raiseError(key, "Unknown table type for path: %s",
+ strings.Join(tableKey, "."))
+ }
+
+ // assign value to the found table
+ keyVals := []string{key.val}
+ if len(keyVals) != 1 {
+ p.raiseError(key, "Invalid key")
+ }
+ keyVal := keyVals[0]
+ localKey := []string{keyVal}
+ finalKey := append(tableKey, keyVal)
+ if targetNode.GetPath(localKey) != nil {
+ p.raiseError(key, "The following key was defined twice: %s",
+ strings.Join(finalKey, "."))
+ }
+ var toInsert interface{}
+
+ switch value.(type) {
+ case *Tree, []*Tree:
+ toInsert = value
+ default:
+ toInsert = &tomlValue{value: value, position: key.Position}
+ }
+ targetNode.values[keyVal] = toInsert
+ return p.parseStart
+}
+
+var numberUnderscoreInvalidRegexp *regexp.Regexp
+var hexNumberUnderscoreInvalidRegexp *regexp.Regexp
+
+func numberContainsInvalidUnderscore(value string) error {
+ if numberUnderscoreInvalidRegexp.MatchString(value) {
+ return errors.New("invalid use of _ in number")
+ }
+ return nil
+}
+
+func hexNumberContainsInvalidUnderscore(value string) error {
+ if hexNumberUnderscoreInvalidRegexp.MatchString(value) {
+ return errors.New("invalid use of _ in hex number")
+ }
+ return nil
+}
+
+func cleanupNumberToken(value string) string {
+ cleanedVal := strings.Replace(value, "_", "", -1)
+ return cleanedVal
+}
+
+func (p *tomlParser) parseRvalue() interface{} {
+ tok := p.getToken()
+ if tok == nil || tok.typ == tokenEOF {
+ p.raiseError(tok, "expecting a value")
+ }
+
+ switch tok.typ {
+ case tokenString:
+ return tok.val
+ case tokenTrue:
+ return true
+ case tokenFalse:
+ return false
+ case tokenInf:
+ if tok.val[0] == '-' {
+ return math.Inf(-1)
+ }
+ return math.Inf(1)
+ case tokenNan:
+ return math.NaN()
+ case tokenInteger:
+ cleanedVal := cleanupNumberToken(tok.val)
+ var err error
+ var val int64
+ if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
+ switch cleanedVal[1] {
+ case 'x':
+ err = hexNumberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
+ case 'o':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
+ case 'b':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
+ default:
+ panic("invalid base") // the lexer should catch this first
+ }
+ } else {
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal, 10, 64)
+ }
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenFloat:
+ err := numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ cleanedVal := cleanupNumberToken(tok.val)
+ val, err := strconv.ParseFloat(cleanedVal, 64)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenDate:
+ val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLeftBracket:
+ return p.parseArray()
+ case tokenLeftCurlyBrace:
+ return p.parseInlineTable()
+ case tokenEqual:
+ p.raiseError(tok, "cannot have multiple equals for the same key")
+ case tokenError:
+ p.raiseError(tok, "%s", tok)
+ }
+
+ p.raiseError(tok, "never reached")
+
+ return nil
+}
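
parseRvalue accepts hexadecimal (0x), octal (0o) and binary (0b) integer literals in addition to decimal, with underscore separators policed by the regular expressions declared above. Assuming the lexer defined elsewhere in this package emits these forms as integer tokens, as the base switch expects, the accepted syntax looks like this through the public Load API (editorial sketch):

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	doc := `
dec = 1_000_000
hex = 0xdead_beef
oct = 0o755
bin = 0b1010
`
	tree, err := toml.Load(doc)
	if err != nil {
		log.Fatal(err)
	}
	// Every integer form ends up as an int64 in the tree.
	fmt.Println(tree.Get("dec"), tree.Get("hex"), tree.Get("oct"), tree.Get("bin"))
	// 1000000 3735928559 493 10
}
```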
+
+func tokenIsComma(t *token) bool {
+ return t != nil && t.typ == tokenComma
+}
+
+func (p *tomlParser) parseInlineTable() *Tree {
+ tree := newTree()
+ var previous *token
+Loop:
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated inline table")
+ }
+ switch follow.typ {
+ case tokenRightCurlyBrace:
+ p.getToken()
+ break Loop
+ case tokenKey, tokenInteger, tokenString:
+ if !tokenIsComma(previous) && previous != nil {
+ p.raiseError(follow, "comma expected between fields in inline table")
+ }
+ key := p.getToken()
+ p.assume(tokenEqual)
+ value := p.parseRvalue()
+ tree.Set(key.val, value)
+ case tokenComma:
+ if previous == nil {
+ p.raiseError(follow, "inline table cannot start with a comma")
+ }
+ if tokenIsComma(previous) {
+ p.raiseError(follow, "need field between two commas in inline table")
+ }
+ p.getToken()
+ default:
+ p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
+ }
+ previous = follow
+ }
+ if tokenIsComma(previous) {
+ p.raiseError(previous, "trailing comma at the end of inline table")
+ }
+ return tree
+}
+
+func (p *tomlParser) parseArray() interface{} {
+ var array []interface{}
+ arrayType := reflect.TypeOf(nil)
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ == tokenRightBracket {
+ p.getToken()
+ break
+ }
+ val := p.parseRvalue()
+ if arrayType == nil {
+ arrayType = reflect.TypeOf(val)
+ }
+ if reflect.TypeOf(val) != arrayType {
+ p.raiseError(follow, "mixed types in array")
+ }
+ array = append(array, val)
+ follow = p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+ p.raiseError(follow, "missing comma")
+ }
+ if follow.typ == tokenComma {
+ p.getToken()
+ }
+ }
+ // An array of Trees is actually an array of inline
+ // tables, which is a shorthand for a table array. If the
+ // array was not converted from []interface{} to []*Tree,
+ // the two notations would not be equivalent.
+ if arrayType == reflect.TypeOf(newTree()) {
+ tomlArray := make([]*Tree, len(array))
+ for i, v := range array {
+ tomlArray[i] = v.(*Tree)
+ }
+ return tomlArray
+ }
+ return array
+}
+
+func parseToml(flow []token) *Tree {
+ result := newTree()
+ result.position = Position{1, 1}
+ parser := &tomlParser{
+ flowIdx: 0,
+ flow: flow,
+ tree: result,
+ currentTable: make([]string, 0),
+ seenTableKeys: make([]string, 0),
+ }
+ parser.run()
+ return result
+}
+
+func init() {
+ numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`)
+ hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 00000000..c17bff87
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+ "fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+ Line int // line within the document
+ Col int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+ return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid, i.e. has a zero or
+// negative Line or Col.
+func (p Position) Invalid() bool {
+ return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
new file mode 100755
index 00000000..af013e81
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+# fail out of the script if anything here fails
+set -e
+set -o pipefail
+
+# set the path to the present working directory
+export GOPATH=`pwd`
+
+function git_clone() {
+ path=$1
+ branch=$2
+ version=$3
+ if [ ! -d "src/$path" ]; then
+ mkdir -p src/$path
+ git clone https://$path.git src/$path
+ fi
+ pushd src/$path
+ git checkout "$branch"
+ git reset --hard "$version"
+ popd
+}
+
+# Remove potential previous runs
+rm -rf src test_program_bin toml-test
+
+go get github.com/davecgh/go-spew/spew
+go get gopkg.in/yaml.v2
+go get github.com/BurntSushi/toml
+
+# get code for BurntSushi TOML validation
+git_clone github.com/BurntSushi/toml master a368813
+git_clone github.com/BurntSushi/toml-test master 39e37e6
+
+# build the BurntSushi test application
+go build -o toml-test github.com/BurntSushi/toml-test
+
+# vendorize the current lib for testing
+# NOTE: this basically mocks an install without having to go back out to github for code
+mkdir -p src/github.com/pelletier/go-toml/cmd
+mkdir -p src/github.com/pelletier/go-toml/query
+cp *.go *.toml src/github.com/pelletier/go-toml
+cp -R cmd/* src/github.com/pelletier/go-toml/cmd
+cp -R query/* src/github.com/pelletier/go-toml/query
+go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
+
+# Run basic unit tests
+go test github.com/pelletier/go-toml -race -coverprofile=coverage.txt -covermode=atomic
+go test github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml/query
+
+# run the entire BurntSushi test suite
+if [[ $# -eq 0 ]] ; then
+ echo "Running all BurntSushi tests"
+ ./toml-test ./test_program_bin | tee test_out
+else
+ # run a specific test
+ test=$1
+ test_path='src/github.com/BurntSushi/toml-test/tests'
+ valid_test="$test_path/valid/$test"
+ invalid_test="$test_path/invalid/$test"
+
+ if [ -e "$valid_test.toml" ]; then
+ echo "Valid Test TOML for $test:"
+ echo "===="
+ cat "$valid_test.toml"
+
+ echo "Valid Test JSON for $test:"
+ echo "===="
+ cat "$valid_test.json"
+
+ echo "Go-TOML Output for $test:"
+ echo "===="
+ cat "$valid_test.toml" | ./test_program_bin
+ fi
+
+ if [ -e "$invalid_test.toml" ]; then
+ echo "Invalid Test TOML for $test:"
+ echo "===="
+ cat "$invalid_test.toml"
+
+ echo "Go-TOML Output for $test:"
+ echo "===="
+ echo "go-toml Output:"
+ cat "$invalid_test.toml" | ./test_program_bin
+ fi
+fi
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 00000000..1a908134
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,144 @@
+package toml
+
+import (
+ "fmt"
+ "strconv"
+ "unicode"
+)
+
+// Define tokens
+type tokenType int
+
+const (
+ eof = -(iota + 1)
+)
+
+const (
+ tokenError tokenType = iota
+ tokenEOF
+ tokenComment
+ tokenKey
+ tokenString
+ tokenInteger
+ tokenTrue
+ tokenFalse
+ tokenFloat
+ tokenInf
+ tokenNan
+ tokenEqual
+ tokenLeftBracket
+ tokenRightBracket
+ tokenLeftCurlyBrace
+ tokenRightCurlyBrace
+ tokenLeftParen
+ tokenRightParen
+ tokenDoubleLeftBracket
+ tokenDoubleRightBracket
+ tokenDate
+ tokenKeyGroup
+ tokenKeyGroupArray
+ tokenComma
+ tokenColon
+ tokenDollar
+ tokenStar
+ tokenQuestion
+ tokenDot
+ tokenDotDot
+ tokenEOL
+)
+
+var tokenTypeNames = []string{
+ "Error",
+ "EOF",
+ "Comment",
+ "Key",
+ "String",
+ "Integer",
+ "True",
+ "False",
+ "Float",
+ "Inf",
+ "NaN",
+ "=",
+ "[",
+ "]",
+ "{",
+ "}",
+ "(",
+ ")",
+ "]]",
+ "[[",
+ "Date",
+ "KeyGroup",
+ "KeyGroupArray",
+ ",",
+ ":",
+ "$",
+ "*",
+ "?",
+ ".",
+ "..",
+ "EOL",
+}
+
+type token struct {
+ Position
+ typ tokenType
+ val string
+}
+
+func (tt tokenType) String() string {
+ idx := int(tt)
+ if idx < len(tokenTypeNames) {
+ return tokenTypeNames[idx]
+ }
+ return "Unknown"
+}
+
+func (t token) Int() int {
+ if result, err := strconv.Atoi(t.val); err != nil {
+ panic(err)
+ } else {
+ return result
+ }
+}
+
+func (t token) String() string {
+ switch t.typ {
+ case tokenEOF:
+ return "EOF"
+ case tokenError:
+ return t.val
+ }
+
+ return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+ return unicode.IsLetter(r) || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+ // Keys start with the first character that isn't whitespace or [ and end
+ // with the last non-whitespace character before the equals sign. Keys
+ // cannot contain a # character.
+ return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+ return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+ return unicode.IsNumber(r)
+}
+
+func isHexDigit(r rune) bool {
+ return isDigit(r) ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 00000000..20180ab9
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,392 @@
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+)
+
+type tomlValue struct {
+ value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+ comment string
+ commented bool
+ multiline bool
+ position Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+ values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+ comment string
+ commented bool
+ position Position
+}
+
+func newTree() *Tree {
+ return &Tree{
+ values: make(map[string]interface{}),
+ position: Position{},
+ }
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+ result, err := toTree(m)
+ if err != nil {
+ return nil, err
+ }
+ return result.(*Tree), nil
+}
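
TreeFromMap goes the other way from ToMap: nested maps become sub-tables and slices of maps become table arrays (see toTree in tomltree_create.go later in this diff). A brief illustrative sketch:

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Nested maps turn into [server]-style sub-tables.
	tree, err := toml.TreeFromMap(map[string]interface{}{
		"title": "example",
		"server": map[string]interface{}{
			"host": "localhost",
			"port": int64(8080),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	s, err := tree.ToTomlString()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(s)
}
```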
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+ return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+ if key == "" {
+ return false
+ }
+ return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+ return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+ keys := make([]string, len(t.values))
+ i := 0
+ for k := range t.values {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+// Get the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
+// Returns nil if the path does not exist in the tree.
+// If key is empty, the current tree is returned.
+func (t *Tree) Get(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetPath(strings.Split(key, "."))
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.value
+ default:
+ return node
+ }
+}
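
The difference between Get and GetPath is only in how the path is supplied: Get splits its argument on dots, GetPath takes the path elements explicitly. A small sketch (editorial, not vendored code):

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[database]
host = "127.0.0.1"
port = 5432
`)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(tree.Get("database.host"))                  // 127.0.0.1
	fmt.Println(tree.GetPath([]string{"database", "port"})) // 5432

	// Missing paths are reported as nil / false rather than as errors.
	fmt.Println(tree.Get("database.user")) // <nil>
	fmt.Println(tree.Has("database.user")) // false
}
```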
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+ if key == "" {
+ return t.position
+ }
+ return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// GetPositionPath returns the position of the element in the tree indicated
+// by 'keys'.
+// If keys is of length zero, the position of the current tree is returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+ if len(keys) == 0 {
+ return t.position
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return Position{0, 0}
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ subtree = node[len(node)-1]
+ default:
+ return Position{0, 0}
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.position
+ case *Tree:
+ return node.position
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ return node[len(node)-1].position
+ default:
+ return Position{0, 0}
+ }
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+ val := t.Get(key)
+ if val == nil {
+ return def
+ }
+ return val
+}
+
+// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
+// The default values within the struct are valid default options.
+type SetOptions struct {
+ Comment string
+ Commented bool
+ Multiline bool
+}
+
+// SetWithOptions is the same as Set, but allows you to provide formatting
+// instructions for the key that will be used by Marshal().
+func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
+ t.SetPathWithOptions(strings.Split(key, "."), opts, value)
+}
+
+// SetPathWithOptions is the same as SetPath, but allows you to provide
+// formatting instructions for the key that will be reused by Marshal().
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ nextTree = newTree()
+ subtree.values[intermediateKey] = nextTree // add new element here
+ }
+ switch node := nextTree.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ // create element if it does not exist
+ subtree.values[intermediateKey] = append(node, newTree())
+ }
+ subtree = node[len(node)-1]
+ }
+ }
+
+ var toInsert interface{}
+
+ switch v := value.(type) {
+ case *Tree:
+ v.comment = opts.Comment
+ toInsert = value
+ case []*Tree:
+ toInsert = value
+ case *tomlValue:
+ v.comment = opts.Comment
+ toInsert = v
+ default:
+ toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline}
+ }
+
+ subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, value interface{}) {
+ t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information for the key that will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+ t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
+ t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information for the key that will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ nextTree = newTree()
+ subtree.values[intermediateKey] = nextTree // add new element here
+ }
+ switch node := nextTree.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ // create element if it does not exist
+ subtree.values[intermediateKey] = append(node, newTree())
+ }
+ subtree = node[len(node)-1]
+ }
+ }
+
+ var toInsert interface{}
+
+ switch v := value.(type) {
+ case *Tree:
+ v.comment = comment
+ toInsert = value
+ case []*Tree:
+ toInsert = value
+ case *tomlValue:
+ v.comment = comment
+ toInsert = v
+ default:
+ toInsert = &tomlValue{value: value, comment: comment, commented: commented}
+ }
+
+ subtree.values[keys[len(keys)-1]] = toInsert
+}
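
The Set family mirrors the Get family: dotted keys or explicit paths, with optional comment and formatting metadata that the writer in tomltree_write.go reuses. A hedged usage sketch:

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("")
	if err != nil {
		log.Fatal(err)
	}

	// Intermediate tables are created on demand.
	tree.Set("server.host", "localhost")
	tree.SetPath([]string{"server", "port"}, int64(8080))

	// Comment and commented metadata survive the round trip back to text.
	tree.SetWithComment("server.debug", "enable verbose logging", false, true)
	tree.SetWithOptions("server.motd", toml.SetOptions{
		Comment:   "shown at login",
		Commented: true,
	}, "hello")

	s, err := tree.ToTomlString()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(s)
}
```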
+
+// createSubTree takes a tree and a key and creates the necessary intermediate
+// subtrees to create a subtree at that point. In-place.
+//
+// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
+// and tree[a][b][c]
+//
+// Returns nil on success, error object on failure
+func (t *Tree) createSubTree(keys []string, pos Position) error {
+ subtree := t
+ for _, intermediateKey := range keys {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ tree := newTree()
+ tree.position = pos
+ subtree.values[intermediateKey] = tree
+ nextTree = tree
+ }
+
+ switch node := nextTree.(type) {
+ case []*Tree:
+ subtree = node[len(node)-1]
+ case *Tree:
+ subtree = node
+ default:
+ return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
+ strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
+ }
+ }
+ return nil
+}
+
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = errors.New(r.(string))
+ }
+ }()
+
+ if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
+ b = b[4:]
+ } else if len(b) >= 3 && hasUTF8BOM3(b) {
+ b = b[3:]
+ } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
+ b = b[2:]
+ }
+
+ tree = parseToml(lexToml(b))
+ return
+}
+
+func hasUTF16BigEndianBOM2(b []byte) bool {
+ return b[0] == 0xFE && b[1] == 0xFF
+}
+
+func hasUTF16LittleEndianBOM2(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE
+}
+
+func hasUTF8BOM3(b []byte) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+func hasUTF32BigEndianBOM4(b []byte) bool {
+ return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
+}
+
+func hasUTF32LittleEndianBOM4(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+ inputBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return
+ }
+ tree, err = LoadBytes(inputBytes)
+ return
+}
+
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+ return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return LoadReader(file)
+}
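
Load, LoadBytes, LoadReader and LoadFile are the same entry point for different sources; LoadBytes additionally strips a UTF-8, UTF-16 or UTF-32 byte-order mark before lexing. A minimal sketch (the file name below is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// From a string.
	tree, err := toml.Load(`title = "example"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tree.Get("title")) // example

	// From a file; "config.toml" is a hypothetical path for illustration.
	if fileTree, err := toml.LoadFile("config.toml"); err != nil {
		log.Println("no config file:", err)
	} else {
		fmt.Println(fileTree.Keys())
	}
}
```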
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 00000000..79610e9b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,142 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+ reflect.Bool: reflect.TypeOf(true),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Float32: reflect.TypeOf(float64(1)),
+ reflect.Float64: reflect.TypeOf(float64(1)),
+ reflect.Int: reflect.TypeOf(int64(1)),
+ reflect.Int8: reflect.TypeOf(int64(1)),
+ reflect.Int16: reflect.TypeOf(int64(1)),
+ reflect.Int32: reflect.TypeOf(int64(1)),
+ reflect.Int64: reflect.TypeOf(int64(1)),
+ reflect.Uint: reflect.TypeOf(uint64(1)),
+ reflect.Uint8: reflect.TypeOf(uint64(1)),
+ reflect.Uint16: reflect.TypeOf(uint64(1)),
+ reflect.Uint32: reflect.TypeOf(uint64(1)),
+ reflect.Uint64: reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+ if k > 0 && int(k) < len(kindToType) {
+ return kindToType[k]
+ }
+ return nil
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+ switch original := object.(type) {
+ case string, bool, int64, uint64, float64, time.Time:
+ return original, nil
+ case int:
+ return int64(original), nil
+ case int8:
+ return int64(original), nil
+ case int16:
+ return int64(original), nil
+ case int32:
+ return int64(original), nil
+ case uint:
+ return uint64(original), nil
+ case uint8:
+ return uint64(original), nil
+ case uint16:
+ return uint64(original), nil
+ case uint32:
+ return uint64(original), nil
+ case float32:
+ return float64(original), nil
+ case fmt.Stringer:
+ return original.String(), nil
+ default:
+ return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+ }
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+ // arrays are a bit tricky, since they can represent either a
+ // collection of simple values, which is represented by one
+ // *tomlValue, or an array of tables, which is represented by an
+ // array of *Tree.
+
+ // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
+ value := reflect.ValueOf(object)
+ insideType := value.Type().Elem()
+ length := value.Len()
+ if length > 0 {
+ insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+ }
+ if insideType.Kind() == reflect.Map {
+ // this is considered as an array of tables
+ tablesArray := make([]*Tree, 0, length)
+ for i := 0; i < length; i++ {
+ table := value.Index(i)
+ tree, err := toTree(table.Interface())
+ if err != nil {
+ return nil, err
+ }
+ tablesArray = append(tablesArray, tree.(*Tree))
+ }
+ return tablesArray, nil
+ }
+
+ sliceType := typeFor(insideType.Kind())
+ if sliceType == nil {
+ sliceType = insideType
+ }
+
+ arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+ value := reflect.ValueOf(object)
+
+ if value.Kind() == reflect.Map {
+ values := map[string]interface{}{}
+ keys := value.MapKeys()
+ for _, key := range keys {
+ if key.Kind() != reflect.String {
+ if _, ok := key.Interface().(string); !ok {
+ return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+ }
+ }
+
+ v := value.MapIndex(key)
+ newValue, err := toTree(v.Interface())
+ if err != nil {
+ return nil, err
+ }
+ values[key.String()] = newValue
+ }
+ return &Tree{values: values, position: Position{}}, nil
+ }
+
+ if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+ return sliceToTree(object)
+ }
+
+ simpleValue, err := simpleValueCoercion(object)
+ if err != nil {
+ return nil, err
+ }
+ return &tomlValue{value: simpleValue, position: Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 00000000..e4049e29
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,333 @@
+package toml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Encodes a string to a TOML-compliant multi-line string value.
+// This function is a clone of the existing encodeTomlString function, except that whitespace characters
+// are preserved. Quotation marks and backslashes are also not escaped.
+func encodeMultilineTomlString(value string) string {
+ var b bytes.Buffer
+
+ for _, rr := range value {
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString("\t")
+ case '\n':
+ b.WriteString("\n")
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString("\r")
+ case '"':
+ b.WriteString(`"`)
+ case '\\':
+ b.WriteString(`\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
+
+// Encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+ var b bytes.Buffer
+
+ for _, rr := range value {
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString(`\t`)
+ case '\n':
+ b.WriteString(`\n`)
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString(`\r`)
+ case '"':
+ b.WriteString(`\"`)
+ case '\\':
+ b.WriteString(`\\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
+
+func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) {
+ // this type check unwraps the *tomlValue wrapper passed in by writeTo,
+ // so that this function can see the value's formatting options.
+ tv, ok := v.(*tomlValue)
+ if ok {
+ v = tv.value
+ } else {
+ tv = &tomlValue{}
+ }
+
+ switch value := v.(type) {
+ case uint64:
+ return strconv.FormatUint(value, 10), nil
+ case int64:
+ return strconv.FormatInt(value, 10), nil
+ case float64:
+ // Ensure a round float does contain a decimal point. Otherwise feeding
+ // the output back to the parser would convert to an integer.
+ if math.Trunc(value) == value {
+ return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil
+ }
+ return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil
+ case string:
+ if tv.multiline {
+ return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil
+ }
+ return "\"" + encodeTomlString(value) + "\"", nil
+ case []byte:
+ b, _ := v.([]byte)
+ return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine)
+ case bool:
+ if value {
+ return "true", nil
+ }
+ return "false", nil
+ case time.Time:
+ return value.Format(time.RFC3339), nil
+ case nil:
+ return "", nil
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() == reflect.Slice {
+ var values []string
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, itemRepr)
+ }
+ if arraysOneElementPerLine && len(values) > 1 {
+ stringBuffer := bytes.Buffer{}
+ valueIndent := indent + ` ` // TODO: move that to a shared encoder state
+
+ stringBuffer.WriteString("[\n")
+
+ for _, value := range values {
+ stringBuffer.WriteString(valueIndent)
+ stringBuffer.WriteString(value)
+ stringBuffer.WriteString(`,`)
+ stringBuffer.WriteString("\n")
+ }
+
+ stringBuffer.WriteString(indent + "]")
+
+ return stringBuffer.String(), nil
+ }
+ return "[" + strings.Join(values, ",") + "]", nil
+ }
+ return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+ simpleValuesKeys := make([]string, 0)
+ complexValuesKeys := make([]string, 0)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree, []*Tree:
+ complexValuesKeys = append(complexValuesKeys, k)
+ default:
+ simpleValuesKeys = append(simpleValuesKeys, k)
+ }
+ }
+
+ sort.Strings(simpleValuesKeys)
+ sort.Strings(complexValuesKeys)
+
+ for _, k := range simpleValuesKeys {
+ v, ok := t.values[k].(*tomlValue)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+
+ repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ if v.comment != "" {
+ comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ var commented string
+ if v.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+
+ for _, k := range complexValuesKeys {
+ v := t.values[k]
+
+ combinedKey := k
+ if keyspace != "" {
+ combinedKey = keyspace + "." + combinedKey
+ }
+ var commented string
+ if t.commented {
+ commented = "# "
+ }
+
+ switch node := v.(type) {
+ // node has to be of those two types given how keys are sorted above
+ case *Tree:
+ tv, ok := t.values[k].(*Tree)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+ if tv.comment != "" {
+ comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+ case []*Tree:
+ for _, subTree := range node {
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+ }
+
+ return bytesCount, nil
+}
+
+func writeStrings(w io.Writer, s ...string) (int, error) {
+ var n int
+ for i := range s {
+ b, err := io.WriteString(w, s[i])
+ n += b
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// WriteTo encodes the Tree as TOML and writes it to the writer w.
+// Returns the number of bytes written in case of success, or an error if anything happened.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+ return t.writeTo(w, "", "", 0, false)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+ var buf bytes.Buffer
+ _, err := t.WriteTo(&buf)
+ if err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
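
WriteTo and ToTomlString are the public faces of writeTo above: the former streams to an io.Writer and reports the byte count, the latter captures the same output in a string. A short editorial sketch:

```go
package main

import (
	"log"
	"os"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
name = "demo"

[owner]
email = "demo@example.com"
`)
	if err != nil {
		log.Fatal(err)
	}

	// Re-encode the parsed document to stdout and report the byte count.
	n, err := tree.WriteTo(os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", n)
}
```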
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString (any error is discarded). Present to implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+ result, _ := t.ToTomlString()
+ return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+// * bool
+// * float64
+// * int64
+// * string
+// * uint64
+// * time.Time
+// * map[string]interface{} (where interface{} is any of this list)
+// * []interface{} (where interface{} is any of this list)
+func (t *Tree) ToMap() map[string]interface{} {
+ result := map[string]interface{}{}
+
+ for k, v := range t.values {
+ switch node := v.(type) {
+ case []*Tree:
+ var array []interface{}
+ for _, item := range node {
+ array = append(array, item.ToMap())
+ }
+ result[k] = array
+ case *Tree:
+ result[k] = node.ToMap()
+ case *tomlValue:
+ result[k] = node.value
+ }
+ }
+ return result
+}
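
Because ToMap yields only built-in Go types, its result can be handed straight to encoders that know nothing about TOML. A sketch converting a table array to JSON (illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[[servers]]
name = "alpha"

[[servers]]
name = "beta"
`)
	if err != nil {
		log.Fatal(err)
	}

	// []*Tree (a table array) becomes []interface{} of plain maps.
	out, err := json.Marshal(tree.ToMap())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"servers":[{"name":"alpha"},{"name":"beta"}]}
}
```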
diff --git a/vendor/github.com/peterh/liner/COPYING b/vendor/github.com/peterh/liner/COPYING
new file mode 100644
index 00000000..9e8c9f20
--- /dev/null
+++ b/vendor/github.com/peterh/liner/COPYING
@@ -0,0 +1,21 @@
+Copyright © 2012 Peter Harris
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/peterh/liner/README.md b/vendor/github.com/peterh/liner/README.md
new file mode 100644
index 00000000..9148b249
--- /dev/null
+++ b/vendor/github.com/peterh/liner/README.md
@@ -0,0 +1,100 @@
+Liner
+=====
+
+Liner is a command line editor with history. It was inspired by linenoise;
+everything Unix-like is a VT100 (or is trying very hard to be). If your
+terminal is not pretending to be a VT100, change it. Liner also supports
+Windows.
+
+Liner is released under the X11 license (which is similar to the new BSD
+license).
+
+Line Editing
+------------
+
+The following line editing commands are supported on platforms and terminals
+that Liner supports:
+
+Keystroke | Action
+--------- | ------
+Ctrl-A, Home | Move cursor to beginning of line
+Ctrl-E, End | Move cursor to end of line
+Ctrl-B, Left | Move cursor one character left
+Ctrl-F, Right| Move cursor one character right
+Ctrl-Left, Alt-B | Move cursor to previous word
+Ctrl-Right, Alt-F | Move cursor to next word
+Ctrl-D, Del | (if line is *not* empty) Delete character under cursor
+Ctrl-D | (if line *is* empty) End of File - usually quits application
+Ctrl-C | Reset input (create new empty prompt)
+Ctrl-L | Clear screen (line is unmodified)
+Ctrl-T | Transpose previous character with current character
+Ctrl-H, BackSpace | Delete character before cursor
+Ctrl-W | Delete word leading up to cursor
+Ctrl-K | Delete from cursor to end of line
+Ctrl-U | Delete from start of line to cursor
+Ctrl-P, Up | Previous match from history
+Ctrl-N, Down | Next match from history
+Ctrl-R | Reverse Search history (Ctrl-S forward, Ctrl-G cancel)
+Ctrl-Y | Paste from Yank buffer (Alt-Y to paste next yank instead)
+Tab | Next completion
+Shift-Tab | (after Tab) Previous completion
+
+Getting started
+-----------------
+
+```go
+package main
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/peterh/liner"
+)
+
+var (
+ history_fn = filepath.Join(os.TempDir(), ".liner_example_history")
+ names = []string{"john", "james", "mary", "nancy"}
+)
+
+func main() {
+ line := liner.NewLiner()
+ defer line.Close()
+
+ line.SetCtrlCAborts(true)
+
+ line.SetCompleter(func(line string) (c []string) {
+ for _, n := range names {
+ if strings.HasPrefix(n, strings.ToLower(line)) {
+ c = append(c, n)
+ }
+ }
+ return
+ })
+
+ if f, err := os.Open(history_fn); err == nil {
+ line.ReadHistory(f)
+ f.Close()
+ }
+
+ if name, err := line.Prompt("What is your name? "); err == nil {
+ log.Print("Got: ", name)
+ line.AppendHistory(name)
+ } else if err == liner.ErrPromptAborted {
+ log.Print("Aborted")
+ } else {
+ log.Print("Error reading line: ", err)
+ }
+
+ if f, err := os.Create(history_fn); err != nil {
+ log.Print("Error writing history file: ", err)
+ } else {
+ line.WriteHistory(f)
+ f.Close()
+ }
+}
+```
+
+For documentation, see http://godoc.org/github.com/peterh/liner
diff --git a/vendor/github.com/peterh/liner/bsdinput.go b/vendor/github.com/peterh/liner/bsdinput.go
new file mode 100644
index 00000000..35933982
--- /dev/null
+++ b/vendor/github.com/peterh/liner/bsdinput.go
@@ -0,0 +1,41 @@
+// +build openbsd freebsd netbsd
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+const (
+ // Input flags
+ inpck = 0x010
+ istrip = 0x020
+ icrnl = 0x100
+ ixon = 0x200
+
+ // Output flags
+ opost = 0x1
+
+ // Control flags
+ cs8 = 0x300
+
+ // Local flags
+ isig = 0x080
+ icanon = 0x100
+ iexten = 0x400
+)
+
+type termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed int32
+ Ospeed int32
+}
+
+const cursorColumn = false
diff --git a/vendor/github.com/peterh/liner/common.go b/vendor/github.com/peterh/liner/common.go
new file mode 100644
index 00000000..e16ecbc0
--- /dev/null
+++ b/vendor/github.com/peterh/liner/common.go
@@ -0,0 +1,255 @@
+/*
+Package liner implements a simple command line editor, inspired by linenoise
+(https://github.com/antirez/linenoise/). This package supports WIN32 in
+addition to the xterm codes supported by everything else.
+*/
+package liner
+
+import (
+ "bufio"
+ "container/ring"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+type commonState struct {
+ terminalSupported bool
+ outputRedirected bool
+ inputRedirected bool
+ history []string
+ historyMutex sync.RWMutex
+ completer WordCompleter
+ columns int
+ killRing *ring.Ring
+ ctrlCAborts bool
+ r *bufio.Reader
+ tabStyle TabStyle
+ multiLineMode bool
+ cursorRows int
+ maxRows int
+ shouldRestart ShouldRestart
+ needRefresh bool
+}
+
+// TabStyle is used to select how tab completions are displayed.
+type TabStyle int
+
+// Two tab styles are currently available:
+//
+// TabCircular cycles through each completion item and displays it directly on
+// the prompt
+//
+// TabPrints prints the list of completion items to the screen after a second
+// tab key is pressed. This behaves similarly to GNU readline and BASH (which
+// uses readline)
+const (
+ TabCircular TabStyle = iota
+ TabPrints
+)
+
+// ErrPromptAborted is returned from Prompt or PasswordPrompt when the user presses Ctrl-C
+// if SetCtrlCAborts(true) has been called on the State
+var ErrPromptAborted = errors.New("prompt aborted")
+
+// ErrNotTerminalOutput is returned from Prompt or PasswordPrompt if the
+// platform is normally supported, but stdout has been redirected
+var ErrNotTerminalOutput = errors.New("standard output is not a terminal")
+
+// ErrInvalidPrompt is returned from Prompt or PasswordPrompt if the
+// prompt contains any unprintable runes (including substrings that could
+// be colour codes on some platforms).
+var ErrInvalidPrompt = errors.New("invalid prompt")
+
+// ErrInternal is returned when liner experiences an error that it cannot
+// handle. For example, if the number of columns becomes zero during an
+// active call to Prompt
+var ErrInternal = errors.New("liner: internal error")
+
+// KillRingMax is the max number of elements to save on the killring.
+const KillRingMax = 60
+
+// HistoryLimit is the maximum number of entries saved in the scrollback history.
+const HistoryLimit = 1000
+
+// ReadHistory reads scrollback history from r. Returns the number of lines
+// read, and any read error (except io.EOF).
+func (s *State) ReadHistory(r io.Reader) (num int, err error) {
+ s.historyMutex.Lock()
+ defer s.historyMutex.Unlock()
+
+ in := bufio.NewReader(r)
+ num = 0
+ for {
+ line, part, err := in.ReadLine()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return num, err
+ }
+ if part {
+ return num, fmt.Errorf("line %d is too long", num+1)
+ }
+ if !utf8.Valid(line) {
+ return num, fmt.Errorf("invalid string at line %d", num+1)
+ }
+ num++
+ s.history = append(s.history, string(line))
+ if len(s.history) > HistoryLimit {
+ s.history = s.history[1:]
+ }
+ }
+ return num, nil
+}
+
+// WriteHistory writes scrollback history to w. Returns the number of lines
+// successfully written, and any write error.
+//
+// Unlike the rest of liner's API, WriteHistory is safe to call
+// from another goroutine while Prompt is in progress.
+// This exception is to facilitate the saving of the history buffer
+// during an unexpected exit (for example, due to Ctrl-C being invoked)
+func (s *State) WriteHistory(w io.Writer) (num int, err error) {
+ s.historyMutex.RLock()
+ defer s.historyMutex.RUnlock()
+
+ for _, item := range s.history {
+ _, err := fmt.Fprintln(w, item)
+ if err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// AppendHistory appends an entry to the scrollback history. AppendHistory
+// should be called iff Prompt returns a valid command.
+func (s *State) AppendHistory(item string) {
+ s.historyMutex.Lock()
+ defer s.historyMutex.Unlock()
+
+ if len(s.history) > 0 {
+ if item == s.history[len(s.history)-1] {
+ return
+ }
+ }
+ s.history = append(s.history, item)
+ if len(s.history) > HistoryLimit {
+ s.history = s.history[1:]
+ }
+}
+
+// ClearHistory clears the scrollback history.
+func (s *State) ClearHistory() {
+ s.historyMutex.Lock()
+ defer s.historyMutex.Unlock()
+ s.history = nil
+}
+
+// Returns the history lines starting with prefix
+func (s *State) getHistoryByPrefix(prefix string) (ph []string) {
+ for _, h := range s.history {
+ if strings.HasPrefix(h, prefix) {
+ ph = append(ph, h)
+ }
+ }
+ return
+}
+
+// Returns the history lines matching the intelligent search
+func (s *State) getHistoryByPattern(pattern string) (ph []string, pos []int) {
+ if pattern == "" {
+ return
+ }
+ for _, h := range s.history {
+ if i := strings.Index(h, pattern); i >= 0 {
+ ph = append(ph, h)
+ pos = append(pos, i)
+ }
+ }
+ return
+}
+
+// Completer takes the currently edited line content at the left of the cursor
+// and returns a list of completion candidates.
+// If the line is "Hello, wo!!!" and the cursor is before the first '!', "Hello, wo" is passed
+// to the completer which may return {"Hello, world", "Hello, Word"} to have "Hello, world!!!".
+type Completer func(line string) []string
+
+// WordCompleter takes the currently edited line with the cursor position and
+// returns the completion candidates for the partial word to be completed.
+// If the line is "Hello, wo!!!" and the cursor is before the first '!', ("Hello, wo!!!", 9) is passed
+// to the completer which may return ("Hello, ", {"world", "Word"}, "!!!") to have "Hello, world!!!".
+type WordCompleter func(line string, pos int) (head string, completions []string, tail string)
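+
+// As an illustrative sketch (not part of the upstream source), a WordCompleter
+// honouring this contract could split the word under the cursor and offer the
+// assumed demo candidates from the comment above:
+//
+//	func demoCompleter(line string, pos int) (head string, completions []string, tail string) {
+//		r := []rune(line)
+//		head, tail = string(r[:pos]), string(r[pos:]) // pos counts runes
+//		i := strings.LastIndexAny(head, " \t") + 1    // start of the word being completed
+//		return head[:i], []string{"world", "Word"}, tail
+//	}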
+
+// SetCompleter sets the completion function that Liner will call to
+// fetch completion candidates when the user presses tab.
+func (s *State) SetCompleter(f Completer) {
+ if f == nil {
+ s.completer = nil
+ return
+ }
+ s.completer = func(line string, pos int) (string, []string, string) {
+ return "", f(string([]rune(line)[:pos])), string([]rune(line)[pos:])
+ }
+}
+
+// SetWordCompleter sets the completion function that Liner will call to
+// fetch completion candidates when the user presses tab.
+func (s *State) SetWordCompleter(f WordCompleter) {
+ s.completer = f
+}
+
+// SetTabCompletionStyle sets the behavior when the Tab key is pressed
+// for auto-completion. TabCircular is the default behavior and cycles
+// through the list of candidates at the prompt. TabPrints will print
+// the available completion candidates to the screen similar to BASH
+// and GNU Readline
+func (s *State) SetTabCompletionStyle(tabStyle TabStyle) {
+ s.tabStyle = tabStyle
+}
+
+// ModeApplier is the interface that wraps a representation of the terminal
+// mode. ApplyMode sets the terminal to this mode.
+type ModeApplier interface {
+ ApplyMode() error
+}
+
+// SetCtrlCAborts sets whether Prompt on a supported terminal will return an
+// ErrPromptAborted when Ctrl-C is pressed. The default is false (will not
+// return when Ctrl-C is pressed). Unsupported terminals typically raise SIGINT
+// (and Prompt does not return) regardless of the value passed to SetCtrlCAborts.
+func (s *State) SetCtrlCAborts(aborts bool) {
+ s.ctrlCAborts = aborts
+}
+
+// SetMultiLineMode sets whether line is auto-wrapped. The default is false (single line).
+func (s *State) SetMultiLineMode(mlmode bool) {
+ s.multiLineMode = mlmode
+}
+
+// ShouldRestart is passed the error generated by readNext and returns true if
+// the read should be restarted or false if the error should be returned.
+type ShouldRestart func(err error) bool
+
+// SetShouldRestart sets the restart function that Liner will call to determine
+// whether to retry the call to, or return the error returned by, readNext.
+func (s *State) SetShouldRestart(f ShouldRestart) {
+ s.shouldRestart = f
+}
+
+func (s *State) promptUnsupported(p string) (string, error) {
+ if !s.inputRedirected || !s.terminalSupported {
+ fmt.Print(p)
+ }
+ linebuf, _, err := s.r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ return string(linebuf), nil
+}
diff --git a/vendor/github.com/peterh/liner/fallbackinput.go b/vendor/github.com/peterh/liner/fallbackinput.go
new file mode 100644
index 00000000..043fb332
--- /dev/null
+++ b/vendor/github.com/peterh/liner/fallbackinput.go
@@ -0,0 +1,59 @@
+// +build !windows,!linux,!darwin,!openbsd,!freebsd,!netbsd
+
+package liner
+
+import (
+ "bufio"
+ "errors"
+ "os"
+)
+
+// State represents an open terminal
+type State struct {
+ commonState
+}
+
+// Prompt displays p, and then waits for user input. Prompt does not support
+// line editing on this operating system.
+func (s *State) Prompt(p string) (string, error) {
+ return s.promptUnsupported(p)
+}
+
+// PasswordPrompt is not supported in this OS.
+func (s *State) PasswordPrompt(p string) (string, error) {
+ return "", errors.New("liner: function not supported in this terminal")
+}
+
+// NewLiner initializes a new *State
+//
+// Note that this operating system uses a fallback mode without line
+// editing. Patches welcome.
+func NewLiner() *State {
+ var s State
+ s.r = bufio.NewReader(os.Stdin)
+ return &s
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ return nil
+}
+
+// TerminalSupported returns false because line editing is not
+// supported on this platform.
+func TerminalSupported() bool {
+ return false
+}
+
+type noopMode struct{}
+
+func (n noopMode) ApplyMode() error {
+ return nil
+}
+
+// TerminalMode returns a noop InputModeSetter on this platform.
+func TerminalMode() (ModeApplier, error) {
+ return noopMode{}, nil
+}
+
+const cursorColumn = true
diff --git a/vendor/github.com/peterh/liner/go.mod b/vendor/github.com/peterh/liner/go.mod
new file mode 100644
index 00000000..6804de36
--- /dev/null
+++ b/vendor/github.com/peterh/liner/go.mod
@@ -0,0 +1,3 @@
+module github.com/peterh/liner
+
+require github.com/mattn/go-runewidth v0.0.3
diff --git a/vendor/github.com/peterh/liner/go.sum b/vendor/github.com/peterh/liner/go.sum
new file mode 100644
index 00000000..1c189160
--- /dev/null
+++ b/vendor/github.com/peterh/liner/go.sum
@@ -0,0 +1,2 @@
+github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
diff --git a/vendor/github.com/peterh/liner/input.go b/vendor/github.com/peterh/liner/input.go
new file mode 100644
index 00000000..cdb83302
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input.go
@@ -0,0 +1,367 @@
+// +build linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+)
+
+type nexter struct {
+ r rune
+ err error
+}
+
+// State represents an open terminal
+type State struct {
+ commonState
+ origMode termios
+ defaultMode termios
+ next <-chan nexter
+ winch chan os.Signal
+ pending []rune
+ useCHA bool
+}
+
+// NewLiner initializes a new *State, and sets the terminal into raw mode. To
+// restore the terminal to its previous state, call State.Close().
+func NewLiner() *State {
+ var s State
+ s.r = bufio.NewReader(os.Stdin)
+
+ s.terminalSupported = TerminalSupported()
+ if m, err := TerminalMode(); err == nil {
+ s.origMode = *m.(*termios)
+ } else {
+ s.inputRedirected = true
+ }
+ if _, err := getMode(syscall.Stdout); err != 0 {
+ s.outputRedirected = true
+ }
+ if s.inputRedirected && s.outputRedirected {
+ s.terminalSupported = false
+ }
+ if s.terminalSupported && !s.inputRedirected && !s.outputRedirected {
+ mode := s.origMode
+ mode.Iflag &^= icrnl | inpck | istrip | ixon
+ mode.Cflag |= cs8
+ mode.Lflag &^= syscall.ECHO | icanon | iexten
+ mode.ApplyMode()
+
+ winch := make(chan os.Signal, 1)
+ signal.Notify(winch, syscall.SIGWINCH)
+ s.winch = winch
+
+ s.checkOutput()
+ }
+
+ if !s.outputRedirected {
+ s.outputRedirected = !s.getColumns()
+ }
+
+ return &s
+}
+
+var errTimedOut = errors.New("timeout")
+
+func (s *State) startPrompt() {
+ if s.terminalSupported {
+ if m, err := TerminalMode(); err == nil {
+ s.defaultMode = *m.(*termios)
+ mode := s.defaultMode
+ mode.Lflag &^= isig
+ mode.ApplyMode()
+ }
+ }
+ s.restartPrompt()
+}
+
+func (s *State) inputWaiting() bool {
+ return len(s.next) > 0
+}
+
+func (s *State) restartPrompt() {
+ next := make(chan nexter, 200)
+ go func() {
+ for {
+ var n nexter
+ n.r, _, n.err = s.r.ReadRune()
+ next <- n
+ // Shut down nexter loop when an end condition has been reached
+ if n.err != nil || n.r == '\n' || n.r == '\r' || n.r == ctrlC || n.r == ctrlD {
+ close(next)
+ return
+ }
+ }
+ }()
+ s.next = next
+}
+
+func (s *State) stopPrompt() {
+ if s.terminalSupported {
+ s.defaultMode.ApplyMode()
+ }
+}
+
+func (s *State) nextPending(timeout <-chan time.Time) (rune, error) {
+ select {
+ case thing, ok := <-s.next:
+ if !ok {
+ return 0, ErrInternal
+ }
+ if thing.err != nil {
+ return 0, thing.err
+ }
+ s.pending = append(s.pending, thing.r)
+ return thing.r, nil
+ case <-timeout:
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, errTimedOut
+ }
+}
+
+func (s *State) readNext() (interface{}, error) {
+ if len(s.pending) > 0 {
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ var r rune
+ select {
+ case thing, ok := <-s.next:
+ if !ok {
+ return 0, ErrInternal
+ }
+ if thing.err != nil {
+ return nil, thing.err
+ }
+ r = thing.r
+ case <-s.winch:
+ s.getColumns()
+ return winch, nil
+ }
+ if r != esc {
+ return r, nil
+ }
+ s.pending = append(s.pending, r)
+
+ // Wait at most 50 ms for the rest of the escape sequence
+ // If nothing else arrives, it was an actual press of the esc key
+ timeout := time.After(50 * time.Millisecond)
+ flag, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return flag, nil
+ }
+ return unknown, err
+ }
+
+ switch flag {
+ case '[':
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return unknown, err
+ }
+ switch code {
+ case 'A':
+ s.pending = s.pending[:0] // escape code complete
+ return up, nil
+ case 'B':
+ s.pending = s.pending[:0] // escape code complete
+ return down, nil
+ case 'C':
+ s.pending = s.pending[:0] // escape code complete
+ return right, nil
+ case 'D':
+ s.pending = s.pending[:0] // escape code complete
+ return left, nil
+ case 'F':
+ s.pending = s.pending[:0] // escape code complete
+ return end, nil
+ case 'H':
+ s.pending = s.pending[:0] // escape code complete
+ return home, nil
+ case 'Z':
+ s.pending = s.pending[:0] // escape code complete
+ return shiftTab, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num := []rune{code}
+ for {
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return nil, err
+ }
+ switch code {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num = append(num, code)
+ case ';':
+ // Modifier code to follow
+ // This only supports Ctrl-left and Ctrl-right for now
+ x, _ := strconv.ParseInt(string(num), 10, 32)
+ if x != 1 {
+ // Can't be left or right
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ num = num[:0]
+ for {
+ code, err = s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ return nil, err
+ }
+ switch code {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num = append(num, code)
+ case 'C', 'D':
+ // right, left
+ mod, _ := strconv.ParseInt(string(num), 10, 32)
+ if mod != 5 {
+ // Not bare Ctrl
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ s.pending = s.pending[:0] // escape code complete
+ if code == 'C' {
+ return wordRight, nil
+ }
+ return wordLeft, nil
+ default:
+ // Not left or right
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ }
+ case '~':
+ s.pending = s.pending[:0] // escape code complete
+ x, _ := strconv.ParseInt(string(num), 10, 32)
+ switch x {
+ case 2:
+ return insert, nil
+ case 3:
+ return del, nil
+ case 5:
+ return pageUp, nil
+ case 6:
+ return pageDown, nil
+ case 7:
+ return home, nil
+ case 8:
+ return end, nil
+ case 15:
+ return f5, nil
+ case 17:
+ return f6, nil
+ case 18:
+ return f7, nil
+ case 19:
+ return f8, nil
+ case 20:
+ return f9, nil
+ case 21:
+ return f10, nil
+ case 23:
+ return f11, nil
+ case 24:
+ return f12, nil
+ default:
+ return unknown, nil
+ }
+ default:
+ // unrecognized escape code
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ }
+ }
+
+ case 'O':
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return nil, err
+ }
+ s.pending = s.pending[:0] // escape code complete
+ switch code {
+ case 'c':
+ return wordRight, nil
+ case 'd':
+ return wordLeft, nil
+ case 'H':
+ return home, nil
+ case 'F':
+ return end, nil
+ case 'P':
+ return f1, nil
+ case 'Q':
+ return f2, nil
+ case 'R':
+ return f3, nil
+ case 'S':
+ return f4, nil
+ default:
+ return unknown, nil
+ }
+ case 'b':
+ s.pending = s.pending[:0] // escape code complete
+ return altB, nil
+ case 'd':
+ s.pending = s.pending[:0] // escape code complete
+ return altD, nil
+ case 'f':
+ s.pending = s.pending[:0] // escape code complete
+ return altF, nil
+ case 'y':
+ s.pending = s.pending[:0] // escape code complete
+ return altY, nil
+ default:
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+
+ // not reached
+ return r, nil
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ signal.Stop(s.winch)
+ if !s.inputRedirected {
+ s.origMode.ApplyMode()
+ }
+ return nil
+}
+
+// TerminalSupported returns true if the current terminal supports
+// line editing features, and false if liner will use the 'dumb'
+// fallback for input.
+// Note that TerminalSupported does not check all factors that may
+// cause liner to not fully support the terminal (such as stdin redirection)
+func TerminalSupported() bool {
+ bad := map[string]bool{"": true, "dumb": true, "cons25": true}
+ return !bad[strings.ToLower(os.Getenv("TERM"))]
+}
diff --git a/vendor/github.com/peterh/liner/input_darwin.go b/vendor/github.com/peterh/liner/input_darwin.go
new file mode 100644
index 00000000..e98ab4a4
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_darwin.go
@@ -0,0 +1,43 @@
+// +build darwin
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+const (
+ // Input flags
+ inpck = 0x010
+ istrip = 0x020
+ icrnl = 0x100
+ ixon = 0x200
+
+ // Output flags
+ opost = 0x1
+
+ // Control flags
+ cs8 = 0x300
+
+ // Local flags
+ isig = 0x080
+ icanon = 0x100
+ iexten = 0x400
+)
+
+type termios struct {
+ Iflag uintptr
+ Oflag uintptr
+ Cflag uintptr
+ Lflag uintptr
+ Cc [20]byte
+ Ispeed uintptr
+ Ospeed uintptr
+}
+
+// Terminal.app needs a column for the cursor when the input line is at the
+// bottom of the window.
+const cursorColumn = true
diff --git a/vendor/github.com/peterh/liner/input_linux.go b/vendor/github.com/peterh/liner/input_linux.go
new file mode 100644
index 00000000..56ed185f
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TCGETS
+ setTermios = syscall.TCSETS
+)
+
+const (
+ icrnl = syscall.ICRNL
+ inpck = syscall.INPCK
+ istrip = syscall.ISTRIP
+ ixon = syscall.IXON
+ opost = syscall.OPOST
+ cs8 = syscall.CS8
+ isig = syscall.ISIG
+ icanon = syscall.ICANON
+ iexten = syscall.IEXTEN
+)
+
+type termios struct {
+ syscall.Termios
+}
+
+const cursorColumn = false
diff --git a/vendor/github.com/peterh/liner/input_windows.go b/vendor/github.com/peterh/liner/input_windows.go
new file mode 100644
index 00000000..36e95161
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_windows.go
@@ -0,0 +1,364 @@
+package liner
+
+import (
+ "bufio"
+ "os"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetStdHandle = kernel32.NewProc("GetStdHandle")
+ procReadConsoleInput = kernel32.NewProc("ReadConsoleInputW")
+ procGetNumberOfConsoleInputEvents = kernel32.NewProc("GetNumberOfConsoleInputEvents")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+)
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ std_input_handle = uint32(-10 & 0xFFFFFFFF)
+ std_output_handle = uint32(-11 & 0xFFFFFFFF)
+ std_error_handle = uint32(-12 & 0xFFFFFFFF)
+ invalid_handle_value = ^uintptr(0)
+)
+
+type inputMode uint32
+
+// State represents an open terminal
+type State struct {
+ commonState
+ handle syscall.Handle
+ hOut syscall.Handle
+ origMode inputMode
+ defaultMode inputMode
+ key interface{}
+ repeat uint16
+}
+
+const (
+ enableEchoInput = 0x4
+ enableInsertMode = 0x20
+ enableLineInput = 0x2
+ enableMouseInput = 0x10
+ enableProcessedInput = 0x1
+ enableQuickEditMode = 0x40
+ enableWindowInput = 0x8
+)
+
+// NewLiner initializes a new *State, and sets the terminal into raw mode. To
+// restore the terminal to its previous state, call State.Close().
+func NewLiner() *State {
+ var s State
+ hIn, _, _ := procGetStdHandle.Call(uintptr(std_input_handle))
+ s.handle = syscall.Handle(hIn)
+ hOut, _, _ := procGetStdHandle.Call(uintptr(std_output_handle))
+ s.hOut = syscall.Handle(hOut)
+
+ s.terminalSupported = true
+ if m, err := TerminalMode(); err == nil {
+ s.origMode = m.(inputMode)
+ mode := s.origMode
+ mode &^= enableEchoInput
+ mode &^= enableInsertMode
+ mode &^= enableLineInput
+ mode &^= enableMouseInput
+ mode |= enableWindowInput
+ mode.ApplyMode()
+ } else {
+ s.inputRedirected = true
+ s.r = bufio.NewReader(os.Stdin)
+ }
+
+ s.getColumns()
+ s.outputRedirected = s.columns <= 0
+
+ return &s
+}
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ focus_event = 0x0010
+ key_event = 0x0001
+ menu_event = 0x0008
+ mouse_event = 0x0002
+ window_buffer_size_event = 0x0004
+)
+
+type input_record struct {
+ eventType uint16
+ pad uint16
+ blob [16]byte
+}
+
+type key_event_record struct {
+ KeyDown int32
+ RepeatCount uint16
+ VirtualKeyCode uint16
+ VirtualScanCode uint16
+ Char uint16
+ ControlKeyState uint32
+}
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ vk_tab = 0x09
+ vk_menu = 0x12 // ALT key
+ vk_prior = 0x21
+ vk_next = 0x22
+ vk_end = 0x23
+ vk_home = 0x24
+ vk_left = 0x25
+ vk_up = 0x26
+ vk_right = 0x27
+ vk_down = 0x28
+ vk_insert = 0x2d
+ vk_delete = 0x2e
+ vk_f1 = 0x70
+ vk_f2 = 0x71
+ vk_f3 = 0x72
+ vk_f4 = 0x73
+ vk_f5 = 0x74
+ vk_f6 = 0x75
+ vk_f7 = 0x76
+ vk_f8 = 0x77
+ vk_f9 = 0x78
+ vk_f10 = 0x79
+ vk_f11 = 0x7a
+ vk_f12 = 0x7b
+ bKey = 0x42
+ dKey = 0x44
+ fKey = 0x46
+ yKey = 0x59
+)
+
+const (
+ shiftPressed = 0x0010
+ leftAltPressed = 0x0002
+ leftCtrlPressed = 0x0008
+ rightAltPressed = 0x0001
+ rightCtrlPressed = 0x0004
+
+ modKeys = shiftPressed | leftAltPressed | rightAltPressed | leftCtrlPressed | rightCtrlPressed
+)
+
+// inputWaiting only returns true if the next call to readNext will return immediately.
+func (s *State) inputWaiting() bool {
+ var num uint32
+ ok, _, _ := procGetNumberOfConsoleInputEvents.Call(uintptr(s.handle), uintptr(unsafe.Pointer(&num)))
+ if ok == 0 {
+ // call failed, so we cannot guarantee a non-blocking readNext
+ return false
+ }
+
+ // during a "paste", input events are always an odd number, and
+ // the last one results in a blocking readNext, so return false
+ // when num is 1 or 0.
+ return num > 1
+}
+
+func (s *State) readNext() (interface{}, error) {
+ if s.repeat > 0 {
+ s.repeat--
+ return s.key, nil
+ }
+
+ var input input_record
+ pbuf := uintptr(unsafe.Pointer(&input))
+ var rv uint32
+ prv := uintptr(unsafe.Pointer(&rv))
+
+ var surrogate uint16
+
+ for {
+ ok, _, err := procReadConsoleInput.Call(uintptr(s.handle), pbuf, 1, prv)
+
+ if ok == 0 {
+ return nil, err
+ }
+
+ if input.eventType == window_buffer_size_event {
+ xy := (*coord)(unsafe.Pointer(&input.blob[0]))
+ s.columns = int(xy.x)
+ return winch, nil
+ }
+ if input.eventType != key_event {
+ continue
+ }
+ ke := (*key_event_record)(unsafe.Pointer(&input.blob[0]))
+ if ke.KeyDown == 0 {
+ if ke.VirtualKeyCode == vk_menu && ke.Char > 0 {
+ // paste of unicode (eg. via ALT-numpad)
+ if surrogate > 0 {
+ return utf16.DecodeRune(rune(surrogate), rune(ke.Char)), nil
+ } else if utf16.IsSurrogate(rune(ke.Char)) {
+ surrogate = ke.Char
+ continue
+ } else {
+ return rune(ke.Char), nil
+ }
+ }
+ continue
+ }
+
+ if ke.VirtualKeyCode == vk_tab && ke.ControlKeyState&modKeys == shiftPressed {
+ s.key = shiftTab
+ } else if ke.VirtualKeyCode == bKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altB
+ } else if ke.VirtualKeyCode == dKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altD
+ } else if ke.VirtualKeyCode == fKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altF
+ } else if ke.VirtualKeyCode == yKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altY
+ } else if ke.Char > 0 {
+ if surrogate > 0 {
+ s.key = utf16.DecodeRune(rune(surrogate), rune(ke.Char))
+ } else if utf16.IsSurrogate(rune(ke.Char)) {
+ surrogate = ke.Char
+ continue
+ } else {
+ s.key = rune(ke.Char)
+ }
+ } else {
+ switch ke.VirtualKeyCode {
+ case vk_prior:
+ s.key = pageUp
+ case vk_next:
+ s.key = pageDown
+ case vk_end:
+ s.key = end
+ case vk_home:
+ s.key = home
+ case vk_left:
+ s.key = left
+ if ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) != 0 {
+ if ke.ControlKeyState&modKeys == ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) {
+ s.key = wordLeft
+ }
+ }
+ case vk_right:
+ s.key = right
+ if ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) != 0 {
+ if ke.ControlKeyState&modKeys == ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) {
+ s.key = wordRight
+ }
+ }
+ case vk_up:
+ s.key = up
+ case vk_down:
+ s.key = down
+ case vk_insert:
+ s.key = insert
+ case vk_delete:
+ s.key = del
+ case vk_f1:
+ s.key = f1
+ case vk_f2:
+ s.key = f2
+ case vk_f3:
+ s.key = f3
+ case vk_f4:
+ s.key = f4
+ case vk_f5:
+ s.key = f5
+ case vk_f6:
+ s.key = f6
+ case vk_f7:
+ s.key = f7
+ case vk_f8:
+ s.key = f8
+ case vk_f9:
+ s.key = f9
+ case vk_f10:
+ s.key = f10
+ case vk_f11:
+ s.key = f11
+ case vk_f12:
+ s.key = f12
+ default:
+ // Eat modifier keys
+ // TODO: return Action(Unknown) if the key isn't a
+ // modifier.
+ continue
+ }
+ }
+
+ if ke.RepeatCount > 1 {
+ s.repeat = ke.RepeatCount - 1
+ }
+ return s.key, nil
+ }
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ s.origMode.ApplyMode()
+ return nil
+}
+
+func (s *State) startPrompt() {
+ if m, err := TerminalMode(); err == nil {
+ s.defaultMode = m.(inputMode)
+ mode := s.defaultMode
+ mode &^= enableProcessedInput
+ mode.ApplyMode()
+ }
+}
+
+func (s *State) restartPrompt() {
+}
+
+func (s *State) stopPrompt() {
+ s.defaultMode.ApplyMode()
+}
+
+// TerminalSupported returns true because line editing is always
+// supported on Windows.
+func TerminalSupported() bool {
+ return true
+}
+
+func (mode inputMode) ApplyMode() error {
+ hIn, _, err := procGetStdHandle.Call(uintptr(std_input_handle))
+ if hIn == invalid_handle_value || hIn == 0 {
+ return err
+ }
+ ok, _, err := procSetConsoleMode.Call(hIn, uintptr(mode))
+ if ok != 0 {
+ err = nil
+ }
+ return err
+}
+
+// TerminalMode returns the current terminal input mode as an InputModeSetter.
+//
+// This function is provided for convenience, and should
+// not be necessary for most users of liner.
+func TerminalMode() (ModeApplier, error) {
+ var mode inputMode
+ hIn, _, err := procGetStdHandle.Call(uintptr(std_input_handle))
+ if hIn == invalid_handle_value || hIn == 0 {
+ return nil, err
+ }
+ ok, _, err := procGetConsoleMode.Call(hIn, uintptr(unsafe.Pointer(&mode)))
+ if ok != 0 {
+ err = nil
+ }
+ return mode, err
+}
+
+const cursorColumn = true
diff --git a/vendor/github.com/peterh/liner/line.go b/vendor/github.com/peterh/liner/line.go
new file mode 100644
index 00000000..076a195c
--- /dev/null
+++ b/vendor/github.com/peterh/liner/line.go
@@ -0,0 +1,1171 @@
+// +build windows linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "bufio"
+ "container/ring"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type action int
+
+const (
+ left action = iota
+ right
+ up
+ down
+ home
+ end
+ insert
+ del
+ pageUp
+ pageDown
+ f1
+ f2
+ f3
+ f4
+ f5
+ f6
+ f7
+ f8
+ f9
+ f10
+ f11
+ f12
+ altB
+ altD
+ altF
+ altY
+ shiftTab
+ wordLeft
+ wordRight
+ winch
+ unknown
+)
+
+const (
+ ctrlA = 1
+ ctrlB = 2
+ ctrlC = 3
+ ctrlD = 4
+ ctrlE = 5
+ ctrlF = 6
+ ctrlG = 7
+ ctrlH = 8
+ tab = 9
+ lf = 10
+ ctrlK = 11
+ ctrlL = 12
+ cr = 13
+ ctrlN = 14
+ ctrlO = 15
+ ctrlP = 16
+ ctrlQ = 17
+ ctrlR = 18
+ ctrlS = 19
+ ctrlT = 20
+ ctrlU = 21
+ ctrlV = 22
+ ctrlW = 23
+ ctrlX = 24
+ ctrlY = 25
+ ctrlZ = 26
+ esc = 27
+ bs = 127
+)
+
+const (
+ beep = "\a"
+)
+
+type tabDirection int
+
+const (
+ tabForward tabDirection = iota
+ tabReverse
+)
+
+func (s *State) refresh(prompt []rune, buf []rune, pos int) error {
+ if s.columns == 0 {
+ return ErrInternal
+ }
+
+ s.needRefresh = false
+ if s.multiLineMode {
+ return s.refreshMultiLine(prompt, buf, pos)
+ }
+ return s.refreshSingleLine(prompt, buf, pos)
+}
+
+func (s *State) refreshSingleLine(prompt []rune, buf []rune, pos int) error {
+ s.cursorPos(0)
+ _, err := fmt.Print(string(prompt))
+ if err != nil {
+ return err
+ }
+
+ pLen := countGlyphs(prompt)
+ bLen := countGlyphs(buf)
+ // on some OSes / terminals an extra column is needed to place the cursor char
+ if cursorColumn {
+ bLen++
+ }
+ pos = countGlyphs(buf[:pos])
+ if pLen+bLen < s.columns {
+ _, err = fmt.Print(string(buf))
+ s.eraseLine()
+ s.cursorPos(pLen + pos)
+ } else {
+ // Find space available
+ space := s.columns - pLen
+ space-- // space for cursor
+ start := pos - space/2
+ end := start + space
+ if end > bLen {
+ end = bLen
+ start = end - space
+ }
+ if start < 0 {
+ start = 0
+ end = space
+ }
+ pos -= start
+
+ // Leave space for markers
+ if start > 0 {
+ start++
+ }
+ if end < bLen {
+ end--
+ }
+ startRune := len(getPrefixGlyphs(buf, start))
+ line := getPrefixGlyphs(buf[startRune:], end-start)
+
+ // Output
+ if start > 0 {
+ fmt.Print("{")
+ }
+ fmt.Print(string(line))
+ if end < bLen {
+ fmt.Print("}")
+ }
+
+ // Set cursor position
+ s.eraseLine()
+ s.cursorPos(pLen + pos)
+ }
+ return err
+}
+
+func (s *State) refreshMultiLine(prompt []rune, buf []rune, pos int) error {
+ promptColumns := countMultiLineGlyphs(prompt, s.columns, 0)
+ totalColumns := countMultiLineGlyphs(buf, s.columns, promptColumns)
+ // on some OSes / terminals an extra column is needed to place the cursor char
+ // if cursorColumn {
+ // totalColumns++
+ // }
+
+ // it looks like Multiline mode always assumes that the cursor needs an extra column,
+ // and always emits a newline if we are at the screen end, so no workarounds are needed there
+
+ totalRows := (totalColumns + s.columns - 1) / s.columns
+ maxRows := s.maxRows
+ if totalRows > s.maxRows {
+ s.maxRows = totalRows
+ }
+ cursorRows := s.cursorRows
+ if cursorRows == 0 {
+ cursorRows = 1
+ }
+
+ /* First step: clear all the lines used before. To do so start by
+ * going to the last row. */
+ if maxRows-cursorRows > 0 {
+ s.moveDown(maxRows - cursorRows)
+ }
+
+ /* Now for every row clear it, go up. */
+ for i := 0; i < maxRows-1; i++ {
+ s.cursorPos(0)
+ s.eraseLine()
+ s.moveUp(1)
+ }
+
+ /* Clean the top line. */
+ s.cursorPos(0)
+ s.eraseLine()
+
+ /* Write the prompt and the current buffer content */
+ if _, err := fmt.Print(string(prompt)); err != nil {
+ return err
+ }
+ if _, err := fmt.Print(string(buf)); err != nil {
+ return err
+ }
+
+ /* If we are at the very end of the screen with our prompt, we need to
+ * emit a newline and move the prompt to the first column. */
+ cursorColumns := countMultiLineGlyphs(buf[:pos], s.columns, promptColumns)
+ if cursorColumns == totalColumns && totalColumns%s.columns == 0 {
+ s.emitNewLine()
+ s.cursorPos(0)
+ totalRows++
+ if totalRows > s.maxRows {
+ s.maxRows = totalRows
+ }
+ }
+
+ /* Move cursor to right position. */
+ cursorRows = (cursorColumns + s.columns) / s.columns
+ if s.cursorRows > 0 && totalRows-cursorRows > 0 {
+ s.moveUp(totalRows - cursorRows)
+ }
+ /* Set column. */
+ s.cursorPos(cursorColumns % s.columns)
+
+ s.cursorRows = cursorRows
+ return nil
+}
+
+func (s *State) resetMultiLine(prompt []rune, buf []rune, pos int) {
+ columns := countMultiLineGlyphs(prompt, s.columns, 0)
+ columns = countMultiLineGlyphs(buf[:pos], s.columns, columns)
+ columns += 2 // ^C
+ cursorRows := (columns + s.columns) / s.columns
+ if s.maxRows-cursorRows > 0 {
+ for i := 0; i < s.maxRows-cursorRows; i++ {
+ fmt.Println() // always moves the cursor down or scrolls the window up as needed
+ }
+ }
+ s.maxRows = 1
+ s.cursorRows = 0
+}
+
+func longestCommonPrefix(strs []string) string {
+ if len(strs) == 0 {
+ return ""
+ }
+ longest := strs[0]
+
+ for _, str := range strs[1:] {
+ for !strings.HasPrefix(str, longest) {
+ longest = longest[:len(longest)-1]
+ }
+ }
+ // Remove trailing partial runes
+ longest = strings.TrimRight(longest, "\uFFFD")
+ return longest
+}
+
+func (s *State) circularTabs(items []string) func(tabDirection) (string, error) {
+ item := -1
+ return func(direction tabDirection) (string, error) {
+ if direction == tabForward {
+ if item < len(items)-1 {
+ item++
+ } else {
+ item = 0
+ }
+ } else if direction == tabReverse {
+ if item > 0 {
+ item--
+ } else {
+ item = len(items) - 1
+ }
+ }
+ return items[item], nil
+ }
+}
+
+func calculateColumns(screenWidth int, items []string) (numColumns, numRows, maxWidth int) {
+ for _, item := range items {
+ if len(item) >= screenWidth {
+ return 1, len(items), screenWidth - 1
+ }
+ if len(item) >= maxWidth {
+ maxWidth = len(item) + 1
+ }
+ }
+
+ numColumns = screenWidth / maxWidth
+ numRows = len(items) / numColumns
+ if len(items)%numColumns > 0 {
+ numRows++
+ }
+
+ if len(items) <= numColumns {
+ maxWidth = 0
+ }
+
+ return
+}
+
+func (s *State) printedTabs(items []string) func(tabDirection) (string, error) {
+ numTabs := 1
+ prefix := longestCommonPrefix(items)
+ return func(direction tabDirection) (string, error) {
+ if len(items) == 1 {
+ return items[0], nil
+ }
+
+ if numTabs == 2 {
+ if len(items) > 100 {
+ fmt.Printf("\nDisplay all %d possibilities? (y or n) ", len(items))
+ prompt:
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ return prefix, err
+ }
+
+ if key, ok := next.(rune); ok {
+ switch key {
+ case 'n', 'N':
+ return prefix, nil
+ case 'y', 'Y':
+ break prompt
+ case ctrlC, ctrlD, cr, lf:
+ s.restartPrompt()
+ }
+ }
+ }
+ }
+ fmt.Println("")
+
+ numColumns, numRows, maxWidth := calculateColumns(s.columns, items)
+
+ for i := 0; i < numRows; i++ {
+ for j := 0; j < numColumns*numRows; j += numRows {
+ if i+j < len(items) {
+ if maxWidth > 0 {
+ fmt.Printf("%-*.[1]*s", maxWidth, items[i+j])
+ } else {
+ fmt.Printf("%v ", items[i+j])
+ }
+ }
+ }
+ fmt.Println("")
+ }
+ } else {
+ numTabs++
+ }
+ return prefix, nil
+ }
+}
+
+func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interface{}, error) {
+ if s.completer == nil {
+ return line, pos, rune(esc), nil
+ }
+ head, list, tail := s.completer(string(line), pos)
+ if len(list) <= 0 {
+ return line, pos, rune(esc), nil
+ }
+ hl := utf8.RuneCountInString(head)
+ if len(list) == 1 {
+ err := s.refresh(p, []rune(head+list[0]+tail), hl+utf8.RuneCountInString(list[0]))
+ return []rune(head + list[0] + tail), hl + utf8.RuneCountInString(list[0]), rune(esc), err
+ }
+
+ direction := tabForward
+ tabPrinter := s.circularTabs(list)
+ if s.tabStyle == TabPrints {
+ tabPrinter = s.printedTabs(list)
+ }
+
+ for {
+ pick, err := tabPrinter(direction)
+ if err != nil {
+ return line, pos, rune(esc), err
+ }
+ err = s.refresh(p, []rune(head+pick+tail), hl+utf8.RuneCountInString(pick))
+ if err != nil {
+ return line, pos, rune(esc), err
+ }
+
+ next, err := s.readNext()
+ if err != nil {
+ return line, pos, rune(esc), err
+ }
+ if key, ok := next.(rune); ok {
+ if key == tab {
+ direction = tabForward
+ continue
+ }
+ if key == esc {
+ return line, pos, rune(esc), nil
+ }
+ }
+ if a, ok := next.(action); ok && a == shiftTab {
+ direction = tabReverse
+ continue
+ }
+ return []rune(head + pick + tail), hl + utf8.RuneCountInString(pick), next, nil
+ }
+}
+
+// reverseISearch implements a bash-like reverse incremental history search.
+func (s *State) reverseISearch(origLine []rune, origPos int) ([]rune, int, interface{}, error) {
+ p := "(reverse-i-search)`': "
+ err := s.refresh([]rune(p), origLine, origPos)
+ if err != nil {
+ return origLine, origPos, rune(esc), err
+ }
+
+ line := []rune{}
+ pos := 0
+ foundLine := string(origLine)
+ foundPos := origPos
+
+ getLine := func() ([]rune, []rune, int) {
+ search := string(line)
+ prompt := "(reverse-i-search)`%s': "
+ return []rune(fmt.Sprintf(prompt, search)), []rune(foundLine), foundPos
+ }
+
+ history, positions := s.getHistoryByPattern(string(line))
+ historyPos := len(history) - 1
+
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ return []rune(foundLine), foundPos, rune(esc), err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case ctrlR: // Search backwards
+ if historyPos > 0 && historyPos < len(history) {
+ historyPos--
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlS: // Search forward
+ if historyPos < len(history)-1 && historyPos >= 0 {
+ historyPos++
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+
+ // For each char deleted, display the last matching line of history
+ history, positions := s.getHistoryByPattern(string(line))
+ historyPos = len(history) - 1
+ if len(history) > 0 {
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ foundLine = ""
+ foundPos = 0
+ }
+ }
+ case ctrlG: // Cancel
+ return origLine, origPos, rune(esc), err
+
+ case tab, cr, lf, ctrlA, ctrlB, ctrlD, ctrlE, ctrlF, ctrlK,
+ ctrlL, ctrlN, ctrlO, ctrlP, ctrlQ, ctrlT, ctrlU, ctrlV, ctrlW, ctrlX, ctrlY, ctrlZ:
+ fallthrough
+ case 0, ctrlC, esc, 28, 29, 30, 31:
+ return []rune(foundLine), foundPos, next, err
+ default:
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+
+ // For each keystroke typed, display the last matching line of history
+ history, positions = s.getHistoryByPattern(string(line))
+ historyPos = len(history) - 1
+ if len(history) > 0 {
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ foundLine = ""
+ foundPos = 0
+ }
+ }
+ case action:
+ return []rune(foundLine), foundPos, next, err
+ }
+ err = s.refresh(getLine())
+ if err != nil {
+ return []rune(foundLine), foundPos, rune(esc), err
+ }
+ }
+}
+
+// addToKillRing adds some text to the kill ring. If mode is 0 it adds it to a
+// new node at the end of the kill ring, and moves the current pointer to the new
+// node. If mode is 1 or 2 it appends or prepends the text to the current entry
+// of the killRing.
+func (s *State) addToKillRing(text []rune, mode int) {
+ // Don't use the same underlying array as text
+ killLine := make([]rune, len(text))
+ copy(killLine, text)
+
+ // Point killRing to a newNode, procedure depends on the killring state and
+ // append mode.
+ if mode == 0 { // Add new node to killRing
+ if s.killRing == nil { // if killring is empty, create a new one
+ s.killRing = ring.New(1)
+ } else if s.killRing.Len() >= KillRingMax { // if killring is "full"
+ s.killRing = s.killRing.Next()
+ } else { // Normal case
+ s.killRing.Link(ring.New(1))
+ s.killRing = s.killRing.Next()
+ }
+ } else {
+ if s.killRing == nil { // if killring is empty, create a new one
+ s.killRing = ring.New(1)
+ s.killRing.Value = []rune{}
+ }
+ if mode == 1 { // Append to last entry
+ killLine = append(s.killRing.Value.([]rune), killLine...)
+ } else if mode == 2 { // Prepend to last entry
+ killLine = append(killLine, s.killRing.Value.([]rune)...)
+ }
+ }
+
+ // Save text in the current killring node
+ s.killRing.Value = killLine
+}
+
+func (s *State) yank(p []rune, text []rune, pos int) ([]rune, int, interface{}, error) {
+ if s.killRing == nil {
+ return text, pos, rune(esc), nil
+ }
+
+ lineStart := text[:pos]
+ lineEnd := text[pos:]
+ var line []rune
+
+ for {
+ value := s.killRing.Value.([]rune)
+ line = make([]rune, 0)
+ line = append(line, lineStart...)
+ line = append(line, value...)
+ line = append(line, lineEnd...)
+
+ pos = len(lineStart) + len(value)
+ err := s.refresh(p, line, pos)
+ if err != nil {
+ return line, pos, 0, err
+ }
+
+ next, err := s.readNext()
+ if err != nil {
+ return line, pos, next, err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ return line, pos, next, nil
+ case action:
+ switch v {
+ case altY:
+ s.killRing = s.killRing.Prev()
+ default:
+ return line, pos, next, nil
+ }
+ }
+ }
+}
+
+// Prompt displays p and returns a line of user input, not including a trailing
+// newline character. An io.EOF error is returned if the user signals end-of-file
+// by pressing Ctrl-D. Prompt allows line editing if the terminal supports it.
+func (s *State) Prompt(prompt string) (string, error) {
+ return s.PromptWithSuggestion(prompt, "", 0)
+}
+
+// PromptWithSuggestion displays prompt and an editable text with cursor at
+// given position. The cursor will be set to the end of the line if the given position
+// is negative or greater than the length of the text (in runes). Returns a line of user input, not
+// including a trailing newline character. An io.EOF error is returned if the user
+// signals end-of-file by pressing Ctrl-D.
+func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (string, error) {
+ for _, r := range prompt {
+ if unicode.Is(unicode.C, r) {
+ return "", ErrInvalidPrompt
+ }
+ }
+ if s.inputRedirected || !s.terminalSupported {
+ return s.promptUnsupported(prompt)
+ }
+ p := []rune(prompt)
+ const minWorkingSpace = 10
+ if s.columns < countGlyphs(p)+minWorkingSpace {
+ return s.tooNarrow(prompt)
+ }
+ if s.outputRedirected {
+ return "", ErrNotTerminalOutput
+ }
+
+ s.historyMutex.RLock()
+ defer s.historyMutex.RUnlock()
+
+ fmt.Print(prompt)
+ var line = []rune(text)
+ historyEnd := ""
+ var historyPrefix []string
+ historyPos := 0
+ historyStale := true
+ historyAction := false // used to mark history related actions
+ killAction := 0 // used to mark kill related actions
+
+ defer s.stopPrompt()
+
+ if pos < 0 || len(line) < pos {
+ pos = len(line)
+ }
+ if len(line) > 0 {
+ err := s.refresh(p, line, pos)
+ if err != nil {
+ return "", err
+ }
+ }
+
+restart:
+ s.startPrompt()
+ s.getColumns()
+
+mainLoop:
+ for {
+ next, err := s.readNext()
+ haveNext:
+ if err != nil {
+ if s.shouldRestart != nil && s.shouldRestart(err) {
+ goto restart
+ }
+ return "", err
+ }
+
+ historyAction = false
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case cr, lf:
+ if s.needRefresh {
+ err := s.refresh(p, line, pos)
+ if err != nil {
+ return "", err
+ }
+ }
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ fmt.Println()
+ break mainLoop
+ case ctrlA: // Start of line
+ pos = 0
+ s.needRefresh = true
+ case ctrlE: // End of line
+ pos = len(line)
+ s.needRefresh = true
+ case ctrlB: // left
+ if pos > 0 {
+ pos -= len(getSuffixGlyphs(line[:pos], 1))
+ s.needRefresh = true
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlF: // right
+ if pos < len(line) {
+ pos += len(getPrefixGlyphs(line[pos:], 1))
+ s.needRefresh = true
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlD: // del
+ if pos == 0 && len(line) == 0 {
+ // exit
+ return "", io.EOF
+ }
+
+ // ctrlD is a potential EOF, so the rune reader shuts down.
+ // Therefore, if it isn't actually an EOF, we must re-startPrompt.
+ s.restartPrompt()
+
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ n := len(getPrefixGlyphs(line[pos:], 1))
+ line = append(line[:pos], line[pos+n:]...)
+ s.needRefresh = true
+ }
+ case ctrlK: // delete remainder of line
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ if killAction > 0 {
+ s.addToKillRing(line[pos:], 1) // Add in append mode
+ } else {
+ s.addToKillRing(line[pos:], 0) // Add in normal mode
+ }
+
+ killAction = 2 // Mark that there was a kill action
+ line = line[:pos]
+ s.needRefresh = true
+ }
+ case ctrlP: // up
+ historyAction = true
+ if historyStale {
+ historyPrefix = s.getHistoryByPrefix(string(line))
+ historyPos = len(historyPrefix)
+ historyStale = false
+ }
+ if historyPos > 0 {
+ if historyPos == len(historyPrefix) {
+ historyEnd = string(line)
+ }
+ historyPos--
+ line = []rune(historyPrefix[historyPos])
+ pos = len(line)
+ s.needRefresh = true
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlN: // down
+ historyAction = true
+ if historyStale {
+ historyPrefix = s.getHistoryByPrefix(string(line))
+ historyPos = len(historyPrefix)
+ historyStale = false
+ }
+ if historyPos < len(historyPrefix) {
+ historyPos++
+ if historyPos == len(historyPrefix) {
+ line = []rune(historyEnd)
+ } else {
+ line = []rune(historyPrefix[historyPos])
+ }
+ pos = len(line)
+ s.needRefresh = true
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlT: // transpose prev glyph with glyph under cursor
+ if len(line) < 2 || pos < 1 {
+ fmt.Print(beep)
+ } else {
+ if pos == len(line) {
+ pos -= len(getSuffixGlyphs(line, 1))
+ }
+ prev := getSuffixGlyphs(line[:pos], 1)
+ next := getPrefixGlyphs(line[pos:], 1)
+ scratch := make([]rune, len(prev))
+ copy(scratch, prev)
+ copy(line[pos-len(prev):], next)
+ copy(line[pos-len(prev)+len(next):], scratch)
+ pos += len(next)
+ s.needRefresh = true
+ }
+ case ctrlL: // clear screen
+ s.eraseScreen()
+ s.needRefresh = true
+ case ctrlC: // reset
+ fmt.Println("^C")
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ if s.ctrlCAborts {
+ return "", ErrPromptAborted
+ }
+ line = line[:0]
+ pos = 0
+ fmt.Print(prompt)
+ s.restartPrompt()
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+ s.needRefresh = true
+ }
+ case ctrlU: // Erase line before cursor
+ if killAction > 0 {
+ s.addToKillRing(line[:pos], 2) // Add in prepend mode
+ } else {
+ s.addToKillRing(line[:pos], 0) // Add in normal mode
+ }
+
+ killAction = 2 // Mark that there was some killing
+ line = line[pos:]
+ pos = 0
+ s.needRefresh = true
+ case ctrlW: // Erase word
+ if pos == 0 {
+ fmt.Print(beep)
+ break
+ }
+ // Remove whitespace to the left
+ var buf []rune // Store the deleted chars in a buffer
+ for {
+ if pos == 0 || !unicode.IsSpace(line[pos-1]) {
+ break
+ }
+ buf = append(buf, line[pos-1])
+ line = append(line[:pos-1], line[pos:]...)
+ pos--
+ }
+ // Remove non-whitespace to the left
+ for {
+ if pos == 0 || unicode.IsSpace(line[pos-1]) {
+ break
+ }
+ buf = append(buf, line[pos-1])
+ line = append(line[:pos-1], line[pos:]...)
+ pos--
+ }
+ // Invert the buffer and save the result on the killRing
+ var newBuf []rune
+ for i := len(buf) - 1; i >= 0; i-- {
+ newBuf = append(newBuf, buf[i])
+ }
+ if killAction > 0 {
+ s.addToKillRing(newBuf, 2) // Add in prepend mode
+ } else {
+ s.addToKillRing(newBuf, 0) // Add in normal mode
+ }
+ killAction = 2 // Mark that there was some killing
+
+ s.needRefresh = true
+ case ctrlY: // Paste from Yank buffer
+ line, pos, next, err = s.yank(p, line, pos)
+ goto haveNext
+ case ctrlR: // Reverse Search
+ line, pos, next, err = s.reverseISearch(line, pos)
+ s.needRefresh = true
+ goto haveNext
+ case tab: // Tab completion
+ line, pos, next, err = s.tabComplete(p, line, pos)
+ goto haveNext
+ // Catch keys that do nothing, but you don't want them to beep
+ case esc:
+ // DO NOTHING
+ // Unused keys
+ case ctrlG, ctrlO, ctrlQ, ctrlS, ctrlV, ctrlX, ctrlZ:
+ fallthrough
+ // Catch unhandled control codes (anything <= 31)
+ case 0, 28, 29, 30, 31:
+ fmt.Print(beep)
+ default:
+ if pos == len(line) && !s.multiLineMode &&
+ len(p)+len(line) < s.columns*4 && // Avoid countGlyphs on large lines
+ countGlyphs(p)+countGlyphs(line) < s.columns-1 {
+ line = append(line, v)
+ fmt.Printf("%c", v)
+ pos++
+ } else {
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+ s.needRefresh = true
+ }
+ }
+ case action:
+ switch v {
+ case del:
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ n := len(getPrefixGlyphs(line[pos:], 1))
+ line = append(line[:pos], line[pos+n:]...)
+ }
+ case left:
+ if pos > 0 {
+ pos -= len(getSuffixGlyphs(line[:pos], 1))
+ } else {
+ fmt.Print(beep)
+ }
+ case wordLeft, altB:
+ if pos > 0 {
+ var spaceHere, spaceLeft, leftKnown bool
+ for {
+ pos--
+ if pos == 0 {
+ break
+ }
+ if leftKnown {
+ spaceHere = spaceLeft
+ } else {
+ spaceHere = unicode.IsSpace(line[pos])
+ }
+ spaceLeft, leftKnown = unicode.IsSpace(line[pos-1]), true
+ if !spaceHere && spaceLeft {
+ break
+ }
+ }
+ } else {
+ fmt.Print(beep)
+ }
+ case right:
+ if pos < len(line) {
+ pos += len(getPrefixGlyphs(line[pos:], 1))
+ } else {
+ fmt.Print(beep)
+ }
+ case wordRight, altF:
+ if pos < len(line) {
+ var spaceHere, spaceLeft, hereKnown bool
+ for {
+ pos++
+ if pos == len(line) {
+ break
+ }
+ if hereKnown {
+ spaceLeft = spaceHere
+ } else {
+ spaceLeft = unicode.IsSpace(line[pos-1])
+ }
+ spaceHere, hereKnown = unicode.IsSpace(line[pos]), true
+ if spaceHere && !spaceLeft {
+ break
+ }
+ }
+ } else {
+ fmt.Print(beep)
+ }
+ case up:
+ historyAction = true
+ if historyStale {
+ historyPrefix = s.getHistoryByPrefix(string(line))
+ historyPos = len(historyPrefix)
+ historyStale = false
+ }
+ if historyPos > 0 {
+ if historyPos == len(historyPrefix) {
+ historyEnd = string(line)
+ }
+ historyPos--
+ line = []rune(historyPrefix[historyPos])
+ pos = len(line)
+ } else {
+ fmt.Print(beep)
+ }
+ case down:
+ historyAction = true
+ if historyStale {
+ historyPrefix = s.getHistoryByPrefix(string(line))
+ historyPos = len(historyPrefix)
+ historyStale = false
+ }
+ if historyPos < len(historyPrefix) {
+ historyPos++
+ if historyPos == len(historyPrefix) {
+ line = []rune(historyEnd)
+ } else {
+ line = []rune(historyPrefix[historyPos])
+ }
+ pos = len(line)
+ } else {
+ fmt.Print(beep)
+ }
+ case home: // Start of line
+ pos = 0
+ case end: // End of line
+ pos = len(line)
+ case altD: // Delete next word
+ if pos == len(line) {
+ fmt.Print(beep)
+ break
+ }
+ // Remove whitespace to the right
+ var buf []rune // Store the deleted chars in a buffer
+ for {
+ if pos == len(line) || !unicode.IsSpace(line[pos]) {
+ break
+ }
+ buf = append(buf, line[pos])
+ line = append(line[:pos], line[pos+1:]...)
+ }
+ // Remove non-whitespace to the right
+ for {
+ if pos == len(line) || unicode.IsSpace(line[pos]) {
+ break
+ }
+ buf = append(buf, line[pos])
+ line = append(line[:pos], line[pos+1:]...)
+ }
+ // Save the result on the killRing
+ if killAction > 0 {
+ s.addToKillRing(buf, 2) // Add in prepend mode
+ } else {
+ s.addToKillRing(buf, 0) // Add in normal mode
+ }
+ killAction = 2 // Mark that there was some killing
+ case winch: // Window change
+ if s.multiLineMode {
+ if s.maxRows-s.cursorRows > 0 {
+ s.moveDown(s.maxRows - s.cursorRows)
+ }
+ for i := 0; i < s.maxRows-1; i++ {
+ s.cursorPos(0)
+ s.eraseLine()
+ s.moveUp(1)
+ }
+ s.maxRows = 1
+ s.cursorRows = 1
+ }
+ }
+ s.needRefresh = true
+ }
+ if s.needRefresh && !s.inputWaiting() {
+ err := s.refresh(p, line, pos)
+ if err != nil {
+ return "", err
+ }
+ }
+ if !historyAction {
+ historyStale = true
+ }
+ if killAction > 0 {
+ killAction--
+ }
+ }
+ return string(line), nil
+}
+
+// PasswordPrompt displays p, and then waits for user input. The input typed by
+// the user is not displayed in the terminal.
+func (s *State) PasswordPrompt(prompt string) (string, error) {
+ for _, r := range prompt {
+ if unicode.Is(unicode.C, r) {
+ return "", ErrInvalidPrompt
+ }
+ }
+ if !s.terminalSupported || s.columns == 0 {
+ return "", errors.New("liner: function not supported in this terminal")
+ }
+ if s.inputRedirected {
+ return s.promptUnsupported(prompt)
+ }
+ if s.outputRedirected {
+ return "", ErrNotTerminalOutput
+ }
+
+ p := []rune(prompt)
+ const minWorkingSpace = 1
+ if s.columns < countGlyphs(p)+minWorkingSpace {
+ return s.tooNarrow(prompt)
+ }
+
+ defer s.stopPrompt()
+
+restart:
+ s.startPrompt()
+ s.getColumns()
+
+ fmt.Print(prompt)
+ var line []rune
+ pos := 0
+
+mainLoop:
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ if s.shouldRestart != nil && s.shouldRestart(err) {
+ goto restart
+ }
+ return "", err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case cr, lf:
+ if s.needRefresh {
+ err := s.refresh(p, line, pos)
+ if err != nil {
+ return "", err
+ }
+ }
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ fmt.Println()
+ break mainLoop
+ case ctrlD: // del
+ if pos == 0 && len(line) == 0 {
+ // exit
+ return "", io.EOF
+ }
+
+ // ctrlD is a potential EOF, so the rune reader shuts down.
+ // Therefore, if it isn't actually an EOF, we must re-startPrompt.
+ s.restartPrompt()
+ case ctrlL: // clear screen
+ s.eraseScreen()
+ err := s.refresh(p, []rune{}, 0)
+ if err != nil {
+ return "", err
+ }
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+ }
+ case ctrlC:
+ fmt.Println("^C")
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ if s.ctrlCAborts {
+ return "", ErrPromptAborted
+ }
+ line = line[:0]
+ pos = 0
+ fmt.Print(prompt)
+ s.restartPrompt()
+ // Unused keys
+ case esc, tab, ctrlA, ctrlB, ctrlE, ctrlF, ctrlG, ctrlK, ctrlN, ctrlO, ctrlP, ctrlQ, ctrlR, ctrlS,
+ ctrlT, ctrlU, ctrlV, ctrlW, ctrlX, ctrlY, ctrlZ:
+ fallthrough
+ // Catch unhandled control codes (anything <= 31)
+ case 0, 28, 29, 30, 31:
+ fmt.Print(beep)
+ default:
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+ }
+ }
+ }
+ return string(line), nil
+}
+
+func (s *State) tooNarrow(prompt string) (string, error) {
+ // Docker, OpenWRT, etc. sometimes report a column width of 0.
+ // Reset the mode temporarily, and restore the previous mode in case
+ // the terminal is wide enough for the next Prompt attempt.
+ m, merr := TerminalMode()
+ s.origMode.ApplyMode()
+ if merr == nil {
+ defer m.ApplyMode()
+ }
+ if s.r == nil {
+ // Windows does not always set s.r
+ s.r = bufio.NewReader(os.Stdin)
+ defer func() { s.r = nil }()
+ }
+ return s.promptUnsupported(prompt)
+}
diff --git a/vendor/github.com/peterh/liner/output.go b/vendor/github.com/peterh/liner/output.go
new file mode 100644
index 00000000..db0641cf
--- /dev/null
+++ b/vendor/github.com/peterh/liner/output.go
@@ -0,0 +1,76 @@
+// +build linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func (s *State) cursorPos(x int) {
+ if s.useCHA {
+ // 'G' is "Cursor Character Absolute (CHA)"
+ fmt.Printf("\x1b[%dG", x+1)
+ } else {
+ // 'C' is "Cursor Forward (CUF)"
+ fmt.Print("\r")
+ if x > 0 {
+ fmt.Printf("\x1b[%dC", x)
+ }
+ }
+}
+
+func (s *State) eraseLine() {
+ fmt.Print("\x1b[0K")
+}
+
+func (s *State) eraseScreen() {
+ fmt.Print("\x1b[H\x1b[2J")
+}
+
+func (s *State) moveUp(lines int) {
+ fmt.Printf("\x1b[%dA", lines)
+}
+
+func (s *State) moveDown(lines int) {
+ fmt.Printf("\x1b[%dB", lines)
+}
+
+func (s *State) emitNewLine() {
+ fmt.Print("\n")
+}
+
+type winSize struct {
+ row, col uint16
+ xpixel, ypixel uint16
+}
+
+func (s *State) getColumns() bool {
+ var ws winSize
+ ok, _, _ := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdout),
+ syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&ws)))
+ if int(ok) < 0 {
+ return false
+ }
+ s.columns = int(ws.col)
+ return true
+}
+
+func (s *State) checkOutput() {
+ // xterm is known to support CHA
+ if strings.Contains(strings.ToLower(os.Getenv("TERM")), "xterm") {
+ s.useCHA = true
+ return
+ }
+
+ // The test for functional ANSI CHA is unreliable (eg the Windows
+ // telnet command does not support reading the cursor position with
+ // an ANSI DSR request, despite setting TERM=ansi)
+
+ // Assume CHA isn't supported (which should be safe, although it
+ // does result in occasional visible cursor jitter)
+ s.useCHA = false
+}
diff --git a/vendor/github.com/peterh/liner/output_windows.go b/vendor/github.com/peterh/liner/output_windows.go
new file mode 100644
index 00000000..45cd978c
--- /dev/null
+++ b/vendor/github.com/peterh/liner/output_windows.go
@@ -0,0 +1,72 @@
+package liner
+
+import (
+ "unsafe"
+)
+
+type coord struct {
+ x, y int16
+}
+type smallRect struct {
+ left, top, right, bottom int16
+}
+
+type consoleScreenBufferInfo struct {
+ dwSize coord
+ dwCursorPosition coord
+ wAttributes int16
+ srWindow smallRect
+ dwMaximumWindowSize coord
+}
+
+func (s *State) cursorPos(x int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(x)&0xFFFF|int(sbi.dwCursorPosition.y)<<16))
+}
+
+func (s *State) eraseLine() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ var numWritten uint32
+ procFillConsoleOutputCharacter.Call(uintptr(s.hOut), uintptr(' '),
+ uintptr(sbi.dwSize.x-sbi.dwCursorPosition.x),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|int(sbi.dwCursorPosition.y)<<16),
+ uintptr(unsafe.Pointer(&numWritten)))
+}
+
+func (s *State) eraseScreen() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ var numWritten uint32
+ procFillConsoleOutputCharacter.Call(uintptr(s.hOut), uintptr(' '),
+ uintptr(sbi.dwSize.x)*uintptr(sbi.dwSize.y),
+ 0,
+ uintptr(unsafe.Pointer(&numWritten)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut), 0)
+}
+
+func (s *State) moveUp(lines int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)-lines)<<16))
+}
+
+func (s *State) moveDown(lines int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)+lines)<<16))
+}
+
+func (s *State) emitNewLine() {
+ // Windows doesn't need to emit a new line
+}
+
+func (s *State) getColumns() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ s.columns = int(sbi.dwSize.x)
+}
diff --git a/vendor/github.com/peterh/liner/unixmode.go b/vendor/github.com/peterh/liner/unixmode.go
new file mode 100644
index 00000000..9838923f
--- /dev/null
+++ b/vendor/github.com/peterh/liner/unixmode.go
@@ -0,0 +1,37 @@
+// +build linux darwin freebsd openbsd netbsd
+
+package liner
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (mode *termios) ApplyMode() error {
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), setTermios, uintptr(unsafe.Pointer(mode)))
+
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+// TerminalMode returns the current terminal input mode as a ModeApplier.
+//
+// This function is provided for convenience, and should
+// not be necessary for most users of liner.
+func TerminalMode() (ModeApplier, error) {
+ mode, errno := getMode(syscall.Stdin)
+
+ if errno != 0 {
+ return nil, errno
+ }
+ return mode, nil
+}
+
+func getMode(handle int) (*termios, syscall.Errno) {
+ var mode termios
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(handle), getTermios, uintptr(unsafe.Pointer(&mode)))
+
+ return &mode, errno
+}
diff --git a/vendor/github.com/peterh/liner/width.go b/vendor/github.com/peterh/liner/width.go
new file mode 100644
index 00000000..0395f3a8
--- /dev/null
+++ b/vendor/github.com/peterh/liner/width.go
@@ -0,0 +1,90 @@
+package liner
+
+import (
+ "unicode"
+
+ "github.com/mattn/go-runewidth"
+)
+
+// These character classes are mostly zero width (when combined).
+// A few might not be, depending on the user's font. Fixing this
+// is non-trivial, given that some terminals don't support
+// ANSI DSR/CPR
+var zeroWidth = []*unicode.RangeTable{
+ unicode.Mn,
+ unicode.Me,
+ unicode.Cc,
+ unicode.Cf,
+}
+
+// countGlyphs considers zero-width characters to be zero glyphs wide,
+// and members of Chinese, Japanese, and Korean scripts to be 2 glyphs wide.
+func countGlyphs(s []rune) int {
+ n := 0
+ for _, r := range s {
+ // speed up the common case
+ if r < 127 {
+ n++
+ continue
+ }
+
+ n += runewidth.RuneWidth(r)
+ }
+ return n
+}
+
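+// countMultiLineGlyphs counts glyphs like countGlyphs, but accounts for wrapping
+// at the given column width, starting from an existing offset; a double-width
+// glyph that would straddle the end of a row is pushed to the start of the next row.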
+func countMultiLineGlyphs(s []rune, columns int, start int) int {
+ n := start
+ for _, r := range s {
+ if r < 127 {
+ n++
+ continue
+ }
+ switch runewidth.RuneWidth(r) {
+ case 0:
+ case 1:
+ n++
+ case 2:
+ n += 2
+ // no room for a 2-glyph-wide char at the end of this row,
+ // so skip a column and display it at the beginning of the next row
+ if n%columns == 1 {
+ n++
+ }
+ }
+ }
+ return n
+}
+
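+// getPrefixGlyphs returns the prefix of s containing at most num glyphs
+// (zero-width runes do not count toward num, and trailing zero-width runes
+// are kept with the final glyph).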
+func getPrefixGlyphs(s []rune, num int) []rune {
+ p := 0
+ for n := 0; n < num && p < len(s); p++ {
+ // speed up the common case
+ if s[p] < 127 {
+ n++
+ continue
+ }
+ if !unicode.IsOneOf(zeroWidth, s[p]) {
+ n++
+ }
+ }
+ for p < len(s) && unicode.IsOneOf(zeroWidth, s[p]) {
+ p++
+ }
+ return s[:p]
+}
+
+func getSuffixGlyphs(s []rune, num int) []rune {
+ p := len(s)
+ for n := 0; n < num && p > 0; p-- {
+ // speed up the common case
+ if s[p-1] < 127 {
+ n++
+ continue
+ }
+ if !unicode.IsOneOf(zeroWidth, s[p-1]) {
+ n++
+ }
+ }
+ return s[p:]
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 00000000..c67dad61
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 00000000..003e99fa
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for outputting text differences in a human-friendly way;
+// there are no guarantees that generated diffs are consumable by patch(1).
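+//
+// A minimal usage sketch (not part of the original port's documentation; the
+// file names here are illustrative only):
+//
+//	a := SplitLines("one\ntwo\nthree\n")
+//	b := SplitLines("one\nthree\nfour\n")
+//	text, err := GetUnifiedDiffString(UnifiedDiff{
+//		A: a, B: b, FromFile: "a.txt", ToFile: "b.txt", Context: 3,
+//	})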
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk".
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s, _ := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s, _ := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s, _ := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
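+//
+// For example (mirroring the Python difflib documentation), comparing
+// a = ["q","a","b","x","c","d"] with b = ["a","b","y","c","d","f"] yields, in
+// order: ('d',0,1,0,0) ('e',1,3,0,2) ('r',3,4,2,3) ('e',4,6,3,5) ('i',6,6,5,6).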
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
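+// For example, formatRangeUnified(3, 6) yields "4,3" (1-based start, length 3).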
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
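+// For example, formatRangeContext(3, 6) yields "4,6" (a 1-based inclusive range).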
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/rs/cors/LICENSE b/vendor/github.com/rs/cors/LICENSE
new file mode 100644
index 00000000..d8e2df5a
--- /dev/null
+++ b/vendor/github.com/rs/cors/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Olivier Poitrey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/rs/cors/README.md b/vendor/github.com/rs/cors/README.md
new file mode 100644
index 00000000..ecc83b29
--- /dev/null
+++ b/vendor/github.com/rs/cors/README.md
@@ -0,0 +1,115 @@
+# Go CORS handler [godoc](https://godoc.org/github.com/rs/cors) [license](https://raw.githubusercontent.com/rs/cors/master/LICENSE) [build](https://travis-ci.org/rs/cors) [coverage](http://gocover.io/github.com/rs/cors)
+
+CORS is a `net/http` handler implementing the [Cross Origin Resource Sharing W3 specification](http://www.w3.org/TR/cors/) in Golang.
+
+## Getting Started
+
+After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`.
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/rs/cors"
+)
+
+func main() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte("{\"hello\": \"world\"}"))
+ })
+
+ // cors.Default() sets up the middleware with default options:
+ // all origins accepted with simple methods (GET, POST). See
+ // documentation below for more options.
+ handler := cors.Default().Handler(mux)
+ http.ListenAndServe(":8080", handler)
+}
+```
+
+Install `cors`:
+
+ go get github.com/rs/cors
+
+Then run your server:
+
+ go run server.go
+
+The server now runs on `localhost:8080`:
+
+ $ curl -D - -H 'Origin: http://foo.com' http://localhost:8080/
+ HTTP/1.1 200 OK
+ Access-Control-Allow-Origin: foo.com
+ Content-Type: application/json
+ Date: Sat, 25 Oct 2014 03:43:57 GMT
+ Content-Length: 18
+
+ {"hello": "world"}
+
+### Allow * With Credentials Security Protection
+
+This library has been modified to avoid a well-known security issue that arises when `AllowedOrigins` is set to `*` and `AllowCredentials` to `true`. Such a setup used to make the library reflect the request `Origin` header value, working around a security protection embedded in the standard that makes clients refuse such a configuration. This behavior was removed with [#55](https://github.com/rs/cors/issues/55) and [#57](https://github.com/rs/cors/issues/57).
+
+If you depend on this behavior and understand the implications, you can restore it by setting `AllowOriginFunc` to `func(origin string) bool { return true }`.
+
+Please refer to [#55](https://github.com/rs/cors/issues/55) for more information about the security implications.
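+
+As a hedged sketch, an opt-in configuration restoring that behavior (only do this if you accept the trade-off described in #55) might look like:
+
+```go
+c := cors.New(cors.Options{
+    // Reflect any Origin back to the client; see issue #55 for the implications.
+    AllowOriginFunc:  func(origin string) bool { return true },
+    AllowCredentials: true,
+})
+```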
+
+### More Examples
+
+* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go)
+* [Goji](https://goji.io): [examples/goji/server.go](https://github.com/rs/cors/blob/master/examples/goji/server.go)
+* [Martini](http://martini.codegangsta.io): [examples/martini/server.go](https://github.com/rs/cors/blob/master/examples/martini/server.go)
+* [Negroni](https://github.com/codegangsta/negroni): [examples/negroni/server.go](https://github.com/rs/cors/blob/master/examples/negroni/server.go)
+* [Alice](https://github.com/justinas/alice): [examples/alice/server.go](https://github.com/rs/cors/blob/master/examples/alice/server.go)
+* [HttpRouter](https://github.com/julienschmidt/httprouter): [examples/httprouter/server.go](https://github.com/rs/cors/blob/master/examples/httprouter/server.go)
+* [Gorilla](http://www.gorillatoolkit.org/pkg/mux): [examples/gorilla/server.go](https://github.com/rs/cors/blob/master/examples/gorilla/server.go)
+* [Buffalo](https://gobuffalo.io): [examples/buffalo/server.go](https://github.com/rs/cors/blob/master/examples/buffalo/server.go)
+* [Gin](https://gin-gonic.github.io/gin): [examples/gin/server.go](https://github.com/rs/cors/blob/master/examples/gin/server.go)
+* [Chi](https://github.com/go-chi/chi): [examples/chi/server.go](https://github.com/rs/cors/blob/master/examples/chi/server.go)
+
+## Parameters
+
+Parameters are passed to the middleware through the `cors.New` method as follows:
+
+```go
+c := cors.New(cors.Options{
+ AllowedOrigins: []string{"http://foo.com", "http://foo.com:8080"},
+ AllowCredentials: true,
+ // Enable Debugging for testing, consider disabling in production
+ Debug: true,
+})
+
+// Insert the middleware
+handler = c.Handler(handler)
+```
+
+* **AllowedOrigins** `[]string`: A list of origins a cross-domain request can be executed from. If the special `*` value is present in the list, all origins will be allowed. An origin may contain a wildcard (`*`) to replace 0 or more characters (i.e.: `http://*.domain.com`). Usage of wildcards implies a small performance penalty. Only one wildcard can be used per origin. The default value is `*`.
+* **AllowOriginFunc** `func (origin string) bool`: A custom function to validate the origin. It takes the origin as an argument and returns true if allowed, or false otherwise. If this option is set, the content of `AllowedOrigins` is ignored.
+* **AllowOriginRequestFunc** `func (r *http.Request, origin string) bool`: A custom function to validate the origin. It takes the HTTP Request object and the origin as arguments and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins` and `AllowOriginFunc` is ignored.
+* **AllowedMethods** `[]string`: A list of methods the client is allowed to use with cross-domain requests. Default value is simple methods (`GET` and `POST`).
+* **AllowedHeaders** `[]string`: A list of non simple headers the client is allowed to use with cross-domain requests.
+* **ExposedHeaders** `[]string`: A list of headers that are safe to expose to the client as part of the CORS response (sent via the `Access-Control-Expose-Headers` header).
+* **AllowCredentials** `bool`: Indicates whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates. The default is `false`.
+* **MaxAge** `int`: Indicates how long (in seconds) the results of a preflight request can be cached. The default is `0` which stands for no max age.
+* **OptionsPassthrough** `bool`: Instructs preflight to let other potential next handlers process the `OPTIONS` method. Turn this on if your application handles `OPTIONS`.
+* **Debug** `bool`: Debugging flag adds additional output to debug server side CORS issues.
+
+See [API documentation](http://godoc.org/github.com/rs/cors) for more info.
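+
+As an illustrative sketch tying several of the parameters above together (the origin, headers, and max age values are placeholders, and imports are elided as in the snippet above):
+
+```go
+c := cors.New(cors.Options{
+    AllowedOrigins: []string{"https://*.example.com"},
+    AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodPut},
+    AllowedHeaders: []string{"Authorization", "Content-Type"},
+    MaxAge:         300, // cache preflight responses for five minutes
+})
+```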
+
+## Benchmarks
+
+ BenchmarkWithout 20000000 64.6 ns/op 8 B/op 1 allocs/op
+ BenchmarkDefault 3000000 469 ns/op 114 B/op 2 allocs/op
+ BenchmarkAllowedOrigin 3000000 608 ns/op 114 B/op 2 allocs/op
+ BenchmarkPreflight 20000000 73.2 ns/op 0 B/op 0 allocs/op
+ BenchmarkPreflightHeader 20000000 73.6 ns/op 0 B/op 0 allocs/op
+ BenchmarkParseHeaderList 2000000 847 ns/op 184 B/op 6 allocs/op
+ BenchmarkParse…Single 5000000 290 ns/op 32 B/op 3 allocs/op
+ BenchmarkParse…Normalized 2000000 776 ns/op 160 B/op 6 allocs/op
+
+## Licenses
+
+All source code is licensed under the [MIT License](https://raw.github.com/rs/cors/master/LICENSE).
diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go
new file mode 100644
index 00000000..10019eb8
--- /dev/null
+++ b/vendor/github.com/rs/cors/cors.go
@@ -0,0 +1,424 @@
+/*
+Package cors is a net/http handler to handle CORS related requests
+as defined by http://www.w3.org/TR/cors/
+
+You can configure it by passing an option struct to cors.New:
+
+ c := cors.New(cors.Options{
+ AllowedOrigins: []string{"foo.com"},
+ AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodDelete},
+ AllowCredentials: true,
+ })
+
+Then insert the handler in the chain:
+
+ handler = c.Handler(handler)
+
+See Options documentation for more options.
+
+The resulting handler is a standard net/http handler.
+*/
+package cors
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Options is a configuration container to setup the CORS middleware.
+type Options struct {
+ // AllowedOrigins is a list of origins a cross-domain request can be executed from.
+ // If the special "*" value is present in the list, all origins will be allowed.
+ // An origin may contain a wildcard (*) to replace 0 or more characters
+ // (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penalty.
+ // Only one wildcard can be used per origin.
+ // Default value is ["*"]
+ AllowedOrigins []string
+ // AllowOriginFunc is a custom function to validate the origin. It takes the origin
+ // as argument and returns true if allowed or false otherwise. If this option is
+ // set, the content of AllowedOrigins is ignored.
+ AllowOriginFunc func(origin string) bool
+ // AllowOriginRequestFunc is a custom function to validate the origin. It takes the HTTP Request object and the origin as
+ // argument and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins`
+ // and `AllowOriginFunc` is ignored.
+ AllowOriginRequestFunc func(r *http.Request, origin string) bool
+ // AllowedMethods is a list of methods the client is allowed to use with
+ // cross-domain requests. Default value is simple methods (HEAD, GET and POST).
+ AllowedMethods []string
+ // AllowedHeaders is list of non simple headers the client is allowed to use with
+ // cross-domain requests.
+ // If the special "*" value is present in the list, all headers will be allowed.
+ // Default value is [] but "Origin" is always appended to the list.
+ AllowedHeaders []string
+ // ExposedHeaders indicates which headers are safe to expose to the client as
+ // part of the CORS response (sent via the Access-Control-Expose-Headers header)
+ ExposedHeaders []string
+ // MaxAge indicates how long (in seconds) the results of a preflight request
+ // can be cached
+ MaxAge int
+ // AllowCredentials indicates whether the request can include user credentials like
+ // cookies, HTTP authentication or client side SSL certificates.
+ AllowCredentials bool
+ // OptionsPassthrough instructs preflight to let other potential next handlers
+ // process the OPTIONS method. Turn this on if your application handles OPTIONS.
+ OptionsPassthrough bool
+ // Debugging flag adds additional output to debug server side CORS issues
+ Debug bool
+}
+
+// Cors http handler
+type Cors struct {
+ // Debug logger
+ Log *log.Logger
+ // Normalized list of plain allowed origins
+ allowedOrigins []string
+ // List of allowed origins containing wildcards
+ allowedWOrigins []wildcard
+ // Optional origin validator function
+ allowOriginFunc func(origin string) bool
+ // Optional origin validator (with request) function
+ allowOriginRequestFunc func(r *http.Request, origin string) bool
+ // Normalized list of allowed headers
+ allowedHeaders []string
+ // Normalized list of allowed methods
+ allowedMethods []string
+ // Normalized list of exposed headers
+ exposedHeaders []string
+ maxAge int
+ // Set to true when allowed origins contains a "*"
+ allowedOriginsAll bool
+ // Set to true when allowed headers contains a "*"
+ allowedHeadersAll bool
+ allowCredentials bool
+ optionPassthrough bool
+}
+
+// New creates a new Cors handler with the provided options.
+func New(options Options) *Cors {
+ c := &Cors{
+ exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey),
+ allowOriginFunc: options.AllowOriginFunc,
+ allowOriginRequestFunc: options.AllowOriginRequestFunc,
+ allowCredentials: options.AllowCredentials,
+ maxAge: options.MaxAge,
+ optionPassthrough: options.OptionsPassthrough,
+ }
+ if options.Debug {
+ c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags)
+ }
+
+ // Normalize options
+ // Note: for origins and methods matching, the spec requires a case-sensitive matching.
+ // As it may be error-prone, we chose to ignore the spec here.
+
+ // Allowed Origins
+ if len(options.AllowedOrigins) == 0 {
+ if options.AllowOriginFunc == nil && options.AllowOriginRequestFunc == nil {
+ // Default is all origins
+ c.allowedOriginsAll = true
+ }
+ } else {
+ c.allowedOrigins = []string{}
+ c.allowedWOrigins = []wildcard{}
+ for _, origin := range options.AllowedOrigins {
+ // Normalize
+ origin = strings.ToLower(origin)
+ if origin == "*" {
+ // If "*" is present in the list, turn the whole list into a match all
+ c.allowedOriginsAll = true
+ c.allowedOrigins = nil
+ c.allowedWOrigins = nil
+ break
+ } else if i := strings.IndexByte(origin, '*'); i >= 0 {
+ // Split the origin in two: start and end string without the *
+ w := wildcard{origin[0:i], origin[i+1:]}
+ c.allowedWOrigins = append(c.allowedWOrigins, w)
+ } else {
+ c.allowedOrigins = append(c.allowedOrigins, origin)
+ }
+ }
+ }
+
+ // Allowed Headers
+ if len(options.AllowedHeaders) == 0 {
+ // Use sensible defaults
+ c.allowedHeaders = []string{"Origin", "Accept", "Content-Type", "X-Requested-With"}
+ } else {
+ // Origin is always appended as some browsers will always request this header at preflight
+ c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey)
+ for _, h := range options.AllowedHeaders {
+ if h == "*" {
+ c.allowedHeadersAll = true
+ c.allowedHeaders = nil
+ break
+ }
+ }
+ }
+
+ // Allowed Methods
+ if len(options.AllowedMethods) == 0 {
+ // Default is spec's "simple" methods
+ c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead}
+ } else {
+ c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper)
+ }
+
+ return c
+}
+
+// Default creates a new Cors handler with default options.
+func Default() *Cors {
+ return New(Options{})
+}
+
+// AllowAll creates a new Cors handler with a permissive configuration allowing all
+// origins with all standard methods and any header; credentials are not allowed.
+func AllowAll() *Cors {
+ return New(Options{
+ AllowedOrigins: []string{"*"},
+ AllowedMethods: []string{
+ http.MethodHead,
+ http.MethodGet,
+ http.MethodPost,
+ http.MethodPut,
+ http.MethodPatch,
+ http.MethodDelete,
+ },
+ AllowedHeaders: []string{"*"},
+ AllowCredentials: false,
+ })
+}
+
+// Handler applies the CORS specification to the request and adds the relevant CORS
+// headers as necessary.
+func (c *Cors) Handler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" {
+ c.logf("Handler: Preflight request")
+ c.handlePreflight(w, r)
+ // Preflight requests are standalone and should stop the chain as some other
+ // middleware may not handle OPTIONS requests correctly. One typical example
+ // is authentication middleware; OPTIONS requests won't carry authentication
+ // headers (see #1)
+ if c.optionPassthrough {
+ h.ServeHTTP(w, r)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ } else {
+ c.logf("Handler: Actual request")
+ c.handleActualRequest(w, r)
+ h.ServeHTTP(w, r)
+ }
+ })
+}
+
+// HandlerFunc provides Martini compatible handler
+func (c *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" {
+ c.logf("HandlerFunc: Preflight request")
+ c.handlePreflight(w, r)
+ } else {
+ c.logf("HandlerFunc: Actual request")
+ c.handleActualRequest(w, r)
+ }
+}
+
+// Negroni compatible interface
+func (c *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+ if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" {
+ c.logf("ServeHTTP: Preflight request")
+ c.handlePreflight(w, r)
+ // Preflight requests are standalone and should stop the chain as some other
+ // middleware may not handle OPTIONS requests correctly. One typical example
+ // is authentication middleware; OPTIONS requests won't carry authentication
+ // headers (see #1)
+ if c.optionPassthrough {
+ next(w, r)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ } else {
+ c.logf("ServeHTTP: Actual request")
+ c.handleActualRequest(w, r)
+ next(w, r)
+ }
+}
+
+// handlePreflight handles pre-flight CORS requests
+func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) {
+ headers := w.Header()
+ origin := r.Header.Get("Origin")
+
+ if r.Method != http.MethodOptions {
+ c.logf(" Preflight aborted: %s!=OPTIONS", r.Method)
+ return
+ }
+ // Always set Vary headers
+ // see https://github.com/rs/cors/issues/10,
+ // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001
+ headers.Add("Vary", "Origin")
+ headers.Add("Vary", "Access-Control-Request-Method")
+ headers.Add("Vary", "Access-Control-Request-Headers")
+
+ if origin == "" {
+ c.logf(" Preflight aborted: empty origin")
+ return
+ }
+ if !c.isOriginAllowed(r, origin) {
+ c.logf(" Preflight aborted: origin '%s' not allowed", origin)
+ return
+ }
+
+ reqMethod := r.Header.Get("Access-Control-Request-Method")
+ if !c.isMethodAllowed(reqMethod) {
+ c.logf(" Preflight aborted: method '%s' not allowed", reqMethod)
+ return
+ }
+ reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers"))
+ if !c.areHeadersAllowed(reqHeaders) {
+ c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders)
+ return
+ }
+ if c.allowedOriginsAll {
+ headers.Set("Access-Control-Allow-Origin", "*")
+ } else {
+ headers.Set("Access-Control-Allow-Origin", origin)
+ }
+ // Spec says: Since the list of methods can be unbounded, simply returning the method indicated
+ // by Access-Control-Request-Method (if supported) can be enough
+ headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod))
+ if len(reqHeaders) > 0 {
+
+ // Spec says: Since the list of headers can be unbounded, simply returning supported headers
+ // from Access-Control-Request-Headers can be enough
+ headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", "))
+ }
+ if c.allowCredentials {
+ headers.Set("Access-Control-Allow-Credentials", "true")
+ }
+ if c.maxAge > 0 {
+ headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge))
+ }
+ c.logf(" Preflight response headers: %v", headers)
+}
+
+// handleActualRequest handles simple cross-origin requests, actual requests or redirects
+func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) {
+ headers := w.Header()
+ origin := r.Header.Get("Origin")
+
+ if r.Method == http.MethodOptions {
+ c.logf(" Actual request no headers added: method == %s", r.Method)
+ return
+ }
+ // Always set Vary, see https://github.com/rs/cors/issues/10
+ headers.Add("Vary", "Origin")
+ if origin == "" {
+ c.logf(" Actual request no headers added: missing origin")
+ return
+ }
+ if !c.isOriginAllowed(r, origin) {
+ c.logf(" Actual request no headers added: origin '%s' not allowed", origin)
+ return
+ }
+
+ // Note that spec does define a way to specifically disallow a simple method like GET or
+ // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the
+ // spec doesn't instruct to check the allowed methods for simple cross-origin requests.
+ // We think it's a nice feature to be able to have control on those methods though.
+ if !c.isMethodAllowed(r.Method) {
+ c.logf(" Actual request no headers added: method '%s' not allowed", r.Method)
+
+ return
+ }
+ if c.allowedOriginsAll {
+ headers.Set("Access-Control-Allow-Origin", "*")
+ } else {
+ headers.Set("Access-Control-Allow-Origin", origin)
+ }
+ if len(c.exposedHeaders) > 0 {
+ headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", "))
+ }
+ if c.allowCredentials {
+ headers.Set("Access-Control-Allow-Credentials", "true")
+ }
+ c.logf(" Actual response added headers: %v", headers)
+}
+
+// convenience method. checks if debugging is turned on before printing
+func (c *Cors) logf(format string, a ...interface{}) {
+ if c.Log != nil {
+ c.Log.Printf(format, a...)
+ }
+}
+
+// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
+// on the endpoint
+func (c *Cors) isOriginAllowed(r *http.Request, origin string) bool {
+ if c.allowOriginRequestFunc != nil {
+ return c.allowOriginRequestFunc(r, origin)
+ }
+ if c.allowOriginFunc != nil {
+ return c.allowOriginFunc(origin)
+ }
+ if c.allowedOriginsAll {
+ return true
+ }
+ origin = strings.ToLower(origin)
+ for _, o := range c.allowedOrigins {
+ if o == origin {
+ return true
+ }
+ }
+ for _, w := range c.allowedWOrigins {
+ if w.match(origin) {
+ return true
+ }
+ }
+ return false
+}
+
+// isMethodAllowed checks if a given method can be used as part of a cross-domain request
+// on the endpoint
+func (c *Cors) isMethodAllowed(method string) bool {
+ if len(c.allowedMethods) == 0 {
+ // If no method allowed, always return false, even for preflight request
+ return false
+ }
+ method = strings.ToUpper(method)
+ if method == http.MethodOptions {
+ // Always allow preflight requests
+ return true
+ }
+ for _, m := range c.allowedMethods {
+ if m == method {
+ return true
+ }
+ }
+ return false
+}
+
+// areHeadersAllowed checks if a given list of headers is allowed to be used within
+// a cross-domain request.
+func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool {
+ if c.allowedHeadersAll || len(requestedHeaders) == 0 {
+ return true
+ }
+ for _, header := range requestedHeaders {
+ header = http.CanonicalHeaderKey(header)
+ found := false
+ for _, h := range c.allowedHeaders {
+ if h == header {
+ found = true
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/rs/cors/go.mod b/vendor/github.com/rs/cors/go.mod
new file mode 100644
index 00000000..0a4c6521
--- /dev/null
+++ b/vendor/github.com/rs/cors/go.mod
@@ -0,0 +1 @@
+module github.com/rs/cors
diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go
new file mode 100644
index 00000000..db83ac3e
--- /dev/null
+++ b/vendor/github.com/rs/cors/utils.go
@@ -0,0 +1,71 @@
+package cors
+
+import "strings"
+
+const toLower = 'a' - 'A'
+
+type converter func(string) string
+
+type wildcard struct {
+ prefix string
+ suffix string
+}
+
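+// match reports whether s starts with the wildcard's prefix and ends with its
+// suffix; e.g. wildcard{"http://", ".example.com"} matches "http://foo.example.com".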
+func (w wildcard) match(s string) bool {
+ return len(s) >= len(w.prefix)+len(w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix)
+}
+
+// convert converts a list of strings using the passed converter function
+func convert(s []string, c converter) []string {
+ out := []string{}
+ for _, i := range s {
+ out = append(out, c(i))
+ }
+ return out
+}
+
+// parseHeaderList tokenizes and normalizes a string containing a comma-separated list of headers,
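+// e.g. "x-requested-with, content-type" becomes ["X-Requested-With", "Content-Type"].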
+func parseHeaderList(headerList string) []string {
+ l := len(headerList)
+ h := make([]byte, 0, l)
+ upper := true
+ // Estimate the number of headers in order to allocate the right slice size
+ t := 0
+ for i := 0; i < l; i++ {
+ if headerList[i] == ',' {
+ t++
+ }
+ }
+ headers := make([]string, 0, t)
+ for i := 0; i < l; i++ {
+ b := headerList[i]
+ switch {
+ case b >= 'a' && b <= 'z':
+ if upper {
+ h = append(h, b-toLower)
+ } else {
+ h = append(h, b)
+ }
+ case b >= 'A' && b <= 'Z':
+ if !upper {
+ h = append(h, b+toLower)
+ } else {
+ h = append(h, b)
+ }
+ case b == '-' || b == '_' || (b >= '0' && b <= '9'):
+ h = append(h, b)
+ }
+
+ if b == ' ' || b == ',' || i == l-1 {
+ if len(h) > 0 {
+ // Flush the found header
+ headers = append(headers, string(h))
+ h = h[:0]
+ upper = true
+ }
+ } else {
+ upper = b == '-' || b == '_'
+ }
+ }
+ return headers
+}
diff --git a/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md b/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md
new file mode 100644
index 00000000..1820ecb3
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contributing
+
+In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted.
+
+Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines:
+
+- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request.
+- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license.
+- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set.
+- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out.
+ - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc...
+ - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc...
diff --git a/vendor/github.com/smartystreets/assertions/LICENSE.md b/vendor/github.com/smartystreets/assertions/LICENSE.md
new file mode 100644
index 00000000..8ea6f945
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/LICENSE.md
@@ -0,0 +1,23 @@
+Copyright (c) 2016 SmartyStreets, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+NOTE: Various optional and subordinate components carry their own licensing
+requirements and restrictions. Use of those components is subject to the terms
+and conditions outlined the respective license of each component.
diff --git a/vendor/github.com/smartystreets/assertions/README.md b/vendor/github.com/smartystreets/assertions/README.md
new file mode 100644
index 00000000..58e51e90
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/README.md
@@ -0,0 +1,611 @@
+# assertions
+--
+ import "github.com/smartystreets/assertions"
+
+Package assertions contains the implementations for all assertions which are
+referenced in goconvey's `convey` package
+(github.com/smartystreets/goconvey/convey) and gunit
+(github.com/smartystreets/gunit) for use with the So(...) method. They can also
+be used in traditional Go test functions and even in applications.
+
+https://smartystreets.com
+
+Many of the assertions lean heavily on work done by Aaron Jacobs in his
+excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The
+ShouldResemble assertion leans heavily on work done by Daniel Jacques in his
+very helpful go-render library. (https://github.com/luci/go-render)
+
+## Usage
+
+#### func GoConveyMode
+
+```go
+func GoConveyMode(yes bool)
+```
+GoConveyMode provides control over JSON serialization of failures. When using
+the assertions in this package from the convey package, JSON results are very
+helpful and can be rendered in a DIFF view. In that case, this function will be
+called with a true value to enable the JSON serialization. By default, the
+assertions in this package will not serialize a JSON result, making standalone
+usage more convenient.
+
+#### func ShouldAlmostEqual
+
+```go
+func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldAlmostEqual makes sure that two parameters are close enough to being
+equal. The acceptable delta may be specified with a third argument, or a very
+small default delta will be used.
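+
+For example (an illustrative sketch; it assumes the package-level `So` helper documented below and the standard `log` package):
+
+```go
+// Default delta:
+if ok, msg := So(3.14159, ShouldAlmostEqual, 3.14159); !ok {
+    log.Println(msg)
+}
+
+// Acceptable delta supplied as the optional third argument:
+if ok, msg := So(9.8, ShouldAlmostEqual, 10.0, 0.5); !ok {
+    log.Println(msg)
+}
+```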
+
+#### func ShouldBeBetween
+
+```go
+func ShouldBeBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBetween receives exactly three parameters: an actual value, a lower
+bound, and an upper bound. It ensures that the actual value is between both
+bounds (but not equal to either of them).
+
+#### func ShouldBeBetweenOrEqual
+
+```go
+func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a
+lower bound, and an upper bound. It ensures that the actual value is between
+both bounds or equal to one of them.
+
+#### func ShouldBeBlank
+
+```go
+func ShouldBeBlank(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal
+to "".
+
+#### func ShouldBeChronological
+
+```go
+func ShouldBeChronological(actual interface{}, expected ...interface{}) string
+```
+ShouldBeChronological receives a []time.Time slice and asserts that they are in
+chronological order starting with the first time.Time as the earliest.
+
+#### func ShouldBeEmpty
+
+```go
+func ShouldBeEmpty(actual interface{}, expected ...interface{}) string
+```
+ShouldBeEmpty receives a single parameter (actual) and determines whether or not
+calling len(actual) would return `0`. It obeys the rules specified by the len
+function for determining length: http://golang.org/pkg/builtin/#len
+
+#### func ShouldBeError
+
+```go
+func ShouldBeError(actual interface{}, expected ...interface{}) string
+```
+ShouldBeError asserts that the first argument implements the error interface. It
+also compares the first argument against the second argument if provided (which
+must be an error message string or another error value).
+
+#### func ShouldBeFalse
+
+```go
+func ShouldBeFalse(actual interface{}, expected ...interface{}) string
+```
+ShouldBeFalse receives a single parameter and ensures that it is false.
+
+#### func ShouldBeGreaterThan
+
+```go
+func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string
+```
+ShouldBeGreaterThan receives exactly two parameters and ensures that the first
+is greater than the second.
+
+#### func ShouldBeGreaterThanOrEqualTo
+
+```go
+func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string
+```
+ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that
+the first is greater than or equal to the second.
+
+#### func ShouldBeIn
+
+```go
+func ShouldBeIn(actual interface{}, expected ...interface{}) string
+```
+ShouldBeIn receives at least 2 parameters. The first is a proposed member of the
+collection that is passed in either as the second parameter, or of the
+collection that is comprised of all the remaining parameters. This assertion
+ensures that the proposed member is in the collection (using ShouldEqual).
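+
+For illustration, both accepted call shapes (a sketch; the `(ok, message)` return values of `So` are ignored here for brevity):
+
+```go
+// Collection passed as a single second argument:
+So(2, ShouldBeIn, []int{1, 2, 3})
+
+// Collection built from the remaining arguments:
+So(2, ShouldBeIn, 1, 2, 3)
+```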
+
+#### func ShouldBeLessThan
+
+```go
+func ShouldBeLessThan(actual interface{}, expected ...interface{}) string
+```
+ShouldBeLessThan receives exactly two parameters and ensures that the first is
+less than the second.
+
+#### func ShouldBeLessThanOrEqualTo
+
+```go
+func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string
+```
+ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the
+first is less than or equal to the second.
+
+#### func ShouldBeNil
+
+```go
+func ShouldBeNil(actual interface{}, expected ...interface{}) string
+```
+ShouldBeNil receives a single parameter and ensures that it is nil.
+
+#### func ShouldBeTrue
+
+```go
+func ShouldBeTrue(actual interface{}, expected ...interface{}) string
+```
+ShouldBeTrue receives a single parameter and ensures that it is true.
+
+#### func ShouldBeZeroValue
+
+```go
+func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string
+```
+ShouldBeZeroValue receives a single parameter and ensures that it is the Go
+equivalent of the default value, or "zero" value.
+
+#### func ShouldContain
+
+```go
+func ShouldContain(actual interface{}, expected ...interface{}) string
+```
+ShouldContain receives exactly two parameters. The first is a slice and the
+second is a proposed member. Membership is determined using ShouldEqual.
+
+#### func ShouldContainKey
+
+```go
+func ShouldContainKey(actual interface{}, expected ...interface{}) string
+```
+ShouldContainKey receives exactly two parameters. The first is a map and the
+second is a proposed key. Keys are compared with a simple '=='.
+
+#### func ShouldContainSubstring
+
+```go
+func ShouldContainSubstring(actual interface{}, expected ...interface{}) string
+```
+ShouldContainSubstring receives exactly 2 string parameters and ensures that the
+first contains the second as a substring.
+
+#### func ShouldEndWith
+
+```go
+func ShouldEndWith(actual interface{}, expected ...interface{}) string
+```
+ShouldEndWith receives exactly 2 string parameters and ensures that the first
+ends with the second.
+
+#### func ShouldEqual
+
+```go
+func ShouldEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldEqual receives exactly two parameters and does an equality check using the
+following semantics:
+
+1. If the expected and actual values implement an Equal method in the form
+   `func (this T) Equal(that T) bool` then call the method. If true, they are equal.
+2. The expected and actual values are judged equal or not by oglematchers.Equals.
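+
+A short illustrative sketch of the Equal-method path (the `money` type is hypothetical, not part of this package):
+
+```go
+type money struct{ cents int }
+
+// Satisfies rule 1: `func (this T) Equal(that T) bool`.
+func (m money) Equal(other money) bool { return m.cents == other.cents }
+
+// In a test or application:
+// So(money{100}, ShouldEqual, money{100}) // passes via the Equal method
+// So(money{100}, ShouldEqual, money{99})  // fails
+```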
+
+#### func ShouldEqualJSON
+
+```go
+func ShouldEqualJSON(actual interface{}, expected ...interface{}) string
+```
+ShouldEqualJSON receives exactly two parameters and does an equality check by
+marshalling to JSON
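+
+For example (a sketch; both arguments must be strings containing valid JSON):
+
+```go
+// Key order and insignificant whitespace are ignored after re-marshalling:
+So(`{"b": 2, "a": 1}`, ShouldEqualJSON, `{"a":1,"b":2}`)
+```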
+
+#### func ShouldEqualTrimSpace
+
+```go
+func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string
+```
+ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the
+first is equal to the second after removing all leading and trailing whitespace
+using strings.TrimSpace(first).
+
+#### func ShouldEqualWithout
+
+```go
+func ShouldEqualWithout(actual interface{}, expected ...interface{}) string
+```
+ShouldEqualWithout receives exactly 3 string parameters and ensures that the
+first is equal to the second after removing all instances of the third from the
+first using strings.Replace(first, third, "", -1).
+
+#### func ShouldHappenAfter
+
+```go
+func ShouldHappenAfter(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the
+first happens after the second.
+
+#### func ShouldHappenBefore
+
+```go
+func ShouldHappenBefore(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the
+first happens before the second.
+
+#### func ShouldHappenBetween
+
+```go
+func ShouldHappenBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the
+first happens between (not on) the second and third.
+
+#### func ShouldHappenOnOrAfter
+
+```go
+func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that
+the first happens on or after the second.
+
+#### func ShouldHappenOnOrBefore
+
+```go
+func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that
+the first happens on or before the second.
+
+#### func ShouldHappenOnOrBetween
+
+```go
+func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that
+the first happens between or on the second and third.
+
+#### func ShouldHappenWithin
+
+```go
+func ShouldHappenWithin(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
+arguments) and asserts that the first time.Time happens within or on the
+duration specified relative to the other time.Time.
+
+#### func ShouldHaveLength
+
+```go
+func ShouldHaveLength(actual interface{}, expected ...interface{}) string
+```
+ShouldHaveLength receives 2 parameters. The first is a collection to check the
+length of, the second being the expected length. It obeys the rules specified by
+the len function for determining length: http://golang.org/pkg/builtin/#len
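+
+For example (an illustrative sketch; the return values of `So` are ignored for brevity):
+
+```go
+So([]int{1, 2, 3}, ShouldHaveLength, 3)         // passes
+So("abc", ShouldHaveLength, 3)                  // passes
+So(map[string]int{"a": 1}, ShouldHaveLength, 2) // fails
+```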
+
+#### func ShouldHaveSameTypeAs
+
+```go
+func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string
+```
+ShouldHaveSameTypeAs receives exactly two parameters and compares their
+underlying types for equality.
+
+#### func ShouldImplement
+
+```go
+func ShouldImplement(actual interface{}, expectedList ...interface{}) string
+```
+ShouldImplement receives exactly two parameters and ensures that the first
+implements the interface type of the second.
+
+#### func ShouldNotAlmostEqual
+
+```go
+func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
+
+#### func ShouldNotBeBetween
+
+```go
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBetween receives exactly three parameters: an actual value, a lower
+bound, and an upper bound. It ensures that the actual value is NOT between both
+bounds.
+
+#### func ShouldNotBeBetweenOrEqual
+
+```go
+func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a
+lower bound, and an upper bound. It ensures that the actual value is neither
+between the bounds nor equal to either of them.
+
+#### func ShouldNotBeBlank
+
+```go
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT
+equal to "".
+
+#### func ShouldNotBeEmpty
+
+```go
+func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeEmpty receives a single parameter (actual) and determines whether or
+not calling len(actual) would return a value greater than zero. It obeys the
+rules specified by the `len` function for determining length:
+http://golang.org/pkg/builtin/#len
+
+#### func ShouldNotBeIn
+
+```go
+func ShouldNotBeIn(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of
+the collection that is passed in either as the second parameter, or of the
+collection that is comprised of all the remaining parameters. This assertion
+ensures that the proposed member is NOT in the collection (using ShouldEqual).
+
+#### func ShouldNotBeNil
+
+```go
+func ShouldNotBeNil(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeNil receives a single parameter and ensures that it is not nil.
+
+#### func ShouldNotBeZeroValue
+
+```go
+func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeZeroValue receives a single parameter and ensures that it is NOT the Go
+equivalent of the default value, or "zero" value.
+
+#### func ShouldNotContain
+
+```go
+func ShouldNotContain(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContain receives exactly two parameters. The first is a slice and the
+second is a proposed member. Membership is determined using ShouldEqual.
+
+#### func ShouldNotContainKey
+
+```go
+func ShouldNotContainKey(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContainKey receives exactly two parameters. The first is a map and the
+second is a proposed absent key. Keys are compared with a simple '=='.
+
+#### func ShouldNotContainSubstring
+
+```go
+func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContainSubstring receives exactly 2 string parameters and ensures that
+the first does NOT contain the second as a substring.
+
+#### func ShouldNotEndWith
+
+```go
+func ShouldNotEndWith(actual interface{}, expected ...interface{}) string
+```
+ShouldNotEndWith receives exactly 2 string parameters and ensures that the first
+does not end with the second.
+
+#### func ShouldNotEqual
+
+```go
+func ShouldNotEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotEqual receives exactly two parameters and does an inequality check. See
+ShouldEqual for details on how equality is determined.
+
+#### func ShouldNotHappenOnOrBetween
+
+```go
+func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts
+that the first does NOT happen between or on the second or third.
+
+#### func ShouldNotHappenWithin
+
+```go
+func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
+arguments) and asserts that the first time.Time does NOT happen within or on the
+duration specified relative to the other time.Time.
+
+#### func ShouldNotHaveSameTypeAs
+
+```go
+func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHaveSameTypeAs receives exactly two parameters and compares their
+underlying types for inequality.
+
+#### func ShouldNotImplement
+
+```go
+func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string
+```
+ShouldNotImplement receives exactly two parameters and ensures that the first
+does NOT implement the interface type of the second.
+
+#### func ShouldNotPanic
+
+```go
+func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldNotPanic receives a void, niladic function and expects to execute the
+function without any panic.
+
+#### func ShouldNotPanicWith
+
+```go
+func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldNotPanicWith receives a void, niladic function and expects to recover a
+panic whose content differs from the second argument.
+
+#### func ShouldNotPointTo
+
+```go
+func ShouldNotPointTo(actual interface{}, expected ...interface{}) string
+```
+ShouldNotPointTo receives exactly two parameters and checks to see that they
+point to different addresses.
+
+#### func ShouldNotResemble
+
+```go
+func ShouldNotResemble(actual interface{}, expected ...interface{}) string
+```
+ShouldNotResemble receives exactly two parameters and does an inverse deep equal
+check (see reflect.DeepEqual)
+
+#### func ShouldNotStartWith
+
+```go
+func ShouldNotStartWith(actual interface{}, expected ...interface{}) string
+```
+ShouldNotStartWith receives exactly 2 string parameters and ensures that the
+first does not start with the second.
+
+#### func ShouldPanic
+
+```go
+func ShouldPanic(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldPanic receives a void, niladic function and expects to recover a panic.
+
+#### func ShouldPanicWith
+
+```go
+func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldPanicWith receives a void, niladic function and expects to recover a panic
+with the second argument as the content.
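+
+A brief sketch of the panic-related assertions described above (return values of `So` ignored for brevity):
+
+```go
+So(func() { panic("boom") }, ShouldPanicWith, "boom") // passes
+So(func() { panic("boom") }, ShouldPanicWith, "bang") // fails
+So(func() {}, ShouldNotPanic)                         // passes
+```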
+
+#### func ShouldPointTo
+
+```go
+func ShouldPointTo(actual interface{}, expected ...interface{}) string
+```
+ShouldPointTo receives exactly two parameters and checks to see that they point
+to the same address.
+
+#### func ShouldResemble
+
+```go
+func ShouldResemble(actual interface{}, expected ...interface{}) string
+```
+ShouldResemble receives exactly two parameters and does a deep equal check (see
+reflect.DeepEqual)
+
+#### func ShouldStartWith
+
+```go
+func ShouldStartWith(actual interface{}, expected ...interface{}) string
+```
+ShouldStartWith receives exactly 2 string parameters and ensures that the first
+starts with the second.
+
+#### func So
+
+```go
+func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string)
+```
+So is a convenience function (as opposed to an inconvenience function?) for
+running assertions on arbitrary arguments in any context, be it for testing or
+even application logging. It allows you to perform assertion-like behavior (and
+get nicely formatted messages detailing discrepancies) but without the program
+blowing up or panicking. All that is required is to import this package and call
+`So` with one of the assertions exported by this package as the second
+parameter. The first return parameter is a boolean indicating if the assertion
+was true. The second return parameter is the well-formatted message showing why
+an assertion was incorrect, or blank if the assertion was correct.
+
+Example:
+
+ if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
+ log.Println(message)
+ }
+
+For an alternative implementation of So (that provides more flexible return
+options) see the `So` function in the package at
+github.com/smartystreets/assertions/assert.
+
+#### type Assertion
+
+```go
+type Assertion struct {
+}
+```
+
+
+#### func New
+
+```go
+func New(t testingT) *Assertion
+```
+New swallows the *testing.T struct and prints failed assertions using t.Error.
+Example: assertions.New(t).So(1, should.Equal, 1)
+
+#### func (*Assertion) Failed
+
+```go
+func (this *Assertion) Failed() bool
+```
+Failed reports whether any calls to So (on this Assertion instance) have failed.
+
+#### func (*Assertion) So
+
+```go
+func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool
+```
+So calls the standalone So function and additionally, calls t.Error in failure
+scenarios.
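+
+A minimal sketch of typical usage in a plain Go test (assumes the standard `testing` package and that this package is imported as `assertions`):
+
+```go
+func TestArithmetic(t *testing.T) {
+    a := assertions.New(t)
+
+    a.So(1+1, assertions.ShouldEqual, 2)
+    a.So([]int{1, 2, 3}, assertions.ShouldContain, 2)
+
+    if a.Failed() {
+        t.Fatal("an assertion above failed; t.Error was already called with details")
+    }
+}
+```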
+
+#### type FailureView
+
+```go
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+```
+
+This struct is also declared in
+github.com/smartystreets/goconvey/convey/reporting. The json struct tags should
+be equal in both declarations.
+
+#### type Serializer
+
+```go
+type Serializer interface {
+ // contains filtered or unexported methods
+}
+```
diff --git a/vendor/github.com/smartystreets/assertions/collections.go b/vendor/github.com/smartystreets/assertions/collections.go
new file mode 100644
index 00000000..b534d4ba
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/collections.go
@@ -0,0 +1,244 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ShouldContain receives exactly two parameters. The first is a slice and the
+// second is a proposed member. Membership is determined using ShouldEqual.
+func ShouldContain(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
+ typeName := reflect.TypeOf(actual)
+
+ if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
+ }
+ return fmt.Sprintf(shouldHaveContained, typeName, expected[0])
+ }
+ return success
+}
+
+// ShouldNotContain receives exactly two parameters. The first is a slice and the
+// second is a proposed member. Membership is determined using ShouldEqual.
+func ShouldNotContain(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ typeName := reflect.TypeOf(actual)
+
+ if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
+ if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
+ }
+ return success
+ }
+ return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0])
+}
+
+// ShouldContainKey receives exactly two parameters. The first is a map and the
+// second is a proposed key. Keys are compared with a simple '=='.
+func ShouldContainKey(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ keys, isMap := mapKeys(actual)
+ if !isMap {
+ return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
+ }
+
+ if !keyFound(keys, expected[0]) {
+ return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected)
+ }
+
+ return ""
+}
+
+// ShouldNotContainKey receives exactly two parameters. The first is a map and the
+// second is a proposed absent key. Keys are compared with a simple '=='.
+func ShouldNotContainKey(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ keys, isMap := mapKeys(actual)
+ if !isMap {
+ return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
+ }
+
+ if keyFound(keys, expected[0]) {
+ return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected)
+ }
+
+ return ""
+}
+
+func mapKeys(m interface{}) ([]reflect.Value, bool) {
+ value := reflect.ValueOf(m)
+ if value.Kind() != reflect.Map {
+ return nil, false
+ }
+ return value.MapKeys(), true
+}
+func keyFound(keys []reflect.Value, expectedKey interface{}) bool {
+ found := false
+ for _, key := range keys {
+ if key.Interface() == expectedKey {
+ found = true
+ }
+ }
+ return found
+}
+
+// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection
+// that is passed in either as the second parameter, or of the collection that is comprised
+// of all the remaining parameters. This assertion ensures that the proposed member is in
+// the collection (using ShouldEqual).
+func ShouldBeIn(actual interface{}, expected ...interface{}) string {
+ if fail := atLeast(1, expected); fail != success {
+ return fail
+ }
+
+ if len(expected) == 1 {
+ return shouldBeIn(actual, expected[0])
+ }
+ return shouldBeIn(actual, expected)
+}
+func shouldBeIn(actual interface{}, expected interface{}) string {
+ if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected))
+ }
+ return success
+}
+
+// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection
+// that is passed in either as the second parameter, or of the collection that is comprised
+// of all the remaining parameters. This assertion ensures that the proposed member is NOT in
+// the collection (using ShouldEqual).
+func ShouldNotBeIn(actual interface{}, expected ...interface{}) string {
+ if fail := atLeast(1, expected); fail != success {
+ return fail
+ }
+
+ if len(expected) == 1 {
+ return shouldNotBeIn(actual, expected[0])
+ }
+ return shouldNotBeIn(actual, expected)
+}
+func shouldNotBeIn(actual interface{}, expected interface{}) string {
+ if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil {
+ return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected))
+ }
+ return success
+}
+
+// ShouldBeEmpty receives a single parameter (actual) and determines whether or not
+// calling len(actual) would return `0`. It obeys the rules specified by the len
+// function for determining length: http://golang.org/pkg/builtin/#len
+func ShouldBeEmpty(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ if actual == nil {
+ return success
+ }
+
+ value := reflect.ValueOf(actual)
+ switch value.Kind() {
+ case reflect.Slice:
+ if value.Len() == 0 {
+ return success
+ }
+ case reflect.Chan:
+ if value.Len() == 0 {
+ return success
+ }
+ case reflect.Map:
+ if value.Len() == 0 {
+ return success
+ }
+ case reflect.String:
+ if value.Len() == 0 {
+ return success
+ }
+ case reflect.Ptr:
+ elem := value.Elem()
+ kind := elem.Kind()
+ if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 {
+ return success
+ }
+ }
+
+ return fmt.Sprintf(shouldHaveBeenEmpty, actual)
+}
+
+// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not
+// calling len(actual) would return a value greater than zero. It obeys the rules
+// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len
+func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ if empty := ShouldBeEmpty(actual, expected...); empty != success {
+ return success
+ }
+ return fmt.Sprintf(shouldNotHaveBeenEmpty, actual)
+}
+
+// ShouldHaveLength receives 2 parameters. The first is a collection to check
+// the length of, the second being the expected length. It obeys the rules
+// specified by the len function for determining length:
+// http://golang.org/pkg/builtin/#len
+func ShouldHaveLength(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ var expectedLen int64
+ lenValue := reflect.ValueOf(expected[0])
+ switch lenValue.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ expectedLen = lenValue.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ expectedLen = int64(lenValue.Uint())
+ default:
+ return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0]))
+ }
+
+ if expectedLen < 0 {
+ return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0])
+ }
+
+ value := reflect.ValueOf(actual)
+ switch value.Kind() {
+ case reflect.Slice,
+ reflect.Chan,
+ reflect.Map,
+ reflect.String:
+ if int64(value.Len()) == expectedLen {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveHadLength, expectedLen, value.Len(), actual)
+ }
+ case reflect.Ptr:
+ elem := value.Elem()
+ kind := elem.Kind()
+ if kind == reflect.Slice || kind == reflect.Array {
+ if int64(elem.Len()) == expectedLen {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveHadLength, expectedLen, elem.Len(), actual)
+ }
+ }
+ }
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual))
+}
diff --git a/vendor/github.com/smartystreets/assertions/doc.go b/vendor/github.com/smartystreets/assertions/doc.go
new file mode 100644
index 00000000..ba30a926
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/doc.go
@@ -0,0 +1,109 @@
+// Package assertions contains the implementations for all assertions which
+// are referenced in goconvey's `convey` package
+// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit)
+// for use with the So(...) method.
+// They can also be used in traditional Go test functions and even in
+// applications.
+//
+// https://smartystreets.com
+//
+// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library.
+// (https://github.com/jacobsa/oglematchers)
+// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library.
+// (https://github.com/luci/go-render)
+package assertions
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// By default we use a no-op serializer. The actual Serializer provides a JSON
+// representation of failure results on selected assertions so the goconvey
+// web UI can display a convenient diff.
+var serializer Serializer = new(noopSerializer)
+
+// GoConveyMode provides control over JSON serialization of failures. When
+// using the assertions in this package from the convey package, JSON results
+// are very helpful and can be rendered in a DIFF view. In that case, this function
+// will be called with a true value to enable the JSON serialization. By default,
+// the assertions in this package will not serialize a JSON result, making
+// standalone usage more convenient.
+func GoConveyMode(yes bool) {
+ if yes {
+ serializer = newSerializer()
+ } else {
+ serializer = new(noopSerializer)
+ }
+}
+
+type testingT interface {
+ Error(args ...interface{})
+}
+
+type Assertion struct {
+ t testingT
+ failed bool
+}
+
+// New swallows the *testing.T struct and prints failed assertions using t.Error.
+// Example: assertions.New(t).So(1, should.Equal, 1)
+func New(t testingT) *Assertion {
+ return &Assertion{t: t}
+}
+
+// Failed reports whether any calls to So (on this Assertion instance) have failed.
+func (this *Assertion) Failed() bool {
+ return this.failed
+}
+
+// So calls the standalone So function and additionally, calls t.Error in failure scenarios.
+func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool {
+ ok, result := So(actual, assert, expected...)
+ if !ok {
+ this.failed = true
+ _, file, line, _ := runtime.Caller(1)
+ this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result))
+ }
+ return ok
+}
+
+// So is a convenience function (as opposed to an inconvenience function?)
+// for running assertions on arbitrary arguments in any context, be it for testing or even
+// application logging. It allows you to perform assertion-like behavior (and get nicely
+// formatted messages detailing discrepancies) but without the program blowing up or panicking.
+// All that is required is to import this package and call `So` with one of the assertions
+// exported by this package as the second parameter.
+// The first return parameter is a boolean indicating if the assertion was true. The second
+// return parameter is the well-formatted message showing why an assertion was incorrect, or
+// blank if the assertion was correct.
+//
+// Example:
+//
+// if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
+// log.Println(message)
+// }
+//
+// For an alternative implementation of So (that provides more flexible return options)
+// see the `So` function in the package at github.com/smartystreets/assertions/assert.
+func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) {
+ if result := so(actual, assert, expected...); len(result) == 0 {
+ return true, result
+ } else {
+ return false, result
+ }
+}
+
+// so is like So, except that it only returns the string message, which is blank if the
+// assertion passed. Used to facilitate testing.
+func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string {
+ return assert(actual, expected...)
+}
+
+// assertion is an alias for a function with a signature that the So()
+// function can handle. Any future or custom assertions should conform to this
+// method signature. The return value should be an empty string if the assertion
+// passes and a well-formed failure message if not.
+type assertion func(actual interface{}, expected ...interface{}) string
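+
+// For illustration only (not part of this package), a custom assertion
+// conforming to that signature could look like:
+//
+//    func ShouldBeEven(actual interface{}, expected ...interface{}) string {
+//        if n, ok := actual.(int); ok && n%2 == 0 {
+//            return "" // success is represented by the empty string
+//        }
+//        return "expected an even integer"
+//    }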
+
+////////////////////////////////////////////////////////////////////////////
diff --git a/vendor/github.com/smartystreets/assertions/equal_method.go b/vendor/github.com/smartystreets/assertions/equal_method.go
new file mode 100644
index 00000000..c4fc38fa
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/equal_method.go
@@ -0,0 +1,75 @@
+package assertions
+
+import "reflect"
+
+type equalityMethodSpecification struct {
+ a interface{}
+ b interface{}
+
+ aType reflect.Type
+ bType reflect.Type
+
+ equalMethod reflect.Value
+}
+
+func newEqualityMethodSpecification(a, b interface{}) *equalityMethodSpecification {
+ return &equalityMethodSpecification{
+ a: a,
+ b: b,
+ }
+}
+
+func (this *equalityMethodSpecification) IsSatisfied() bool {
+ if !this.bothAreSameType() {
+ return false
+ }
+ if !this.typeHasEqualMethod() {
+ return false
+ }
+ if !this.equalMethodReceivesSameTypeForComparison() {
+ return false
+ }
+ if !this.equalMethodReturnsBool() {
+ return false
+ }
+ return true
+}
+
+func (this *equalityMethodSpecification) bothAreSameType() bool {
+ this.aType = reflect.TypeOf(this.a)
+ if this.aType == nil {
+ return false
+ }
+ if this.aType.Kind() == reflect.Ptr {
+ this.aType = this.aType.Elem()
+ }
+ this.bType = reflect.TypeOf(this.b)
+ return this.aType == this.bType
+}
+func (this *equalityMethodSpecification) typeHasEqualMethod() bool {
+ aInstance := reflect.ValueOf(this.a)
+ this.equalMethod = aInstance.MethodByName("Equal")
+ return this.equalMethod != reflect.Value{}
+}
+
+func (this *equalityMethodSpecification) equalMethodReceivesSameTypeForComparison() bool {
+ signature := this.equalMethod.Type()
+ return signature.NumIn() == 1 && signature.In(0) == this.aType
+}
+
+func (this *equalityMethodSpecification) equalMethodReturnsBool() bool {
+ signature := this.equalMethod.Type()
+ return signature.NumOut() == 1 && signature.Out(0) == reflect.TypeOf(true)
+}
+
+func (this *equalityMethodSpecification) AreEqual() bool {
+ a := reflect.ValueOf(this.a)
+ b := reflect.ValueOf(this.b)
+ return areEqual(a, b) && areEqual(b, a)
+}
+func areEqual(receiver reflect.Value, argument reflect.Value) bool {
+ equalMethod := receiver.MethodByName("Equal")
+ argumentList := []reflect.Value{argument}
+ result := equalMethod.Call(argumentList)
+ return result[0].Bool()
+}
diff --git a/vendor/github.com/smartystreets/assertions/equality.go b/vendor/github.com/smartystreets/assertions/equality.go
new file mode 100644
index 00000000..713b0b5a
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/equality.go
@@ -0,0 +1,322 @@
+package assertions
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/smartystreets/assertions/internal/go-render/render"
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ShouldEqual receives exactly two parameters and does an equality check
+// using the following semantics:
+// 1. If the expected and actual values implement an Equal method in the form
+// `func (this T) Equal(that T) bool` then call the method. If true, they are equal.
+// 2. The expected and actual values are judged equal or not by oglematchers.Equals.
+func ShouldEqual(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ return shouldEqual(actual, expected[0])
+}
+func shouldEqual(actual, expected interface{}) (message string) {
+ defer func() {
+ if r := recover(); r != nil {
+ message = serializer.serialize(expected, actual, fmt.Sprintf(shouldHaveBeenEqual, expected, actual))
+ }
+ }()
+
+ if spec := newEqualityMethodSpecification(expected, actual); spec.IsSatisfied() && spec.AreEqual() {
+ return success
+ } else if matchError := oglematchers.Equals(expected).Matches(actual); matchError == nil {
+ return success
+ }
+
+ return serializer.serialize(expected, actual, composeEqualityMismatchMessage(expected, actual))
+}
+func composeEqualityMismatchMessage(expected, actual interface{}) string {
+ if fmt.Sprintf("%v", expected) == fmt.Sprintf("%v", actual) && reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual)
+ } else {
+ return fmt.Sprintf(shouldHaveBeenEqual, expected, actual)
+ }
+}
+
+// ShouldNotEqual receives exactly two parameters and does an inequality check.
+// See ShouldEqual for details on how equality is determined.
+func ShouldNotEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if ShouldEqual(actual, expected[0]) == success {
+ return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldAlmostEqual makes sure that two parameters are close enough to being equal.
+// The acceptable delta may be specified with a third argument,
+// or a very small default delta will be used.
+func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string {
+ actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
+
+ if err != "" {
+ return err
+ }
+
+ if math.Abs(actualFloat-expectedFloat) <= deltaFloat {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat)
+ }
+}
+
+// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
+func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string {
+ actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
+
+ if err != "" {
+ return err
+ }
+
+ if math.Abs(actualFloat-expectedFloat) > deltaFloat {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat)
+ }
+}
+
+func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) {
+ deltaFloat := 0.0000000001
+
+ if len(expected) == 0 {
+ return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)"
+ } else if len(expected) == 2 {
+ delta, err := getFloat(expected[1])
+
+ if err != nil {
+ return 0.0, 0.0, 0.0, "The delta value " + err.Error()
+ }
+
+ deltaFloat = delta
+ } else if len(expected) > 2 {
+ return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)"
+ }
+
+ actualFloat, err := getFloat(actual)
+ if err != nil {
+ return 0.0, 0.0, 0.0, "The actual value " + err.Error()
+ }
+
+ expectedFloat, err := getFloat(expected[0])
+ if err != nil {
+ return 0.0, 0.0, 0.0, "The comparison value " + err.Error()
+ }
+
+ return actualFloat, expectedFloat, deltaFloat, ""
+}
+
+// returns the float value of any real number, or error if it is not a numerical type
+func getFloat(num interface{}) (float64, error) {
+ numValue := reflect.ValueOf(num)
+ numKind := numValue.Kind()
+
+ if numKind == reflect.Int ||
+ numKind == reflect.Int8 ||
+ numKind == reflect.Int16 ||
+ numKind == reflect.Int32 ||
+ numKind == reflect.Int64 {
+ return float64(numValue.Int()), nil
+ } else if numKind == reflect.Uint ||
+ numKind == reflect.Uint8 ||
+ numKind == reflect.Uint16 ||
+ numKind == reflect.Uint32 ||
+ numKind == reflect.Uint64 {
+ return float64(numValue.Uint()), nil
+ } else if numKind == reflect.Float32 ||
+ numKind == reflect.Float64 {
+ return numValue.Float(), nil
+ } else {
+ return 0.0, errors.New("must be a numerical type, but was: " + numKind.String())
+ }
+}
+
+// ShouldEqualJSON receives exactly two parameters and does an equality check by marshalling to JSON
+func ShouldEqualJSON(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+
+ expectedString, expectedErr := remarshal(expected[0].(string))
+ if expectedErr != nil {
+ return "Expected value not valid JSON: " + expectedErr.Error()
+ }
+
+ actualString, actualErr := remarshal(actual.(string))
+ if actualErr != nil {
+ return "Actual value not valid JSON: " + actualErr.Error()
+ }
+
+ return ShouldEqual(actualString, expectedString)
+}
+func remarshal(value string) (string, error) {
+ var structured map[string]interface{}
+ err := json.Unmarshal([]byte(value), &structured)
+ if err != nil {
+ return "", err
+ }
+ canonical, _ := json.Marshal(structured)
+ return string(canonical), nil
+}
+
+// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual)
+func ShouldResemble(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+
+ if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil {
+ return serializer.serializeDetailed(expected[0], actual,
+ fmt.Sprintf(shouldHaveResembled, render.Render(expected[0]), render.Render(actual)))
+ }
+
+ return success
+}
+
+// ShouldNotResemble receives exactly two parameters and does an inverse deep equal check (see reflect.DeepEqual)
+func ShouldNotResemble(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ } else if ShouldResemble(actual, expected[0]) == success {
+ return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0]))
+ }
+ return success
+}
+
+// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address.
+func ShouldPointTo(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ return shouldPointTo(actual, expected[0])
+
+}
+func shouldPointTo(actual, expected interface{}) string {
+ actualValue := reflect.ValueOf(actual)
+ expectedValue := reflect.ValueOf(expected)
+
+ if ShouldNotBeNil(actual) != success {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil")
+ } else if ShouldNotBeNil(expected) != success {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil")
+ } else if actualValue.Kind() != reflect.Ptr {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not")
+ } else if expectedValue.Kind() != reflect.Ptr {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not")
+ } else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success {
+ actualAddress := reflect.ValueOf(actual).Pointer()
+ expectedAddress := reflect.ValueOf(expected).Pointer()
+ return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo,
+ actual, actualAddress,
+ expected, expectedAddress))
+ }
+ return success
+}
+
+// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresses.
+func ShouldNotPointTo(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ compare := ShouldPointTo(actual, expected[0])
+ if strings.HasPrefix(compare, shouldBePointers) {
+ return compare
+ } else if compare == success {
+ return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer())
+ }
+ return success
+}
+
+// ShouldBeNil receives a single parameter and ensures that it is nil.
+func ShouldBeNil(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual == nil {
+ return success
+ } else if interfaceHasNilValue(actual) {
+ return success
+ }
+ return fmt.Sprintf(shouldHaveBeenNil, actual)
+}
+func interfaceHasNilValue(actual interface{}) bool {
+ value := reflect.ValueOf(actual)
+ kind := value.Kind()
+ nilable := kind == reflect.Slice ||
+ kind == reflect.Chan ||
+ kind == reflect.Func ||
+ kind == reflect.Ptr ||
+ kind == reflect.Map
+
+ // Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr
+ // Reference: http://golang.org/pkg/reflect/#Value.IsNil
+ return nilable && value.IsNil()
+}
+
+// ShouldNotBeNil receives a single parameter and ensures that it is not nil.
+func ShouldNotBeNil(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if ShouldBeNil(actual) == success {
+ return fmt.Sprintf(shouldNotHaveBeenNil, actual)
+ }
+ return success
+}
+
+// ShouldBeTrue receives a single parameter and ensures that it is true.
+func ShouldBeTrue(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual != true {
+ return fmt.Sprintf(shouldHaveBeenTrue, actual)
+ }
+ return success
+}
+
+// ShouldBeFalse receives a single parameter and ensures that it is false.
+func ShouldBeFalse(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual != false {
+ return fmt.Sprintf(shouldHaveBeenFalse, actual)
+ }
+ return success
+}
+
+// ShouldBeZeroValue receives a single parameter and ensures that it is
+// the Go equivalent of the default value, or "zero" value.
+func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroVal, actual) {
+ return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual))
+ }
+ return success
+}
+
+// ShouldNotBeZeroValue receives a single parameter and ensures that it is NOT
+// the Go equivalent of the default value, or "zero" value.
+func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if reflect.DeepEqual(zeroVal, actual) {
+ return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldNotHaveBeenZeroValue, actual))
+ }
+ return success
+}
diff --git a/vendor/github.com/smartystreets/assertions/filter.go b/vendor/github.com/smartystreets/assertions/filter.go
new file mode 100644
index 00000000..7c46ab8e
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/filter.go
@@ -0,0 +1,31 @@
+package assertions
+
+import "fmt"
+
+const (
+ success = ""
+ needExactValues = "This assertion requires exactly %d comparison values (you provided %d)."
+ needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)."
+ needFewerValues = "This assertion allows %d or fewer comparison values (you provided %d)."
+)
+
+func need(needed int, expected []interface{}) string {
+ if len(expected) != needed {
+ return fmt.Sprintf(needExactValues, needed, len(expected))
+ }
+ return success
+}
+
+func atLeast(minimum int, expected []interface{}) string {
+ if len(expected) < 1 {
+ return needNonEmptyCollection
+ }
+ return success
+}
+
+func atMost(max int, expected []interface{}) string {
+ if len(expected) > max {
+ return fmt.Sprintf(needFewerValues, max, len(expected))
+ }
+ return success
+}
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE b/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE
new file mode 100644
index 00000000..6280ff0e
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go
new file mode 100644
index 00000000..313611ef
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go
@@ -0,0 +1,481 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package render
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+var builtinTypeMap = map[reflect.Kind]string{
+ reflect.Bool: "bool",
+ reflect.Complex128: "complex128",
+ reflect.Complex64: "complex64",
+ reflect.Float32: "float32",
+ reflect.Float64: "float64",
+ reflect.Int16: "int16",
+ reflect.Int32: "int32",
+ reflect.Int64: "int64",
+ reflect.Int8: "int8",
+ reflect.Int: "int",
+ reflect.String: "string",
+ reflect.Uint16: "uint16",
+ reflect.Uint32: "uint32",
+ reflect.Uint64: "uint64",
+ reflect.Uint8: "uint8",
+ reflect.Uint: "uint",
+ reflect.Uintptr: "uintptr",
+}
+
+var builtinTypeSet = map[string]struct{}{}
+
+func init() {
+ for _, v := range builtinTypeMap {
+ builtinTypeSet[v] = struct{}{}
+ }
+}
+
+var typeOfString = reflect.TypeOf("")
+var typeOfInt = reflect.TypeOf(int(1))
+var typeOfUint = reflect.TypeOf(uint(1))
+var typeOfFloat = reflect.TypeOf(10.1)
+
+// Render converts a structure to a string representation. Unlike the "%#v"
+// format string, this resolves pointer types' contents in structs, maps, and
+// slices/arrays and prints their field values.
+func Render(v interface{}) string {
+ buf := bytes.Buffer{}
+ s := (*traverseState)(nil)
+ s.render(&buf, 0, reflect.ValueOf(v), false)
+ return buf.String()
+}
+
+// renderPointer is called to render a pointer value.
+//
+// This is overridable so that the test suite can have deterministic pointer
+// values in its expectations.
+var renderPointer = func(buf *bytes.Buffer, p uintptr) {
+ fmt.Fprintf(buf, "0x%016x", p)
+}
+
+// traverseState is used to note and avoid recursion as struct members are being
+// traversed.
+//
+// traverseState is allowed to be nil. Specifically, the root state is nil.
+type traverseState struct {
+ parent *traverseState
+ ptr uintptr
+}
+
+func (s *traverseState) forkFor(ptr uintptr) *traverseState {
+ for cur := s; cur != nil; cur = cur.parent {
+ if ptr == cur.ptr {
+ return nil
+ }
+ }
+
+ fs := &traverseState{
+ parent: s,
+ ptr: ptr,
+ }
+ return fs
+}
+
+func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, implicit bool) {
+ if v.Kind() == reflect.Invalid {
+ buf.WriteString("nil")
+ return
+ }
+ vt := v.Type()
+
+ // If the type being rendered is a potentially recursive type (a type that
+ // can contain itself as a member), we need to avoid recursion.
+ //
+ // If we've already seen this type before, mark that this is the case and
+ // write a recursion placeholder instead of actually rendering it.
+ //
+ // If we haven't seen it before, fork our `seen` tracking so any higher-up
+ // renderers will also render it at least once, then mark that we've seen it
+ // to avoid recursing on lower layers.
+ pe := uintptr(0)
+ vk := vt.Kind()
+ switch vk {
+ case reflect.Ptr:
+ // Since structs and arrays aren't pointers, they can't directly be
+ // recursed, but they can contain pointers to themselves. Record their
+ // pointer to avoid this.
+ switch v.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ pe = v.Pointer()
+ }
+
+ case reflect.Slice, reflect.Map:
+ pe = v.Pointer()
+ }
+ if pe != 0 {
+ s = s.forkFor(pe)
+ if s == nil {
+ buf.WriteString("<REC(")
+ writeType(buf, ptrs, vt)
+ buf.WriteString(")>")
+ return
+ }
+ }
+
+ isAnon := func(t reflect.Type) bool {
+ if t.Name() != "" {
+ if _, ok := builtinTypeSet[t.Name()]; !ok {
+ return false
+ }
+ }
+ return t.Kind() != reflect.Interface
+ }
+
+ switch vk {
+ case reflect.Struct:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ buf.WriteRune('{')
+ if rendered, ok := renderTime(v); ok {
+ buf.WriteString(rendered)
+ } else {
+ structAnon := vt.Name() == ""
+ for i := 0; i < vt.NumField(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ anon := structAnon && isAnon(vt.Field(i).Type)
+
+ if !anon {
+ buf.WriteString(vt.Field(i).Name)
+ buf.WriteRune(':')
+ }
+
+ s.render(buf, 0, v.Field(i), anon)
+ }
+ }
+ buf.WriteRune('}')
+
+ case reflect.Slice:
+ if v.IsNil() {
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ buf.WriteString("(nil)")
+ } else {
+ buf.WriteString("nil")
+ }
+ return
+ }
+ fallthrough
+
+ case reflect.Array:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ anon := vt.Name() == "" && isAnon(vt.Elem())
+ buf.WriteString("{")
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, v.Index(i), anon)
+ }
+ buf.WriteRune('}')
+
+ case reflect.Map:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ if v.IsNil() {
+ buf.WriteString("(nil)")
+ } else {
+ buf.WriteString("{")
+
+ mkeys := v.MapKeys()
+ tryAndSortMapKeys(vt, mkeys)
+
+ kt := vt.Key()
+ keyAnon := typeOfString.ConvertibleTo(kt) || typeOfInt.ConvertibleTo(kt) || typeOfUint.ConvertibleTo(kt) || typeOfFloat.ConvertibleTo(kt)
+ valAnon := vt.Name() == "" && isAnon(vt.Elem())
+ for i, mk := range mkeys {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, mk, keyAnon)
+ buf.WriteString(":")
+ s.render(buf, 0, v.MapIndex(mk), valAnon)
+ }
+ buf.WriteRune('}')
+ }
+
+ case reflect.Ptr:
+ ptrs++
+ fallthrough
+ case reflect.Interface:
+ if v.IsNil() {
+ writeType(buf, ptrs, v.Type())
+ buf.WriteString("(nil)")
+ } else {
+ s.render(buf, ptrs, v.Elem(), false)
+ }
+
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ renderPointer(buf, v.Pointer())
+ buf.WriteRune(')')
+
+ default:
+ tstr := vt.String()
+ implicit = implicit || (ptrs == 0 && builtinTypeMap[vk] == tstr)
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ }
+
+ switch vk {
+ case reflect.String:
+ fmt.Fprintf(buf, "%q", v.String())
+ case reflect.Bool:
+ fmt.Fprintf(buf, "%v", v.Bool())
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fmt.Fprintf(buf, "%d", v.Int())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ fmt.Fprintf(buf, "%d", v.Uint())
+
+ case reflect.Float32, reflect.Float64:
+ fmt.Fprintf(buf, "%g", v.Float())
+
+ case reflect.Complex64, reflect.Complex128:
+ fmt.Fprintf(buf, "%g", v.Complex())
+ }
+
+ if !implicit {
+ buf.WriteRune(')')
+ }
+ }
+}
+
+func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) {
+ parens := ptrs > 0
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ parens = true
+ }
+
+ if parens {
+ buf.WriteRune('(')
+ for i := 0; i < ptrs; i++ {
+ buf.WriteRune('*')
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ if ptrs == 0 {
+ // This pointer was referenced from within writeType (e.g., as part of
+ // rendering a list), and so hasn't had its pointer asterisk accounted
+ // for.
+ buf.WriteRune('*')
+ }
+ writeType(buf, 0, t.Elem())
+
+ case reflect.Interface:
+ if n := t.Name(); n != "" {
+ buf.WriteString(t.String())
+ } else {
+ buf.WriteString("interface{}")
+ }
+
+ case reflect.Array:
+ buf.WriteRune('[')
+ buf.WriteString(strconv.FormatInt(int64(t.Len()), 10))
+ buf.WriteRune(']')
+ writeType(buf, 0, t.Elem())
+
+ case reflect.Slice:
+ if t == reflect.SliceOf(t.Elem()) {
+ buf.WriteString("[]")
+ writeType(buf, 0, t.Elem())
+ } else {
+ // Custom slice type, use type name.
+ buf.WriteString(t.String())
+ }
+
+ case reflect.Map:
+ if t == reflect.MapOf(t.Key(), t.Elem()) {
+ buf.WriteString("map[")
+ writeType(buf, 0, t.Key())
+ buf.WriteRune(']')
+ writeType(buf, 0, t.Elem())
+ } else {
+ // Custom map type, use type name.
+ buf.WriteString(t.String())
+ }
+
+ default:
+ buf.WriteString(t.String())
+ }
+
+ if parens {
+ buf.WriteRune(')')
+ }
+}
+
+type cmpFn func(a, b reflect.Value) int
+
+type sortableValueSlice struct {
+ cmp cmpFn
+ elements []reflect.Value
+}
+
+func (s sortableValueSlice) Len() int {
+ return len(s.elements)
+}
+
+func (s sortableValueSlice) Less(i, j int) bool {
+ return s.cmp(s.elements[i], s.elements[j]) < 0
+}
+
+func (s sortableValueSlice) Swap(i, j int) {
+ s.elements[i], s.elements[j] = s.elements[j], s.elements[i]
+}
+
+// cmpForType returns a cmpFn that orders values of type t consistently with
+// Go's native map-key equality: keys that are equal as map keys compare as 0.
+func cmpForType(t reflect.Type) cmpFn {
+ switch t.Kind() {
+ case reflect.String:
+ return func(av, bv reflect.Value) int {
+ a, b := av.String(), bv.String()
+ if a < b {
+ return -1
+ } else if a > b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Bool:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Bool(), bv.Bool()
+ if !a && b {
+ return -1
+ } else if a && !b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Int(), bv.Int()
+ if a < b {
+ return -1
+ } else if a > b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64, reflect.Uintptr, reflect.UnsafePointer:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Uint(), bv.Uint()
+ if a < b {
+ return -1
+ } else if a > b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Float32, reflect.Float64:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Float(), bv.Float()
+ if a < b {
+ return -1
+ } else if a > b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Interface:
+ return func(av, bv reflect.Value) int {
+ a, b := av.InterfaceData(), bv.InterfaceData()
+ if a[0] < b[0] {
+ return -1
+ } else if a[0] > b[0] {
+ return 1
+ }
+ if a[1] < b[1] {
+ return -1
+ } else if a[1] > b[1] {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Complex64, reflect.Complex128:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Complex(), bv.Complex()
+ if real(a) < real(b) {
+ return -1
+ } else if real(a) > real(b) {
+ return 1
+ }
+ if imag(a) < imag(b) {
+ return -1
+ } else if imag(a) > imag(b) {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Ptr, reflect.Chan:
+ return func(av, bv reflect.Value) int {
+ a, b := av.Pointer(), bv.Pointer()
+ if a < b {
+ return -1
+ } else if a > b {
+ return 1
+ }
+ return 0
+ }
+
+ case reflect.Struct:
+ cmpLst := make([]cmpFn, t.NumField())
+ for i := range cmpLst {
+ cmpLst[i] = cmpForType(t.Field(i).Type)
+ }
+ return func(a, b reflect.Value) int {
+ for i, cmp := range cmpLst {
+ if rslt := cmp(a.Field(i), b.Field(i)); rslt != 0 {
+ return rslt
+ }
+ }
+ return 0
+ }
+ }
+
+ return nil
+}
+
+func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) {
+ if cmp := cmpForType(mt.Key()); cmp != nil {
+ sort.Sort(sortableValueSlice{cmp, k})
+ }
+}
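
The cmpForType and tryAndSortMapKeys helpers above make map rendering deterministic: keys are ordered with a comparison appropriate to their kind before being written out. Below is a minimal sketch of that idea using only the standard library; the sortKeys helper is illustrative and not a function from the vendored package.

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortKeys orders a map's keys so repeated renders produce identical output;
// only string keys are handled in this sketch.
func sortKeys(keys []reflect.Value) {
	sort.Slice(keys, func(i, j int) bool {
		return keys[i].String() < keys[j].String()
	})
}

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	keys := reflect.ValueOf(m).MapKeys()
	sortKeys(keys)
	for _, k := range keys {
		fmt.Printf("%s:%d ", k.String(), m[k.String()])
	}
	// Output: a:1 b:2 c:3
}
```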
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go
new file mode 100644
index 00000000..990c75d0
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go
@@ -0,0 +1,26 @@
+package render
+
+import (
+ "reflect"
+ "time"
+)
+
+func renderTime(value reflect.Value) (string, bool) {
+ if instant, ok := convertTime(value); !ok {
+ return "", false
+ } else if instant.IsZero() {
+ return "0", true
+ } else {
+ return instant.String(), true
+ }
+}
+
+func convertTime(value reflect.Value) (t time.Time, ok bool) {
+ if value.Type() == timeType {
+ defer func() { recover() }()
+ t, ok = value.Interface().(time.Time)
+ }
+ return
+}
+
+var timeType = reflect.TypeOf(time.Time{})
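
convertTime above compares the concrete reflect.Type against the cached timeType, so only time.Time itself is special-cased. A short sketch of that distinction; the myTime type is illustrative only.

```go
package main

import (
	"fmt"
	"reflect"
	"time"
)

type myTime time.Time // a distinct type, even though its layout matches time.Time

func main() {
	timeType := reflect.TypeOf(time.Time{})

	// The exact-type comparison treats only time.Time specially; named types
	// with the same underlying layout do not match it.
	fmt.Println(reflect.TypeOf(time.Now()) == timeType)         // true
	fmt.Println(reflect.TypeOf(myTime(time.Now())) == timeType) // false
}
```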
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE b/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md b/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md
new file mode 100644
index 00000000..215a2bb7
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md
@@ -0,0 +1,58 @@
+[GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers)
+
+`oglematchers` is a package for the Go programming language containing a set of
+matchers, useful in a testing or mocking framework, inspired by and mostly
+compatible with [Google Test][googletest] for C++ and
+[Google JS Test][google-js-test]. The package is used by the
+[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking
+framework, which may be more directly useful to you, but can be generically used
+elsewhere as well.
+
+A "matcher" is simply an object with a `Matches` method defining a set of golang
+values matched by the matcher, and a `Description` method describing that set.
+For example, here are some matchers:
+
+```go
+// Numbers
+Equals(17.13)
+LessThan(19)
+
+// Strings
+Equals("taco")
+HasSubstr("burrito")
+MatchesRegex("t.*o")
+
+// Combining matchers
+AnyOf(LessThan(17), GreaterThan(19))
+```
+
+There are lots more; see [here][reference] for a reference. You can also add
+your own simply by implementing the `oglematchers.Matcher` interface.
+
+
+Installation
+------------
+
+First, make sure you have installed Go 1.0.2 or newer. See
+[here][golang-install] for instructions.
+
+Use the following command to install `oglematchers` and keep it up to date:
+
+ go get -u github.com/smartystreets/assertions/internal/oglematchers
+
+
+Documentation
+-------------
+
+See [here][reference] for documentation. Alternatively, you can install the
+package and then use `godoc`:
+
+ godoc github.com/smartystreets/assertions/internal/oglematchers
+
+
+[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers
+[golang-install]: http://golang.org/doc/install.html
+[googletest]: http://code.google.com/p/googletest/
+[google-js-test]: http://code.google.com/p/google-js-test/
+[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest
+[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock
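
The README notes that custom matchers can be added by implementing the oglematchers.Matcher interface, defined later in this diff in matcher.go (a Matches method plus a Description method). A hedged sketch of a trivial custom matcher follows; the isEven type is illustrative, and since the package is vendored under internal/, the import only compiles from code living inside the assertions module.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

// isEven matches int values that are divisible by two.
type isEven struct{}

func (isEven) Description() string { return "is even" }

func (isEven) Matches(c interface{}) error {
	n, ok := c.(int)
	if !ok {
		// A fatal error tells combinators like Not() not to invert the result.
		return oglematchers.NewFatalError("which is not an int")
	}
	if n%2 != 0 {
		return errors.New("which is odd")
	}
	return nil
}

func main() {
	var m oglematchers.Matcher = isEven{}
	fmt.Println(m.Matches(4))      // <nil>
	fmt.Println(m.Matches(5))      // which is odd
	fmt.Println(m.Matches("taco")) // which is not an int
}
```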
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go
new file mode 100644
index 00000000..2918b51f
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go
@@ -0,0 +1,94 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// AnyOf accepts a set of values S and returns a matcher that follows the
+// algorithm below when considering a candidate c:
+//
+// 1. If there exists a value m in S such that m implements the Matcher
+// interface and m matches c, return true.
+//
+// 2. Otherwise, if there exists a value v in S such that v does not implement
+// the Matcher interface and the matcher Equals(v) matches c, return true.
+//
+// 3. Otherwise, if there is a value m in S such that m implements the Matcher
+// interface and m returns a fatal error for c, return that fatal error.
+//
+// 4. Otherwise, return false.
+//
+// This is akin to a logical OR operation for matchers, with non-matchers x
+// being treated as Equals(x).
+func AnyOf(vals ...interface{}) Matcher {
+ // Get ahold of a type variable for the Matcher interface.
+ var dummy *Matcher
+ matcherType := reflect.TypeOf(dummy).Elem()
+
+ // Create a matcher for each value, or use the value itself if it's already a
+ // matcher.
+ wrapped := make([]Matcher, len(vals))
+ for i, v := range vals {
+ t := reflect.TypeOf(v)
+ if t != nil && t.Implements(matcherType) {
+ wrapped[i] = v.(Matcher)
+ } else {
+ wrapped[i] = Equals(v)
+ }
+ }
+
+ return &anyOfMatcher{wrapped}
+}
+
+type anyOfMatcher struct {
+ wrapped []Matcher
+}
+
+func (m *anyOfMatcher) Description() string {
+ wrappedDescs := make([]string, len(m.wrapped))
+ for i, matcher := range m.wrapped {
+ wrappedDescs[i] = matcher.Description()
+ }
+
+ return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", "))
+}
+
+func (m *anyOfMatcher) Matches(c interface{}) (err error) {
+ err = errors.New("")
+
+ // Try each matcher in turn.
+ for _, matcher := range m.wrapped {
+ wrappedErr := matcher.Matches(c)
+
+ // Return immediately if there's a match.
+ if wrappedErr == nil {
+ err = nil
+ return
+ }
+
+ // Note the fatal error, if any.
+ if _, isFatal := wrappedErr.(*FatalError); isFatal {
+ err = wrappedErr
+ }
+ }
+
+ return
+}
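
A brief usage sketch of the AnyOf semantics documented above: plain values are wrapped in Equals, matchers are used as-is, and a fatal error from a sub-matcher is surfaced when nothing matches (same internal-import caveat as noted after the README).

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	// Matches values that are either less than 10 or exactly "taco".
	m := oglematchers.AnyOf(oglematchers.LessThan(10), "taco")

	fmt.Println(m.Description())      // or(less than 10, taco)
	fmt.Println(m.Matches(3))         // <nil>
	fmt.Println(m.Matches("taco"))    // <nil>
	fmt.Println(m.Matches("burrito")) // which is not comparable (LessThan's fatal error is surfaced)
}
```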
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go
new file mode 100644
index 00000000..87f107d3
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Contains returns a matcher that matches arrays or slices with at least one
+// element that matches the supplied argument. If the argument x is not itself
+// a Matcher, it is wrapped in DeepEquals(x).
+func Contains(x interface{}) Matcher {
+ var result containsMatcher
+ var ok bool
+
+ if result.elementMatcher, ok = x.(Matcher); !ok {
+ result.elementMatcher = DeepEquals(x)
+ }
+
+ return &result
+}
+
+type containsMatcher struct {
+ elementMatcher Matcher
+}
+
+func (m *containsMatcher) Description() string {
+ return fmt.Sprintf("contains: %s", m.elementMatcher.Description())
+}
+
+func (m *containsMatcher) Matches(candidate interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidate)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // Check each element.
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+ if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("")
+}
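
Contains wraps a non-Matcher argument in DeepEquals and scans each element of a slice or array, returning a fatal error for other kinds; a brief usage sketch:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.Contains(17)

	fmt.Println(m.Matches([]int{4, 17, 19})) // <nil>
	fmt.Println(m.Matches([]int{4, 19}))     // non-nil, empty message
	fmt.Println(m.Matches("not a slice"))    // which is not a slice or array (fatal)
}
```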
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
new file mode 100644
index 00000000..1d91baef
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var byteSliceType reflect.Type = reflect.TypeOf([]byte{})
+
+// DeepEquals returns a matcher that matches based on 'deep equality', as
+// defined by the reflect package. This matcher requires that values have
+// identical types to x.
+func DeepEquals(x interface{}) Matcher {
+ return &deepEqualsMatcher{x}
+}
+
+type deepEqualsMatcher struct {
+ x interface{}
+}
+
+func (m *deepEqualsMatcher) Description() string {
+ xDesc := fmt.Sprintf("%v", m.x)
+ xValue := reflect.ValueOf(m.x)
+
+ // Special case: fmt.Sprintf presents nil slices as "[]", but
+ // reflect.DeepEqual makes a distinction between nil and empty slices. Make
+ // this less confusing.
+ if xValue.Kind() == reflect.Slice && xValue.IsNil() {
+ xDesc = ""
+ }
+
+ return fmt.Sprintf("deep equals: %s", xDesc)
+}
+
+func (m *deepEqualsMatcher) Matches(c interface{}) error {
+ // Make sure the types match.
+ ct := reflect.TypeOf(c)
+ xt := reflect.TypeOf(m.x)
+
+ if ct != xt {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: handle byte slices more efficiently.
+ cValue := reflect.ValueOf(c)
+ xValue := reflect.ValueOf(m.x)
+
+ if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() {
+ xBytes := m.x.([]byte)
+ cBytes := c.([]byte)
+
+ if bytes.Equal(cBytes, xBytes) {
+ return nil
+ }
+
+ return errors.New("")
+ }
+
+ // Defer to the reflect package.
+ if reflect.DeepEqual(m.x, c) {
+ return nil
+ }
+
+ // Special case: if the comparison failed because c is the nil slice, give
+ // an indication of this (since its value is printed as "[]").
+ if cValue.Kind() == reflect.Slice && cValue.IsNil() {
+ return errors.New("which is nil")
+ }
+
+ return errors.New("")
+}
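
DeepEquals requires identical types, takes a fast path for []byte, and otherwise defers to reflect.DeepEqual, with a dedicated message for a nil candidate slice; a brief usage sketch:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.DeepEquals([]int{1, 2, 3})

	fmt.Println(m.Matches([]int{1, 2, 3}))   // <nil>
	fmt.Println(m.Matches([]int64{1, 2, 3})) // which is of type []int64 (type mismatch is fatal)
	fmt.Println(m.Matches([]int(nil)))       // which is nil
}
```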
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go
new file mode 100644
index 00000000..a510707b
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go
@@ -0,0 +1,541 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// Equals(x) returns a matcher that matches values v such that v and x are
+// equivalent. This includes the case when the comparison v == x using Go's
+// built-in comparison operator is legal (except for structs, which this
+// matcher does not support), but for convenience the following rules also
+// apply:
+//
+// * Type checking is done based on underlying types rather than actual
+// types, so that e.g. two aliases for string can be compared:
+//
+// type stringAlias1 string
+// type stringAlias2 string
+//
+// a := "taco"
+// b := stringAlias1("taco")
+// c := stringAlias2("taco")
+//
+// ExpectTrue(a == b) // Legal, passes
+// ExpectTrue(b == c) // Illegal, doesn't compile
+//
+// ExpectThat(a, Equals(b)) // Passes
+// ExpectThat(b, Equals(c)) // Passes
+//
+// * Values of numeric type are treated as if they were abstract numbers, and
+// compared accordingly. Therefore Equals(17) will match int(17),
+// int16(17), uint(17), float32(17), complex64(17), and so on.
+//
+// If you want a stricter matcher that contains no such cleverness, see
+// IdenticalTo instead.
+//
+// Arrays are supported by this matcher, but do not participate in the
+// exceptions above. Two arrays compared with this matcher must have identical
+// types, and their element type must itself be comparable according to Go's ==
+// operator.
+func Equals(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+
+ // This matcher doesn't support structs.
+ if v.Kind() == reflect.Struct {
+ panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind()))
+ }
+
+ // The == operator is not defined for non-nil slices.
+ if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) {
+ panic(fmt.Sprintf("oglematchers.Equals: non-nil slice"))
+ }
+
+ return &equalsMatcher{v}
+}
+
+type equalsMatcher struct {
+ expectedValue reflect.Value
+}
+
+////////////////////////////////////////////////////////////////////////
+// Numeric types
+////////////////////////////////////////////////////////////////////////
+
+func isSignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Int && k <= reflect.Int64
+}
+
+func isUnsignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Uint && k <= reflect.Uintptr
+}
+
+func isInteger(v reflect.Value) bool {
+ return isSignedInteger(v) || isUnsignedInteger(v)
+}
+
+func isFloat(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Float32 || k == reflect.Float64
+}
+
+func isComplex(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Complex64 || k == reflect.Complex128
+}
+
+func checkAgainstInt64(e int64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if c.Int() == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ u := c.Uint()
+ if u <= math.MaxInt64 && int64(u) == e {
+ err = nil
+ }
+
+ // Turn around the various floating point types so that the checkAgainst*
+ // functions for them can deal with precision issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstUint64(e uint64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ i := c.Int()
+ if i >= 0 && uint64(i) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if c.Uint() == e {
+ err = nil
+ }
+
+ // Turn around the various floating point types so that the checkAgainst*
+ // functions for them can deal with precision issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstFloat32(e float32, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if float32(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float32(c.Uint()) == e {
+ err = nil
+ }
+
+ case isFloat(c):
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match float32(0.1).
+ if float32(c.Float()) == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match (0.1 + 0i).
+ if im == 0 && float32(rl) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstFloat64(e float64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ ck := c.Kind()
+
+ switch {
+ case isSignedInteger(c):
+ if float64(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float64(c.Uint()) == e {
+ err = nil
+ }
+
+ // If the actual value is lower precision, turn the comparison around so we
+ // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match
+ // float32(0.1).
+ case ck == reflect.Float32 || ck == reflect.Complex64:
+ return Equals(c.Interface()).Matches(e)
+
+ // Otherwise, compare with double precision.
+ case isFloat(c):
+ if c.Float() == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ if im == 0 && rl == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstComplex64(e complex64, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat32(realPart, c)
+
+ case isComplex(c):
+ // Compare using complex64 to avoid a false sense of precision; otherwise
+ // e.g. Equals(0.1 + 0i) won't match float32(0.1).
+ if complex64(c.Complex()) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstComplex128(e complex128, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat64(realPart, c)
+
+ case isComplex(c):
+ if c.Complex() == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+////////////////////////////////////////////////////////////////////////
+// Other types
+////////////////////////////////////////////////////////////////////////
+
+func checkAgainstBool(e bool, c reflect.Value) (err error) {
+ if c.Kind() != reflect.Bool {
+ err = NewFatalError("which is not a bool")
+ return
+ }
+
+ err = errors.New("")
+ if c.Bool() == e {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "chan int".
+ typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem())
+
+ // Make sure c is a chan of the correct type.
+ if c.Kind() != reflect.Chan ||
+ c.Type().ChanDir() != e.Type().ChanDir() ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a function.
+ if c.Kind() != reflect.Func {
+ err = NewFatalError("which is not a function")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a map.
+ if c.Kind() != reflect.Map {
+ err = NewFatalError("which is not a map")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "*int".
+ typeStr := fmt.Sprintf("*%v", e.Type().Elem())
+
+ // Make sure c is a pointer of the correct type.
+ if c.Kind() != reflect.Ptr ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[]int".
+ typeStr := fmt.Sprintf("[]%v", e.Type().Elem())
+
+ // Make sure c is a slice of the correct type.
+ if c.Kind() != reflect.Slice ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstString(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a string.
+ if c.Kind() != reflect.String {
+ err = NewFatalError("which is not a string")
+ return
+ }
+
+ err = errors.New("")
+ if c.String() == e.String() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[2]int".
+ typeStr := fmt.Sprintf("%v", e.Type())
+
+ // Make sure c is the correct type.
+ if c.Type() != e.Type() {
+ err = NewFatalError(fmt.Sprintf("which is not %s", typeStr))
+ return
+ }
+
+ // Check for equality.
+ if e.Interface() != c.Interface() {
+ err = errors.New("")
+ return
+ }
+
+ return
+}
+
+func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a pointer.
+ if c.Kind() != reflect.UnsafePointer {
+ err = NewFatalError("which is not a unsafe.Pointer")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkForNil(c reflect.Value) (err error) {
+ err = errors.New("")
+
+ // Make sure it is legal to call IsNil.
+ switch c.Kind() {
+ case reflect.Invalid:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Interface:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.Slice:
+
+ default:
+ err = NewFatalError("which cannot be compared to nil")
+ return
+ }
+
+ // Ask whether the value is nil. Handle a nil literal (kind Invalid)
+ // specially, since it's not legal to call IsNil there.
+ if c.Kind() == reflect.Invalid || c.IsNil() {
+ err = nil
+ }
+ return
+}
+
+////////////////////////////////////////////////////////////////////////
+// Public implementation
+////////////////////////////////////////////////////////////////////////
+
+func (m *equalsMatcher) Matches(candidate interface{}) error {
+ e := m.expectedValue
+ c := reflect.ValueOf(candidate)
+ ek := e.Kind()
+
+ switch {
+ case ek == reflect.Bool:
+ return checkAgainstBool(e.Bool(), c)
+
+ case isSignedInteger(e):
+ return checkAgainstInt64(e.Int(), c)
+
+ case isUnsignedInteger(e):
+ return checkAgainstUint64(e.Uint(), c)
+
+ case ek == reflect.Float32:
+ return checkAgainstFloat32(float32(e.Float()), c)
+
+ case ek == reflect.Float64:
+ return checkAgainstFloat64(e.Float(), c)
+
+ case ek == reflect.Complex64:
+ return checkAgainstComplex64(complex64(e.Complex()), c)
+
+ case ek == reflect.Complex128:
+ return checkAgainstComplex128(complex128(e.Complex()), c)
+
+ case ek == reflect.Chan:
+ return checkAgainstChan(e, c)
+
+ case ek == reflect.Func:
+ return checkAgainstFunc(e, c)
+
+ case ek == reflect.Map:
+ return checkAgainstMap(e, c)
+
+ case ek == reflect.Ptr:
+ return checkAgainstPtr(e, c)
+
+ case ek == reflect.Slice:
+ return checkAgainstSlice(e, c)
+
+ case ek == reflect.String:
+ return checkAgainstString(e, c)
+
+ case ek == reflect.Array:
+ return checkAgainstArray(e, c)
+
+ case ek == reflect.UnsafePointer:
+ return checkAgainstUnsafePointer(e, c)
+
+ case ek == reflect.Invalid:
+ return checkForNil(c)
+ }
+
+ panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek))
+}
+
+func (m *equalsMatcher) Description() string {
+ // Special case: handle nil.
+ if !m.expectedValue.IsValid() {
+ return "is nil"
+ }
+
+ return fmt.Sprintf("%v", m.expectedValue.Interface())
+}
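
The abstract-number behaviour documented on Equals means numeric candidates are compared across kinds, while non-numeric candidates produce a fatal error; a brief usage sketch:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.Equals(17)

	fmt.Println(m.Matches(int16(17)))   // <nil>
	fmt.Println(m.Matches(uint(17)))    // <nil>
	fmt.Println(m.Matches(float32(17))) // <nil>
	fmt.Println(m.Matches(17.5))        // non-nil, empty message
	fmt.Println(m.Matches("seventeen")) // which is not numeric (fatal)
}
```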
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go
new file mode 100644
index 00000000..4b9d103a
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterOrEqual returns a matcher that matches integer, floating point, or
+// strings values v such that v >= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterOrEqual will panic.
+func GreaterOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than or equal to \"%s\"", x)
+ }
+
+ return transformDescription(Not(LessThan(x)), desc)
+}
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go
new file mode 100644
index 00000000..3eef3217
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterThan returns a matcher that matches integer, floating point, or
+// strings values v such that v > x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterThan will panic.
+func GreaterThan(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than \"%s\"", x)
+ }
+
+ return transformDescription(Not(LessOrEqual(x)), desc)
+}
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go
new file mode 100644
index 00000000..8402cdea
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// LessOrEqual returns a matcher that matches integer, floating point, or
+// strings values v such that v <= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessOrEqual will panic.
+func LessOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("less than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("less than or equal to \"%s\"", x)
+ }
+
+ // Put LessThan last so that its error messages will be used in the event of
+ // failure.
+ return transformDescription(AnyOf(Equals(x), LessThan(x)), desc)
+}
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go
new file mode 100644
index 00000000..8258e45d
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go
@@ -0,0 +1,152 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// LessThan returns a matcher that matches integer, floating point, or string
+// values v such that v < x. Comparison is not defined between numeric and
+// string types, but is defined between all integer and floating point types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessThan will panic.
+func LessThan(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+ kind := v.Kind()
+
+ switch {
+ case isInteger(v):
+ case isFloat(v):
+ case kind == reflect.String:
+
+ default:
+ panic(fmt.Sprintf("LessThan: unexpected kind %v", kind))
+ }
+
+ return &lessThanMatcher{v}
+}
+
+type lessThanMatcher struct {
+ limit reflect.Value
+}
+
+func (m *lessThanMatcher) Description() string {
+ // Special case: make it clear that strings are strings.
+ if m.limit.Kind() == reflect.String {
+ return fmt.Sprintf("less than \"%s\"", m.limit.String())
+ }
+
+ return fmt.Sprintf("less than %v", m.limit.Interface())
+}
+
+func compareIntegers(v1, v2 reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(v1) && isSignedInteger(v2):
+ if v1.Int() < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isSignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isSignedInteger(v2):
+ if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Uint() < v2.Uint() {
+ err = nil
+ }
+ return
+ }
+
+ panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2))
+}
+
+func getFloat(v reflect.Value) float64 {
+ switch {
+ case isSignedInteger(v):
+ return float64(v.Int())
+
+ case isUnsignedInteger(v):
+ return float64(v.Uint())
+
+ case isFloat(v):
+ return v.Float()
+ }
+
+ panic(fmt.Sprintf("getFloat: %v", v))
+}
+
+func (m *lessThanMatcher) Matches(c interface{}) (err error) {
+ v1 := reflect.ValueOf(c)
+ v2 := m.limit
+
+ err = errors.New("")
+
+ // Handle strings as a special case.
+ if v1.Kind() == reflect.String && v2.Kind() == reflect.String {
+ if v1.String() < v2.String() {
+ err = nil
+ }
+ return
+ }
+
+ // If we get here, we require that we are dealing with integers or floats.
+ v1Legal := isInteger(v1) || isFloat(v1)
+ v2Legal := isInteger(v2) || isFloat(v2)
+ if !v1Legal || !v2Legal {
+ err = NewFatalError("which is not comparable")
+ return
+ }
+
+ // Handle the various comparison cases.
+ switch {
+ // Both integers
+ case isInteger(v1) && isInteger(v2):
+ return compareIntegers(v1, v2)
+
+ // At least one float32
+ case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32:
+ if float32(getFloat(v1)) < float32(getFloat(v2)) {
+ err = nil
+ }
+ return
+
+ // At least one float64
+ case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64:
+ if getFloat(v1) < getFloat(v2) {
+ err = nil
+ }
+ return
+ }
+
+ // We shouldn't get here.
+ panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2))
+}
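
LessThan compares across integer widths, signedness, and floating point precision via compareIntegers and getFloat above, and treats mixed string/numeric comparisons as fatal; a brief usage sketch:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.LessThan(10)

	fmt.Println(m.Matches(int8(3)))   // <nil>
	fmt.Println(m.Matches(uint64(3))) // <nil>
	fmt.Println(m.Matches(9.99))      // <nil>
	fmt.Println(m.Matches(10))        // non-nil, empty message
	fmt.Println(m.Matches("taco"))    // which is not comparable (fatal)
}
```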
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go
new file mode 100644
index 00000000..78159a07
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go
@@ -0,0 +1,86 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oglematchers provides a set of matchers useful in a testing or
+// mocking framework. These matchers are inspired by and mostly compatible with
+// Google Test for C++ and Google JS Test.
+//
+// This package is used by github.com/smartystreets/assertions/internal/ogletest and
+// github.com/smartystreets/assertions/internal/oglemock, which may be more directly useful if you're not
+// writing your own testing package or defining your own matchers.
+package oglematchers
+
+// A Matcher is some predicate implicitly defining a set of values that it
+// matches. For example, GreaterThan(17) matches all numeric values greater
+// than 17, and HasSubstr("taco") matches all strings with the substring
+// "taco".
+//
+// Matchers are typically exposed to tests via constructor functions like
+// HasSubstr. In order to implement such a function you can either define your
+// own matcher type or use NewMatcher.
+type Matcher interface {
+ // Check whether the supplied value belongs to the set defined by the
+ // matcher. Return a non-nil error if and only if it does not.
+ //
+ // The error describes why the value doesn't match. The error text is a
+ // relative clause that is suitable for being placed after the value. For
+ // example, a predicate that matches strings with a particular substring may,
+ // when presented with a numerical value, return the following error text:
+ //
+ // "which is not a string"
+ //
+ // Then the failure message may look like:
+ //
+ // Expected: has substring "taco"
+ // Actual: 17, which is not a string
+ //
+ // If the error is self-apparent based on the description of the matcher, the
+ // error text may be empty (but the error must still be non-nil). For example:
+ //
+ // Expected: 17
+ // Actual: 19
+ //
+ // If you are implementing a new matcher, see also the documentation on
+ // FatalError.
+ Matches(candidate interface{}) error
+
+ // Description returns a string describing the property that values matching
+ // this matcher have, as a verb phrase where the subject is the value. For
+ // example, "is greather than 17" or "has substring "taco"".
+ Description() string
+}
+
+// FatalError is an implementation of the error interface that may be returned
+// from matchers, indicating the error should be propagated. Returning a
+// *FatalError indicates that the matcher doesn't process values of the
+// supplied type, or otherwise doesn't know how to handle the value.
+//
+// For example, if GreaterThan(17) returned false for the value "taco" without
+// a fatal error, then Not(GreaterThan(17)) would return true. This is
+// technically correct, but is surprising and may mask failures where the wrong
+// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a
+// fatal error, which will be propagated by Not().
+type FatalError struct {
+ errorText string
+}
+
+// NewFatalError creates a FatalError struct with the supplied error text.
+func NewFatalError(s string) *FatalError {
+ return &FatalError{s}
+}
+
+func (e *FatalError) Error() string {
+ return e.errorText
+}
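
The Matcher documentation above describes failure text as the matcher's description plus a relative clause from the returned error. A sketch of how a caller might assemble that message; the explain helper is illustrative, not an API of this package.

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

// explain assembles a failure message in the style described by the Matcher
// documentation: the matcher's description on one line, the actual value plus
// the matcher's relative-clause error on the next.
func explain(m oglematchers.Matcher, actual interface{}) string {
	err := m.Matches(actual)
	if err == nil {
		return "match"
	}
	msg := fmt.Sprintf("Expected: %s\nActual:   %v", m.Description(), actual)
	if err.Error() != "" {
		msg += ", " + err.Error()
	}
	return msg
}

func main() {
	fmt.Println(explain(oglematchers.GreaterThan(17), "taco"))
	// Expected: greater than 17
	// Actual:   taco, which is not comparable
}
```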
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go
new file mode 100644
index 00000000..623789fe
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Not returns a matcher that inverts the set of values matched by the wrapped
+// matcher. It does not transform the result for values for which the wrapped
+// matcher returns a fatal error.
+func Not(m Matcher) Matcher {
+ return &notMatcher{m}
+}
+
+type notMatcher struct {
+ wrapped Matcher
+}
+
+func (m *notMatcher) Matches(c interface{}) (err error) {
+ err = m.wrapped.Matches(c)
+
+ // Did the wrapped matcher say yes?
+ if err == nil {
+ return errors.New("")
+ }
+
+ // Did the wrapped matcher return a fatal error?
+ if _, isFatal := err.(*FatalError); isFatal {
+ return err
+ }
+
+ // The wrapped matcher returned a non-fatal error.
+ return nil
+}
+
+func (m *notMatcher) Description() string {
+ return fmt.Sprintf("not(%s)", m.wrapped.Description())
+}
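
Not inverts non-fatal results but, as the comment above notes, passes fatal errors through unchanged; a brief usage sketch:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.Not(oglematchers.Equals(17))

	fmt.Println(m.Description())   // not(17)
	fmt.Println(m.Matches(19))     // <nil> (19 != 17, so the negation matches)
	fmt.Println(m.Matches(17))     // non-nil, empty message
	fmt.Println(m.Matches("taco")) // which is not numeric (fatal error, not inverted)
}
```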
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go
new file mode 100644
index 00000000..8ea2807c
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go
@@ -0,0 +1,36 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// transformDescription returns a matcher that is equivalent to the supplied
+// one, except that it has the supplied description instead of the one attached
+// to the existing matcher.
+func transformDescription(m Matcher, newDesc string) Matcher {
+ return &transformDescriptionMatcher{newDesc, m}
+}
+
+type transformDescriptionMatcher struct {
+ desc string
+ wrappedMatcher Matcher
+}
+
+func (m *transformDescriptionMatcher) Description() string {
+ return m.desc
+}
+
+func (m *transformDescriptionMatcher) Matches(c interface{}) error {
+ return m.wrappedMatcher.Matches(c)
+}
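
transformDescription is what lets composed matchers such as GreaterOrEqual and GreaterThan, earlier in this diff, keep a readable description while delegating Matches; a brief usage sketch via GreaterOrEqual, since transformDescription itself is unexported:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	// GreaterOrEqual(10) is Not(LessThan(10)) with a friendlier description.
	m := oglematchers.GreaterOrEqual(10)

	fmt.Println(m.Description()) // greater than or equal to 10
	fmt.Println(m.Matches(10))   // <nil>
	fmt.Println(m.Matches(9))    // non-nil, empty message
}
```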
diff --git a/vendor/github.com/smartystreets/assertions/messages.go b/vendor/github.com/smartystreets/assertions/messages.go
new file mode 100644
index 00000000..6ed7dc2a
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/messages.go
@@ -0,0 +1,97 @@
+package assertions
+
+const ( // equality
+ shouldHaveBeenEqual = "Expected: '%v'\nActual: '%v'\n(Should be equal)"
+ shouldNotHaveBeenEqual = "Expected '%v'\nto NOT equal '%v'\n(but it did)!"
+ shouldHaveBeenEqualTypeMismatch = "Expected: '%v' (%T)\nActual: '%v' (%T)\n(Should be equal, type mismatch)"
+ shouldHaveBeenAlmostEqual = "Expected '%v' to almost equal '%v' (but it didn't)!"
+ shouldHaveNotBeenAlmostEqual = "Expected '%v' to NOT almost equal '%v' (but it did)!"
+ shouldHaveResembled = "Expected: '%s'\nActual: '%s'\n(Should resemble)!"
+ shouldNotHaveResembled = "Expected '%#v'\nto NOT resemble '%#v'\n(but it did)!"
+ shouldBePointers = "Both arguments should be pointers "
+ shouldHaveBeenNonNilPointer = shouldBePointers + "(the %s was %s)!"
+ shouldHavePointedTo = "Expected '%+v' (address: '%v') and '%+v' (address: '%v') to be the same address (but they weren't)!"
+ shouldNotHavePointedTo = "Expected '%+v' and '%+v' to be different references (but they matched: '%v')!"
+ shouldHaveBeenNil = "Expected: nil\nActual: '%v'"
+ shouldNotHaveBeenNil = "Expected '%+v' to NOT be nil (but it was)!"
+ shouldHaveBeenTrue = "Expected: true\nActual: %v"
+ shouldHaveBeenFalse = "Expected: false\nActual: %v"
+ shouldHaveBeenZeroValue = "'%+v' should have been the zero value" //"Expected: (zero value)\nActual: %v"
+ shouldNotHaveBeenZeroValue = "'%+v' should NOT have been the zero value"
+)
+
+const ( // quantity comparisons
+ shouldHaveBeenGreater = "Expected '%v' to be greater than '%v' (but it wasn't)!"
+ shouldHaveBeenGreaterOrEqual = "Expected '%v' to be greater than or equal to '%v' (but it wasn't)!"
+ shouldHaveBeenLess = "Expected '%v' to be less than '%v' (but it wasn't)!"
+ shouldHaveBeenLessOrEqual = "Expected '%v' to be less than or equal to '%v' (but it wasn't)!"
+ shouldHaveBeenBetween = "Expected '%v' to be between '%v' and '%v' (but it wasn't)!"
+ shouldNotHaveBeenBetween = "Expected '%v' NOT to be between '%v' and '%v' (but it was)!"
+ shouldHaveDifferentUpperAndLower = "The lower and upper bounds must be different values (they were both '%v')."
+ shouldHaveBeenBetweenOrEqual = "Expected '%v' to be between '%v' and '%v' or equal to one of them (but it wasn't)!"
+ shouldNotHaveBeenBetweenOrEqual = "Expected '%v' NOT to be between '%v' and '%v' or equal to one of them (but it was)!"
+)
+
+const ( // collections
+ shouldHaveContained = "Expected the container (%v) to contain: '%v' (but it didn't)!"
+ shouldNotHaveContained = "Expected the container (%v) NOT to contain: '%v' (but it did)!"
+ shouldHaveContainedKey = "Expected the %v to contain the key: %v (but it didn't)!"
+ shouldNotHaveContainedKey = "Expected the %v NOT to contain the key: %v (but it did)!"
+ shouldHaveBeenIn = "Expected '%v' to be in the container (%v), but it wasn't!"
+ shouldNotHaveBeenIn = "Expected '%v' NOT to be in the container (%v), but it was!"
+ shouldHaveBeenAValidCollection = "You must provide a valid container (was %v)!"
+ shouldHaveBeenAValidMap = "You must provide a valid map type (was %v)!"
+ shouldHaveBeenEmpty = "Expected %+v to be empty (but it wasn't)!"
+ shouldNotHaveBeenEmpty = "Expected %+v to NOT be empty (but it was)!"
+ shouldHaveBeenAValidInteger = "You must provide a valid integer (was %v)!"
+ shouldHaveBeenAValidLength = "You must provide a valid positive integer (was %v)!"
+ shouldHaveHadLength = "Expected collection to have length equal to [%v], but its length was [%v] instead! contents: %+v"
+)
+
+const ( // strings
+ shouldHaveStartedWith = "Expected '%v'\nto start with '%v'\n(but it didn't)!"
+ shouldNotHaveStartedWith = "Expected '%v'\nNOT to start with '%v'\n(but it did)!"
+ shouldHaveEndedWith = "Expected '%v'\nto end with '%v'\n(but it didn't)!"
+ shouldNotHaveEndedWith = "Expected '%v'\nNOT to end with '%v'\n(but it did)!"
+ shouldAllBeStrings = "All arguments to this assertion must be strings (you provided: %v)."
+ shouldBothBeStrings = "Both arguments to this assertion must be strings (you provided %v and %v)."
+ shouldBeString = "The argument to this assertion must be a string (you provided %v)."
+ shouldHaveContainedSubstring = "Expected '%s' to contain substring '%s' (but it didn't)!"
+ shouldNotHaveContainedSubstring = "Expected '%s' NOT to contain substring '%s' (but it did)!"
+ shouldHaveBeenBlank = "Expected '%s' to be blank (but it wasn't)!"
+ shouldNotHaveBeenBlank = "Expected value to NOT be blank (but it was)!"
+)
+
+const ( // panics
+ shouldUseVoidNiladicFunction = "You must provide a void, niladic function as the first argument!"
+ shouldHavePanickedWith = "Expected func() to panic with '%v' (but it panicked with '%v')!"
+ shouldHavePanicked = "Expected func() to panic (but it didn't)!"
+ shouldNotHavePanicked = "Expected func() NOT to panic (error: '%+v')!"
+ shouldNotHavePanickedWith = "Expected func() NOT to panic with '%v' (but it did)!"
+)
+
+const ( // type checking
+ shouldHaveBeenA = "Expected '%v' to be: '%v' (but was: '%v')!"
+ shouldNotHaveBeenA = "Expected '%v' to NOT be: '%v' (but it was)!"
+
+ shouldHaveImplemented = "Expected: '%v interface support'\nActual: '%v' does not implement the interface!"
+ shouldNotHaveImplemented = "Expected '%v'\nto NOT implement '%v'\n(but it did)!"
+ shouldCompareWithInterfacePointer = "The expected value must be a pointer to an interface type (eg. *fmt.Stringer)"
+ shouldNotBeNilActual = "The actual value was 'nil' and should be a value or a pointer to a value!"
+
+ shouldBeError = "Expected an error value (but was '%v' instead)!"
+ shouldBeErrorInvalidComparisonValue = "The final argument to this assertion must be a string or an error value (you provided: '%v')."
+)
+
+const ( // time comparisons
+ shouldUseTimes = "You must provide time instances as arguments to this assertion."
+ shouldUseTimeSlice = "You must provide a slice of time instances as the first argument to this assertion."
+ shouldUseDurationAndTime = "You must provide a duration and a time as arguments to this assertion."
+ shouldHaveHappenedBefore = "Expected '%v' to happen before '%v' (it happened '%v' after)!"
+ shouldHaveHappenedAfter = "Expected '%v' to happen after '%v' (it happened '%v' before)!"
+ shouldHaveHappenedBetween = "Expected '%v' to happen between '%v' and '%v' (it happened '%v' outside threshold)!"
+ shouldNotHaveHappenedOnOrBetween = "Expected '%v' to NOT happen on or between '%v' and '%v' (but it did)!"
+
+ // format params: incorrect-index, previous-index, previous-time, incorrect-index, incorrect-time
+ shouldHaveBeenChronological = "The 'Time' at index [%d] should have happened after the previous one (but it didn't!):\n [%d]: %s\n [%d]: %s (see, it happened before!)"
+)
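
These constants are plain fmt format strings; the assertion functions added later in this diff interpolate the offending values into them. A small sketch of the pattern (exampleGreaterFailure is a hypothetical, package-internal helper):

    package assertions

    import "fmt"

    // exampleGreaterFailure shows roughly how ShouldBeGreaterThan builds its message.
    func exampleGreaterFailure() string {
        return fmt.Sprintf(shouldHaveBeenGreater, 1, 2)
        // "Expected '1' to be greater than '2' (but it wasn't)!"
    }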
diff --git a/vendor/github.com/smartystreets/assertions/panic.go b/vendor/github.com/smartystreets/assertions/panic.go
new file mode 100644
index 00000000..7e75db17
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/panic.go
@@ -0,0 +1,115 @@
+package assertions
+
+import "fmt"
+
+// ShouldPanic receives a void, niladic function and expects to recover a panic.
+func ShouldPanic(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = shouldHavePanicked
+ } else {
+ message = success
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldNotPanic receives a void, niladic function and expects to execute the function without any panic.
+func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered != nil {
+ message = fmt.Sprintf(shouldNotHavePanicked, recovered)
+ } else {
+ message = success
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldPanicWith receives a void, niladic function and expects to recover a panic with the second argument as the content.
+func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = shouldHavePanicked
+ } else {
+ if equal := ShouldEqual(recovered, expected[0]); equal != success {
+ message = serializer.serialize(expected[0], recovered, fmt.Sprintf(shouldHavePanickedWith, expected[0], recovered))
+ } else {
+ message = success
+ }
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldNotPanicWith receives a void, niladic function and expects to recover a panic whose content differs from the second argument.
+func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = success
+ } else {
+ if equal := ShouldEqual(recovered, expected[0]); equal == success {
+ message = fmt.Sprintf(shouldNotHavePanickedWith, expected[0])
+ } else {
+ message = success
+ }
+ }
+ }()
+ action()
+
+ return
+}
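
A quick sketch of how these panic assertions read from calling code; an empty return string means the assertion passed (the Example name is arbitrary):

    package assertions_test

    import (
        "fmt"

        "github.com/smartystreets/assertions"
    )

    func ExamplePanicAssertions() {
        fmt.Println(assertions.ShouldPanic(func() { panic("boom") }) == "")
        fmt.Println(assertions.ShouldPanicWith(func() { panic("boom") }, "boom") == "")
        fmt.Println(assertions.ShouldNotPanic(func() {}) == "")
        // Output:
        // true
        // true
        // true
    }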
diff --git a/vendor/github.com/smartystreets/assertions/quantity.go b/vendor/github.com/smartystreets/assertions/quantity.go
new file mode 100644
index 00000000..f28b0a06
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/quantity.go
@@ -0,0 +1,141 @@
+package assertions
+
+import (
+ "fmt"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second.
+func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second.
+func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second.
+func ShouldBeLessThan(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the first is less than or equal to the second.
+func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenLessOrEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is between both bounds (but not equal to either of them).
+func ShouldBeBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if !isBetween(actual, lower, upper) {
+ return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper)
+ }
+ return success
+}
+
+// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is NOT between both bounds.
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if isBetween(actual, lower, upper) {
+ return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper)
+ }
+ return success
+}
+func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) {
+ lower = values[0]
+ upper = values[1]
+
+ if ShouldNotEqual(lower, upper) != success {
+ return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower)
+ } else if ShouldBeLessThan(lower, upper) != success {
+ lower, upper = upper, lower
+ }
+ return lower, upper, success
+}
+func isBetween(value, lower, upper interface{}) bool {
+ if ShouldBeGreaterThan(value, lower) != success {
+ return false
+ } else if ShouldBeLessThan(value, upper) != success {
+ return false
+ }
+ return true
+}
+
+// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is between both bounds or equal to one of them.
+func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if !isBetweenOrEqual(actual, lower, upper) {
+ return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper)
+ }
+ return success
+}
+
+// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is NOT between the bounds nor equal to either of them.
+func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if isBetweenOrEqual(actual, lower, upper) {
+ return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper)
+ }
+ return success
+}
+
+func isBetweenOrEqual(value, lower, upper interface{}) bool {
+ if ShouldBeGreaterThanOrEqualTo(value, lower) != success {
+ return false
+ } else if ShouldBeLessThanOrEqualTo(value, upper) != success {
+ return false
+ }
+ return true
+}
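
Note that deriveBounds swaps reversed bounds, so the order of the lower and upper arguments does not matter, while the plain Between assertion treats the bounds as exclusive. A short sketch (the Example name is arbitrary):

    package assertions_test

    import (
        "fmt"

        "github.com/smartystreets/assertions"
    )

    func ExampleQuantityAssertions() {
        fmt.Println(assertions.ShouldBeBetween(5, 1, 10) == "")         // bounds in order
        fmt.Println(assertions.ShouldBeBetween(5, 10, 1) == "")         // reversed bounds are swapped
        fmt.Println(assertions.ShouldBeBetweenOrEqual(10, 1, 10) == "") // inclusive variant
        fmt.Println(assertions.ShouldBeBetween(10, 1, 10) == "")        // exclusive, so this fails
        // Output:
        // true
        // true
        // true
        // false
    }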
diff --git a/vendor/github.com/smartystreets/assertions/serializer.go b/vendor/github.com/smartystreets/assertions/serializer.go
new file mode 100644
index 00000000..fa32f940
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/serializer.go
@@ -0,0 +1,63 @@
+package assertions
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/smartystreets/assertions/internal/go-render/render"
+)
+
+type Serializer interface {
+ serialize(expected, actual interface{}, message string) string
+ serializeDetailed(expected, actual interface{}, message string) string
+}
+
+type failureSerializer struct{}
+
+func (self *failureSerializer) serializeDetailed(expected, actual interface{}, message string) string {
+ view := FailureView{
+ Message: message,
+ Expected: render.Render(expected),
+ Actual: render.Render(actual),
+ }
+ serialized, _ := json.Marshal(view)
+ return string(serialized)
+}
+
+func (self *failureSerializer) serialize(expected, actual interface{}, message string) string {
+ view := FailureView{
+ Message: message,
+ Expected: fmt.Sprintf("%+v", expected),
+ Actual: fmt.Sprintf("%+v", actual),
+ }
+ serialized, _ := json.Marshal(view)
+ return string(serialized)
+}
+
+func newSerializer() *failureSerializer {
+ return &failureSerializer{}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This struct is also declared in github.com/smartystreets/goconvey/convey/reporting.
+// The json struct tags should be equal in both declarations.
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+
+///////////////////////////////////////////////////////
+
+// noopSerializer just gives back the original message. This is useful when the
+// assertions are used outside the GoConvey Web UI, which is what requires the
+// JSON structure provided by the failureSerializer.
+type noopSerializer struct{}
+
+func (self *noopSerializer) serialize(expected, actual interface{}, message string) string {
+ return message
+}
+func (self *noopSerializer) serializeDetailed(expected, actual interface{}, message string) string {
+ return message
+}
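
A package-internal sketch of what the two serializers produce (exampleSerializers is hypothetical); the JSON form is what the GoConvey web UI parses back into a FailureView:

    package assertions

    import "fmt"

    func exampleSerializers() {
        s := newSerializer()
        fmt.Println(s.serialize("abc", "abd", "values differ"))
        // {"Message":"values differ","Expected":"abc","Actual":"abd"}

        n := &noopSerializer{}
        fmt.Println(n.serialize("abc", "abd", "values differ"))
        // values differ
    }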
diff --git a/vendor/github.com/smartystreets/assertions/strings.go b/vendor/github.com/smartystreets/assertions/strings.go
new file mode 100644
index 00000000..dbc3f047
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/strings.go
@@ -0,0 +1,227 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// ShouldStartWith receives exactly 2 string parameters and ensures that the first starts with the second.
+func ShouldStartWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ prefix, prefixIsString := expected[0].(string)
+
+ if !valueIsString || !prefixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldStartWith(value, prefix)
+}
+func shouldStartWith(value, prefix string) string {
+ if !strings.HasPrefix(value, prefix) {
+ shortval := value
+ if len(shortval) > len(prefix) {
+ shortval = shortval[:len(prefix)] + "..."
+ }
+ return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix))
+ }
+ return success
+}
+
+// ShouldNotStartWith receives exactly 2 string parameters and ensures that the first does not start with the second.
+func ShouldNotStartWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ prefix, prefixIsString := expected[0].(string)
+
+ if !valueIsString || !prefixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldNotStartWith(value, prefix)
+}
+func shouldNotStartWith(value, prefix string) string {
+ if strings.HasPrefix(value, prefix) {
+ if value == "" {
+ value = "<empty>"
+ }
+ if prefix == "" {
+ prefix = "<empty>"
+ }
+ return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix)
+ }
+ return success
+}
+
+// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second.
+func ShouldEndWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ suffix, suffixIsString := expected[0].(string)
+
+ if !valueIsString || !suffixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldEndWith(value, suffix)
+}
+func shouldEndWith(value, suffix string) string {
+ if !strings.HasSuffix(value, suffix) {
+ shortval := value
+ if len(shortval) > len(suffix) {
+ shortval = "..." + shortval[len(shortval)-len(suffix):]
+ }
+ return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix))
+ }
+ return success
+}
+
+// ShouldNotEndWith receives exactly 2 string parameters and ensures that the first does not end with the second.
+func ShouldNotEndWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ suffix, suffixIsString := expected[0].(string)
+
+ if !valueIsString || !suffixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldNotEndWith(value, suffix)
+}
+func shouldNotEndWith(value, suffix string) string {
+ if strings.HasSuffix(value, suffix) {
+ if value == "" {
+ value = "<empty>"
+ }
+ if suffix == "" {
+ suffix = "<empty>"
+ }
+ return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix)
+ }
+ return success
+}
+
+// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring.
+func ShouldContainSubstring(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ long, longOk := actual.(string)
+ short, shortOk := expected[0].(string)
+
+ if !longOk || !shortOk {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ if !strings.Contains(long, short) {
+ return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, long, short))
+ }
+ return success
+}
+
+// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring.
+func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ long, longOk := actual.(string)
+ short, shortOk := expected[0].(string)
+
+ if !longOk || !shortOk {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ if strings.Contains(long, short) {
+ return fmt.Sprintf(shouldNotHaveContainedSubstring, long, short)
+ }
+ return success
+}
+
+// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "".
+func ShouldBeBlank(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ value, ok := actual.(string)
+ if !ok {
+ return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
+ }
+ if value != "" {
+ return serializer.serialize("", value, fmt.Sprintf(shouldHaveBeenBlank, value))
+ }
+ return success
+}
+
+// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT equal to "".
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ value, ok := actual.(string)
+ if !ok {
+ return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
+ }
+ if value == "" {
+ return shouldNotHaveBeenBlank
+ }
+ return success
+}
+
+// ShouldEqualWithout receives exactly 3 string parameters and ensures that the first is equal to the second
+// after removing all instances of the third from the first using strings.Replace(first, third, "", -1).
+func ShouldEqualWithout(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualString, ok1 := actual.(string)
+ expectedString, ok2 := expected[0].(string)
+ replace, ok3 := expected[1].(string)
+
+ if !ok1 || !ok2 || !ok3 {
+ return fmt.Sprintf(shouldAllBeStrings, []reflect.Type{
+ reflect.TypeOf(actual),
+ reflect.TypeOf(expected[0]),
+ reflect.TypeOf(expected[1]),
+ })
+ }
+
+ replaced := strings.Replace(actualString, replace, "", -1)
+ if replaced == expectedString {
+ return ""
+ }
+
+ return fmt.Sprintf("Expected '%s' to equal '%s' but without any '%s' (but it didn't).", actualString, expectedString, replace)
+}
+
+// ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the first is equal to the second
+// after removing all leading and trailing whitespace using strings.TrimSpace(first).
+func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ actualString, valueIsString := actual.(string)
+ _, value2IsString := expected[0].(string)
+
+ if !valueIsString || !value2IsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ actualString = strings.TrimSpace(actualString)
+ return ShouldEqual(actualString, expected[0])
+}
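
A few passing cases as a sketch of the string assertions above; an empty return string means success (the Example name is arbitrary):

    package assertions_test

    import (
        "fmt"

        "github.com/smartystreets/assertions"
    )

    func ExampleStringAssertions() {
        fmt.Println(assertions.ShouldStartWith("goconvey", "go") == "")
        fmt.Println(assertions.ShouldContainSubstring("goconvey", "conv") == "")
        fmt.Println(assertions.ShouldEqualWithout("a-b-c", "abc", "-") == "")
        fmt.Println(assertions.ShouldEqualTrimSpace("  abc \n", "abc") == "")
        // Output:
        // true
        // true
        // true
        // true
    }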
diff --git a/vendor/github.com/smartystreets/assertions/time.go b/vendor/github.com/smartystreets/assertions/time.go
new file mode 100644
index 00000000..7e050261
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/time.go
@@ -0,0 +1,202 @@
+package assertions
+
+import (
+ "fmt"
+ "time"
+)
+
+// ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the first happens before the second.
+func ShouldHappenBefore(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+
+ if !actualTime.Before(expectedTime) {
+ return fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime))
+ }
+
+ return success
+}
+
+// ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that the first happens on or before the second.
+func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+
+ if actualTime.Equal(expectedTime) {
+ return success
+ }
+ return ShouldHappenBefore(actualTime, expectedTime)
+}
+
+// ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the first happens after the second.
+func ShouldHappenAfter(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+ if !actualTime.After(expectedTime) {
+ return fmt.Sprintf(shouldHaveHappenedAfter, actualTime, expectedTime, expectedTime.Sub(actualTime))
+ }
+ return success
+}
+
+// ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that the first happens on or after the second.
+func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(expectedTime) {
+ return success
+ }
+ return ShouldHappenAfter(actualTime, expectedTime)
+}
+
+// ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the first happens between (not on) the second and third.
+func ShouldHappenBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+
+ if !actualTime.After(min) {
+ return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, min.Sub(actualTime))
+ }
+ if !actualTime.Before(max) {
+ return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, actualTime.Sub(max))
+ }
+ return success
+}
+
+// ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first happens between or on the second and third.
+func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(min) || actualTime.Equal(max) {
+ return success
+ }
+ return ShouldHappenBetween(actualTime, min, max)
+}
+
+// ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first
+// does NOT happen between or on the second or third.
+func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(min) || actualTime.Equal(max) {
+ return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max)
+ }
+ if actualTime.After(min) && actualTime.Before(max) {
+ return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max)
+ }
+ return success
+}
+
+// ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments)
+// and asserts that the first time.Time happens within or on the duration specified relative to
+// the other time.Time.
+func ShouldHappenWithin(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ tolerance, secondOk := expected[0].(time.Duration)
+ threshold, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseDurationAndTime
+ }
+
+ min := threshold.Add(-tolerance)
+ max := threshold.Add(tolerance)
+ return ShouldHappenOnOrBetween(actualTime, min, max)
+}
+
+// ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments)
+// and asserts that the first time.Time does NOT happen within or on the duration specified relative to
+// the other time.Time.
+func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ tolerance, secondOk := expected[0].(time.Duration)
+ threshold, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseDurationAndTime
+ }
+
+ min := threshold.Add(-tolerance)
+ max := threshold.Add(tolerance)
+ return ShouldNotHappenOnOrBetween(actualTime, min, max)
+}
+
+// ShouldBeChronological receives a []time.Time slice and asserts that they are
+// in chronological order starting with the first time.Time as the earliest.
+func ShouldBeChronological(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ times, ok := actual.([]time.Time)
+ if !ok {
+ return shouldUseTimeSlice
+ }
+
+ var previous time.Time
+ for i, current := range times {
+ if i > 0 && current.Before(previous) {
+ return fmt.Sprintf(shouldHaveBeenChronological,
+ i, i-1, previous.String(), i, current.String())
+ }
+ previous = current
+ }
+ return ""
+}
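
A sketch of the time assertions above using fixed timestamps; ShouldHappenWithin builds a window of +/- the tolerance around the third argument and delegates to ShouldHappenOnOrBetween:

    package assertions_test

    import (
        "fmt"
        "time"

        "github.com/smartystreets/assertions"
    )

    func ExampleTimeAssertions() {
        base := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
        later := base.Add(time.Minute)

        fmt.Println(assertions.ShouldHappenBefore(base, later) == "")
        fmt.Println(assertions.ShouldHappenWithin(later, 2*time.Minute, base) == "")
        fmt.Println(assertions.ShouldBeChronological([]time.Time{base, later}) == "")
        // Output:
        // true
        // true
        // true
    }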
diff --git a/vendor/github.com/smartystreets/assertions/type.go b/vendor/github.com/smartystreets/assertions/type.go
new file mode 100644
index 00000000..d2d1dc86
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/type.go
@@ -0,0 +1,134 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// ShouldHaveSameTypeAs receives exactly two parameters and compares their underlying types for equality.
+func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ first := reflect.TypeOf(actual)
+ second := reflect.TypeOf(expected[0])
+
+ if first != second {
+ return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first))
+ }
+
+ return success
+}
+
+// ShouldNotHaveSameTypeAs receives exactly two parameters and compares their underlying types for inequality.
+func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ first := reflect.TypeOf(actual)
+ second := reflect.TypeOf(expected[0])
+
+ if (actual == nil && expected[0] == nil) || first == second {
+ return fmt.Sprintf(shouldNotHaveBeenA, actual, second)
+ }
+ return success
+}
+
+// ShouldImplement receives exactly two parameters and ensures
+// that the first implements the interface type of the second.
+func ShouldImplement(actual interface{}, expectedList ...interface{}) string {
+ if fail := need(1, expectedList); fail != success {
+ return fail
+ }
+
+ expected := expectedList[0]
+ if fail := ShouldBeNil(expected); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ if fail := ShouldNotBeNil(actual); fail != success {
+ return shouldNotBeNilActual
+ }
+
+ var actualType reflect.Type
+ if reflect.TypeOf(actual).Kind() != reflect.Ptr {
+ actualType = reflect.PtrTo(reflect.TypeOf(actual))
+ } else {
+ actualType = reflect.TypeOf(actual)
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ if fail := ShouldNotBeNil(expectedType); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ expectedInterface := expectedType.Elem()
+
+ if !actualType.Implements(expectedInterface) {
+ return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actualType)
+ }
+ return success
+}
+
+// ShouldNotImplement receives exactly two parameters and ensures
+// that the first does NOT implement the interface type of the second.
+func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string {
+ if fail := need(1, expectedList); fail != success {
+ return fail
+ }
+
+ expected := expectedList[0]
+ if fail := ShouldBeNil(expected); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ if fail := ShouldNotBeNil(actual); fail != success {
+ return shouldNotBeNilActual
+ }
+
+ var actualType reflect.Type
+ if reflect.TypeOf(actual).Kind() != reflect.Ptr {
+ actualType = reflect.PtrTo(reflect.TypeOf(actual))
+ } else {
+ actualType = reflect.TypeOf(actual)
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ if fail := ShouldNotBeNil(expectedType); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ expectedInterface := expectedType.Elem()
+
+ if actualType.Implements(expectedInterface) {
+ return fmt.Sprintf(shouldNotHaveImplemented, actualType, expectedInterface)
+ }
+ return success
+}
+
+// ShouldBeError asserts that the first argument implements the error interface.
+// It also compares the first argument against the second argument if provided
+// (which must be an error message string or another error value).
+func ShouldBeError(actual interface{}, expected ...interface{}) string {
+ if fail := atMost(1, expected); fail != success {
+ return fail
+ }
+
+ if !isError(actual) {
+ return fmt.Sprintf(shouldBeError, reflect.TypeOf(actual))
+ }
+
+ if len(expected) == 0 {
+ return success
+ }
+
+ if expected := expected[0]; !isString(expected) && !isError(expected) {
+ return fmt.Sprintf(shouldBeErrorInvalidComparisonValue, reflect.TypeOf(expected))
+ }
+ return ShouldEqual(fmt.Sprint(actual), fmt.Sprint(expected[0]))
+}
+
+func isString(value interface{}) bool { _, ok := value.(string); return ok }
+func isError(value interface{}) bool { _, ok := value.(error); return ok }
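
A sketch of the type assertions above. As the error messages state, ShouldImplement expects a nil pointer to the interface type as its second argument; ShouldBeNil, ShouldNotBeNil, need and atMost are defined elsewhere in this vendored package:

    package assertions_test

    import (
        "bytes"
        "errors"
        "fmt"
        "io"

        "github.com/smartystreets/assertions"
    )

    func ExampleTypeAssertions() {
        // The expected value for ShouldImplement is a nil pointer to the interface type.
        fmt.Println(assertions.ShouldImplement(&bytes.Buffer{}, (*io.Reader)(nil)) == "")
        fmt.Println(assertions.ShouldHaveSameTypeAs("hi", "bye") == "")
        fmt.Println(assertions.ShouldBeError(errors.New("boom"), "boom") == "")
        // Output:
        // true
        // true
        // true
    }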
diff --git a/vendor/github.com/smartystreets/goconvey/LICENSE.md b/vendor/github.com/smartystreets/goconvey/LICENSE.md
new file mode 100644
index 00000000..3f87a40e
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/LICENSE.md
@@ -0,0 +1,23 @@
+Copyright (c) 2016 SmartyStreets, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+NOTE: Various optional and subordinate components carry their own licensing
+requirements and restrictions. Use of those components is subject to the terms
+and conditions outlined the respective license of each component.
diff --git a/vendor/github.com/smartystreets/goconvey/convey/assertions.go b/vendor/github.com/smartystreets/goconvey/convey/assertions.go
new file mode 100644
index 00000000..b37f6b43
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/assertions.go
@@ -0,0 +1,70 @@
+package convey
+
+import "github.com/smartystreets/assertions"
+
+var (
+ ShouldEqual = assertions.ShouldEqual
+ ShouldNotEqual = assertions.ShouldNotEqual
+ ShouldAlmostEqual = assertions.ShouldAlmostEqual
+ ShouldNotAlmostEqual = assertions.ShouldNotAlmostEqual
+ ShouldResemble = assertions.ShouldResemble
+ ShouldNotResemble = assertions.ShouldNotResemble
+ ShouldPointTo = assertions.ShouldPointTo
+ ShouldNotPointTo = assertions.ShouldNotPointTo
+ ShouldBeNil = assertions.ShouldBeNil
+ ShouldNotBeNil = assertions.ShouldNotBeNil
+ ShouldBeTrue = assertions.ShouldBeTrue
+ ShouldBeFalse = assertions.ShouldBeFalse
+ ShouldBeZeroValue = assertions.ShouldBeZeroValue
+
+ ShouldBeGreaterThan = assertions.ShouldBeGreaterThan
+ ShouldBeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo
+ ShouldBeLessThan = assertions.ShouldBeLessThan
+ ShouldBeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo
+ ShouldBeBetween = assertions.ShouldBeBetween
+ ShouldNotBeBetween = assertions.ShouldNotBeBetween
+ ShouldBeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual
+ ShouldNotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual
+
+ ShouldContain = assertions.ShouldContain
+ ShouldNotContain = assertions.ShouldNotContain
+ ShouldContainKey = assertions.ShouldContainKey
+ ShouldNotContainKey = assertions.ShouldNotContainKey
+ ShouldBeIn = assertions.ShouldBeIn
+ ShouldNotBeIn = assertions.ShouldNotBeIn
+ ShouldBeEmpty = assertions.ShouldBeEmpty
+ ShouldNotBeEmpty = assertions.ShouldNotBeEmpty
+ ShouldHaveLength = assertions.ShouldHaveLength
+
+ ShouldStartWith = assertions.ShouldStartWith
+ ShouldNotStartWith = assertions.ShouldNotStartWith
+ ShouldEndWith = assertions.ShouldEndWith
+ ShouldNotEndWith = assertions.ShouldNotEndWith
+ ShouldBeBlank = assertions.ShouldBeBlank
+ ShouldNotBeBlank = assertions.ShouldNotBeBlank
+ ShouldContainSubstring = assertions.ShouldContainSubstring
+ ShouldNotContainSubstring = assertions.ShouldNotContainSubstring
+
+ ShouldPanic = assertions.ShouldPanic
+ ShouldNotPanic = assertions.ShouldNotPanic
+ ShouldPanicWith = assertions.ShouldPanicWith
+ ShouldNotPanicWith = assertions.ShouldNotPanicWith
+
+ ShouldHaveSameTypeAs = assertions.ShouldHaveSameTypeAs
+ ShouldNotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs
+ ShouldImplement = assertions.ShouldImplement
+ ShouldNotImplement = assertions.ShouldNotImplement
+
+ ShouldHappenBefore = assertions.ShouldHappenBefore
+ ShouldHappenOnOrBefore = assertions.ShouldHappenOnOrBefore
+ ShouldHappenAfter = assertions.ShouldHappenAfter
+ ShouldHappenOnOrAfter = assertions.ShouldHappenOnOrAfter
+ ShouldHappenBetween = assertions.ShouldHappenBetween
+ ShouldHappenOnOrBetween = assertions.ShouldHappenOnOrBetween
+ ShouldNotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween
+ ShouldHappenWithin = assertions.ShouldHappenWithin
+ ShouldNotHappenWithin = assertions.ShouldNotHappenWithin
+ ShouldBeChronological = assertions.ShouldBeChronological
+
+ ShouldBeError = assertions.ShouldBeError
+)
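
These are straight aliases of the assertions package, re-exported so that tests can dot-import convey and pass them to So. A typical test reads like this sketch (thing_test and TestAddition are arbitrary names):

    package thing_test

    import (
        "testing"

        . "github.com/smartystreets/goconvey/convey"
    )

    func TestAddition(t *testing.T) {
        Convey("Given two integers", t, func() {
            a, b := 2, 3

            Convey("their sum should behave as expected", func() {
                So(a+b, ShouldEqual, 5)
                So(a+b, ShouldBeGreaterThan, a)
            })
        })
    }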
diff --git a/vendor/github.com/smartystreets/goconvey/convey/context.go b/vendor/github.com/smartystreets/goconvey/convey/context.go
new file mode 100644
index 00000000..2c75c2d7
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/context.go
@@ -0,0 +1,272 @@
+package convey
+
+import (
+ "fmt"
+
+ "github.com/jtolds/gls"
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+type conveyErr struct {
+ fmt string
+ params []interface{}
+}
+
+func (e *conveyErr) Error() string {
+ return fmt.Sprintf(e.fmt, e.params...)
+}
+
+func conveyPanic(fmt string, params ...interface{}) {
+ panic(&conveyErr{fmt, params})
+}
+
+const (
+ missingGoTest = `Top-level calls to Convey(...) need a reference to the *testing.T.
+ Hint: Convey("description here", t, func() { /* notice that the second argument was the *testing.T (t)! */ }) `
+ extraGoTest = `Only the top-level call to Convey(...) needs a reference to the *testing.T.`
+ noStackContext = "Convey operation made without context on goroutine stack.\n" +
+ "Hint: Perhaps you meant to use `Convey(..., func(c C){...})` ?"
+ differentConveySituations = "Different set of Convey statements on subsequent pass!\nDid not expect %#v."
+ multipleIdenticalConvey = "Multiple convey suites with identical names: %#v"
+)
+
+const (
+ failureHalt = "___FAILURE_HALT___"
+
+ nodeKey = "node"
+)
+
+///////////////////////////////// Stack Context /////////////////////////////////
+
+func getCurrentContext() *context {
+ ctx, ok := ctxMgr.GetValue(nodeKey)
+ if ok {
+ return ctx.(*context)
+ }
+ return nil
+}
+
+func mustGetCurrentContext() *context {
+ ctx := getCurrentContext()
+ if ctx == nil {
+ conveyPanic(noStackContext)
+ }
+ return ctx
+}
+
+//////////////////////////////////// Context ////////////////////////////////////
+
+// context magically handles all coordination of Convey's and So assertions.
+//
+// It is tracked on the stack as goroutine-local-storage with the gls package,
+// or explicitly if the user decides to call convey like:
+//
+// Convey(..., func(c C) {
+// c.So(...)
+// })
+//
+// This implements the `C` interface.
+type context struct {
+ reporter reporting.Reporter
+
+ children map[string]*context
+
+ resets []func()
+
+ executedOnce bool
+ expectChildRun *bool
+ complete bool
+
+ focus bool
+ failureMode FailureMode
+}
+
+// rootConvey is the main entry point to a test suite. This is called when
+// there's no context in the stack already, and items must contain a `t` object,
+// or this panics.
+func rootConvey(items ...interface{}) {
+ entry := discover(items)
+
+ if entry.Test == nil {
+ conveyPanic(missingGoTest)
+ }
+
+ expectChildRun := true
+ ctx := &context{
+ reporter: buildReporter(),
+
+ children: make(map[string]*context),
+
+ expectChildRun: &expectChildRun,
+
+ focus: entry.Focus,
+ failureMode: defaultFailureMode.combine(entry.FailMode),
+ }
+ ctxMgr.SetValues(gls.Values{nodeKey: ctx}, func() {
+ ctx.reporter.BeginStory(reporting.NewStoryReport(entry.Test))
+ defer ctx.reporter.EndStory()
+
+ for ctx.shouldVisit() {
+ ctx.conveyInner(entry.Situation, entry.Func)
+ expectChildRun = true
+ }
+ })
+}
+
+//////////////////////////////////// Methods ////////////////////////////////////
+
+func (ctx *context) SkipConvey(items ...interface{}) {
+ ctx.Convey(items, skipConvey)
+}
+
+func (ctx *context) FocusConvey(items ...interface{}) {
+ ctx.Convey(items, focusConvey)
+}
+
+func (ctx *context) Convey(items ...interface{}) {
+ entry := discover(items)
+
+ // we're a branch, or leaf (on the wind)
+ if entry.Test != nil {
+ conveyPanic(extraGoTest)
+ }
+ if ctx.focus && !entry.Focus {
+ return
+ }
+
+ var inner_ctx *context
+ if ctx.executedOnce {
+ var ok bool
+ inner_ctx, ok = ctx.children[entry.Situation]
+ if !ok {
+ conveyPanic(differentConveySituations, entry.Situation)
+ }
+ } else {
+ if _, ok := ctx.children[entry.Situation]; ok {
+ conveyPanic(multipleIdenticalConvey, entry.Situation)
+ }
+ inner_ctx = &context{
+ reporter: ctx.reporter,
+
+ children: make(map[string]*context),
+
+ expectChildRun: ctx.expectChildRun,
+
+ focus: entry.Focus,
+ failureMode: ctx.failureMode.combine(entry.FailMode),
+ }
+ ctx.children[entry.Situation] = inner_ctx
+ }
+
+ if inner_ctx.shouldVisit() {
+ ctxMgr.SetValues(gls.Values{nodeKey: inner_ctx}, func() {
+ inner_ctx.conveyInner(entry.Situation, entry.Func)
+ })
+ }
+}
+
+func (ctx *context) SkipSo(stuff ...interface{}) {
+ ctx.assertionReport(reporting.NewSkipReport())
+}
+
+func (ctx *context) So(actual interface{}, assert assertion, expected ...interface{}) {
+ if result := assert(actual, expected...); result == assertionSuccess {
+ ctx.assertionReport(reporting.NewSuccessReport())
+ } else {
+ ctx.assertionReport(reporting.NewFailureReport(result))
+ }
+}
+
+func (ctx *context) Reset(action func()) {
+ /* TODO: Failure mode configuration */
+ ctx.resets = append(ctx.resets, action)
+}
+
+func (ctx *context) Print(items ...interface{}) (int, error) {
+ fmt.Fprint(ctx.reporter, items...)
+ return fmt.Print(items...)
+}
+
+func (ctx *context) Println(items ...interface{}) (int, error) {
+ fmt.Fprintln(ctx.reporter, items...)
+ return fmt.Println(items...)
+}
+
+func (ctx *context) Printf(format string, items ...interface{}) (int, error) {
+ fmt.Fprintf(ctx.reporter, format, items...)
+ return fmt.Printf(format, items...)
+}
+
+//////////////////////////////////// Private ////////////////////////////////////
+
+// shouldVisit returns true iff we should traverse down into a Convey. Note
+// that just because we don't traverse a Convey this time, doesn't mean that
+// we may not traverse it on a subsequent pass.
+func (c *context) shouldVisit() bool {
+ return !c.complete && *c.expectChildRun
+}
+
+// conveyInner is the function which actually executes the user's anonymous test
+// function body. At this point, Convey or RootConvey has decided that this
+// function should actually run.
+func (ctx *context) conveyInner(situation string, f func(C)) {
+ // Record/Reset state for next time.
+ defer func() {
+ ctx.executedOnce = true
+
+ // This is only needed at the leaves, but there's no harm in also setting it
+ // when returning from branch Convey's
+ *ctx.expectChildRun = false
+ }()
+
+ // Set up+tear down our scope for the reporter
+ ctx.reporter.Enter(reporting.NewScopeReport(situation))
+ defer ctx.reporter.Exit()
+
+ // Recover from any panics in f, and assign the `complete` status for this
+ // node of the tree.
+ defer func() {
+ ctx.complete = true
+ if problem := recover(); problem != nil {
+ if problem, ok := problem.(*conveyErr); ok {
+ panic(problem)
+ }
+ if problem != failureHalt {
+ ctx.reporter.Report(reporting.NewErrorReport(problem))
+ }
+ } else {
+ for _, child := range ctx.children {
+ if !child.complete {
+ ctx.complete = false
+ return
+ }
+ }
+ }
+ }()
+
+ // Resets are registered as the `f` function executes, so nil them here.
+ // All resets are run in registration order (FIFO).
+ ctx.resets = []func(){}
+ defer func() {
+ for _, r := range ctx.resets {
+ // panics handled by the previous defer
+ r()
+ }
+ }()
+
+ if f == nil {
+ // if f is nil, this was either a Convey(..., nil), or a SkipConvey
+ ctx.reporter.Report(reporting.NewSkipReport())
+ } else {
+ f(ctx)
+ }
+}
+
+// assertionReport is a helper for So and SkipSo which makes the report and
+// then possibly panics, depending on the current context's failureMode.
+func (ctx *context) assertionReport(r *reporting.AssertionResult) {
+ ctx.reporter.Report(r)
+ if r.Failure != "" && ctx.failureMode == FailureHalts {
+ panic(failureHalt)
+ }
+}
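
The explicit func(c C) form exists because the goroutine-local-storage lookup in getCurrentContext cannot follow code that runs on another goroutine. A sketch of the httptest case mentioned in the context comment, using only the standard library (names are arbitrary):

    package thing_test

    import (
        "net/http"
        "net/http/httptest"
        "testing"

        . "github.com/smartystreets/goconvey/convey"
    )

    func TestHandlerWithExplicitContext(t *testing.T) {
        Convey("handlers can assert through the captured context", t, func(c C) {
            srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                // This runs on the server's goroutine, where gls cannot find
                // the Convey context, so use the captured c explicitly.
                c.So(r.Method, ShouldEqual, "GET")
            }))
            defer srv.Close()

            resp, err := http.Get(srv.URL)
            So(err, ShouldBeNil)
            defer resp.Body.Close()
            So(resp.StatusCode, ShouldEqual, http.StatusOK)
        })
    }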
diff --git a/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey b/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey
new file mode 100644
index 00000000..a2d9327d
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey
@@ -0,0 +1,4 @@
+#ignore
+-timeout=1s
+#-covermode=count
+#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting
\ No newline at end of file
diff --git a/vendor/github.com/smartystreets/goconvey/convey/discovery.go b/vendor/github.com/smartystreets/goconvey/convey/discovery.go
new file mode 100644
index 00000000..eb8d4cb2
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/discovery.go
@@ -0,0 +1,103 @@
+package convey
+
+type actionSpecifier uint8
+
+const (
+ noSpecifier actionSpecifier = iota
+ skipConvey
+ focusConvey
+)
+
+type suite struct {
+ Situation string
+ Test t
+ Focus bool
+ Func func(C) // nil means skipped
+ FailMode FailureMode
+}
+
+func newSuite(situation string, failureMode FailureMode, f func(C), test t, specifier actionSpecifier) *suite {
+ ret := &suite{
+ Situation: situation,
+ Test: test,
+ Func: f,
+ FailMode: failureMode,
+ }
+ switch specifier {
+ case skipConvey:
+ ret.Func = nil
+ case focusConvey:
+ ret.Focus = true
+ }
+ return ret
+}
+
+func discover(items []interface{}) *suite {
+ name, items := parseName(items)
+ test, items := parseGoTest(items)
+ failure, items := parseFailureMode(items)
+ action, items := parseAction(items)
+ specifier, items := parseSpecifier(items)
+
+ if len(items) != 0 {
+ conveyPanic(parseError)
+ }
+
+ return newSuite(name, failure, action, test, specifier)
+}
+func item(items []interface{}) interface{} {
+ if len(items) == 0 {
+ conveyPanic(parseError)
+ }
+ return items[0]
+}
+func parseName(items []interface{}) (string, []interface{}) {
+ if name, parsed := item(items).(string); parsed {
+ return name, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+func parseGoTest(items []interface{}) (t, []interface{}) {
+ if test, parsed := item(items).(t); parsed {
+ return test, items[1:]
+ }
+ return nil, items
+}
+func parseFailureMode(items []interface{}) (FailureMode, []interface{}) {
+ if mode, parsed := item(items).(FailureMode); parsed {
+ return mode, items[1:]
+ }
+ return FailureInherits, items
+}
+func parseAction(items []interface{}) (func(C), []interface{}) {
+ switch x := item(items).(type) {
+ case nil:
+ return nil, items[1:]
+ case func(C):
+ return x, items[1:]
+ case func():
+ return func(C) { x() }, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+func parseSpecifier(items []interface{}) (actionSpecifier, []interface{}) {
+ if len(items) == 0 {
+ return noSpecifier, items
+ }
+ if spec, ok := items[0].(actionSpecifier); ok {
+ return spec, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+
+// This interface allows us to pass the *testing.T struct
+// throughout the internals of this package without ever
+// having to import the "testing" package.
+type t interface {
+ Fail()
+}
+
+const parseError = "You must provide a name (string), then a *testing.T (if in outermost scope), an optional FailureMode, and then an action (func())."
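
discover enforces the argument order spelled out in parseError. A sketch of the accepted shapes (test and package names are arbitrary):

    package thing_test

    import (
        "testing"

        . "github.com/smartystreets/goconvey/convey"
    )

    func TestArgumentShapes(t *testing.T) {
        // name, then *testing.T (outermost scope only), then an optional
        // FailureMode, then the action.
        Convey("top level", t, FailureContinues, func() {
            Convey("nested scopes omit the *testing.T", func(c C) {
                c.So(true, ShouldBeTrue)
            })
            SkipConvey("parsed, but its Func is set to nil so it never runs", func() {
                So(false, ShouldBeTrue)
            })
        })
    }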
diff --git a/vendor/github.com/smartystreets/goconvey/convey/doc.go b/vendor/github.com/smartystreets/goconvey/convey/doc.go
new file mode 100644
index 00000000..2562ce4c
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/doc.go
@@ -0,0 +1,218 @@
+// Package convey contains all of the public-facing entry points to this project.
+// This means that it should never be required of the user to import any other
+// packages from this project as they serve internal purposes.
+package convey
+
+import "github.com/smartystreets/goconvey/convey/reporting"
+
+////////////////////////////////// suite //////////////////////////////////
+
+// C is the Convey context which you can optionally obtain in your action
+// by calling Convey like:
+//
+// Convey(..., func(c C) {
+// ...
+// })
+//
+// See the documentation on Convey for more details.
+//
+// All methods in this context behave identically to the global functions of the
+// same name in this package.
+type C interface {
+ Convey(items ...interface{})
+ SkipConvey(items ...interface{})
+ FocusConvey(items ...interface{})
+
+ So(actual interface{}, assert assertion, expected ...interface{})
+ SkipSo(stuff ...interface{})
+
+ Reset(action func())
+
+ Println(items ...interface{}) (int, error)
+ Print(items ...interface{}) (int, error)
+ Printf(format string, items ...interface{}) (int, error)
+}
+
+// Convey is the method intended for use when declaring the scopes of
+// a specification. Each scope has a description and a func() which may contain
+// other calls to Convey(), Reset() or Should-style assertions. Convey calls can
+// be nested as far as you see fit.
+//
+// IMPORTANT NOTE: The top-level Convey() within a Test method
+// must conform to the following signature:
+//
+// Convey(description string, t *testing.T, action func())
+//
+// All other calls should look like this (no need to pass in *testing.T):
+//
+// Convey(description string, action func())
+//
+// Don't worry, goconvey will panic if you get it wrong so you can fix it.
+//
+// Additionally, you may explicitly obtain access to the Convey context by doing:
+//
+// Convey(description string, action func(c C))
+//
+// You may need to do this if you want to pass the context through to a
+// goroutine, or to close over the context in a handler to a library which
+// calls your handler in a goroutine (httptest comes to mind).
+//
+// All Convey()-blocks also accept an optional parameter of FailureMode which sets
+// how goconvey should treat failures for So()-assertions in the block and
+// nested blocks. See the constants in this file for the available options.
+//
+// By default it will inherit from its parent block and the top-level blocks
+// default to the FailureHalts setting.
+//
+// This parameter is inserted before the block itself:
+//
+// Convey(description string, t *testing.T, mode FailureMode, action func())
+// Convey(description string, mode FailureMode, action func())
+//
+// See the examples package for, well, examples.
+func Convey(items ...interface{}) {
+ if ctx := getCurrentContext(); ctx == nil {
+ rootConvey(items...)
+ } else {
+ ctx.Convey(items...)
+ }
+}
+
+// SkipConvey is analogous to Convey except that the scope is not executed
+// (which means that child scopes defined within this scope are not run either).
+// The reporter will be notified that this step was skipped.
+func SkipConvey(items ...interface{}) {
+ Convey(append(items, skipConvey)...)
+}
+
+// FocusConvey has the inverse effect of SkipConvey. If the top-level
+// Convey is changed to `FocusConvey`, only nested scopes that are defined
+// with FocusConvey will be run. The rest will be ignored completely. This
+// is handy when debugging a large suite that runs a misbehaving function
+// repeatedly, as you can disable every path to that function but one
+// without swaths of `SkipConvey` calls, using just a targeted chain of
+// calls to FocusConvey.
+func FocusConvey(items ...interface{}) {
+ Convey(append(items, focusConvey)...)
+}
+
+// Reset registers a cleanup function to be run after each Convey()
+// in the same scope. See the examples package for a simple use case.
+func Reset(action func()) {
+ mustGetCurrentContext().Reset(action)
+}
+
+/////////////////////////////////// Assertions ///////////////////////////////////
+
+// assertion is an alias for a function with a signature that the convey.So()
+// method can handle. Any future or custom assertions should conform to this
+// method signature. The return value should be an empty string if the assertion
+// passes and a well-formed failure message if not.
+type assertion func(actual interface{}, expected ...interface{}) string
+
+const assertionSuccess = ""
+
+// So is the means by which assertions are made against the system under test.
+// The majority of exported names in the assertions package begin with the word
+// 'Should' and describe how the first argument (actual) should compare with any
+// of the final (expected) arguments. How many final arguments are accepted
+// depends on the particular assertion that is passed in as the assert argument.
+// See the examples package for use cases and the assertions package for
+// documentation on specific assertion methods. A failing assertion will
+// cause t.Fail() to be invoked--you should never call this method (or other
+// failure-inducing methods) in your test code. Leave that to GoConvey.
+func So(actual interface{}, assert assertion, expected ...interface{}) {
+ mustGetCurrentContext().So(actual, assert, expected...)
+}
+
+// SkipSo is analogous to So except that the assertion that would have been passed
+// to So is not executed and the reporter is notified that the assertion was skipped.
+func SkipSo(stuff ...interface{}) {
+ mustGetCurrentContext().SkipSo()
+}
+
+// FailureMode is a type which determines how the So() blocks should fail
+// if their assertion fails. See constants further down for acceptable values
+type FailureMode string
+
+const (
+
+ // FailureContinues is a failure mode which prevents failing
+ // So()-assertions from halting Convey-block execution, instead
+ // allowing the test to continue past failing So()-assertions.
+ FailureContinues FailureMode = "continue"
+
+ // FailureHalts is the default setting for a top-level Convey()-block
+ // and will cause all failing So()-assertions to halt further execution
+ // in that test-arm and continue on to the next arm.
+ FailureHalts FailureMode = "halt"
+
+ // FailureInherits is the default setting for failure-mode; it will
+ // default to the failure-mode of the parent block. You should never
+ // need to specify this mode in your tests.
+ FailureInherits FailureMode = "inherits"
+)
+
+func (f FailureMode) combine(other FailureMode) FailureMode {
+ if other == FailureInherits {
+ return f
+ }
+ return other
+}
+
+var defaultFailureMode FailureMode = FailureHalts
+
+// SetDefaultFailureMode allows you to specify the default failure mode
+// for all Convey blocks. It is meant to be used in an init function to
+// allow the default mode to be changed across all tests for an entire package,
+// but it can be used anywhere.
+func SetDefaultFailureMode(mode FailureMode) {
+ if mode == FailureContinues || mode == FailureHalts {
+ defaultFailureMode = mode
+ } else {
+ panic("You may only use the constants named 'FailureContinues' and 'FailureHalts' as default failure modes.")
+ }
+}
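+
+// For example, a hypothetical init function in a test package could let every
+// Convey block keep executing past failed assertions:
+//
+//    func init() {
+//        SetDefaultFailureMode(FailureContinues)
+//    }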
+
+//////////////////////////////////// Print functions ////////////////////////////////////
+
+// Print is analogous to fmt.Print (and it even calls fmt.Print). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Print(items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Print(items...)
+}
+
+// Println is analogous to fmt.Println (and it even calls fmt.Println). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Println(items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Println(items...)
+}
+
+// Printf is analogous to fmt.Printf (and it even calls fmt.Printf). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Printf(format string, items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Printf(format, items...)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// SuppressConsoleStatistics prevents automatic printing of console statistics.
+// Calling PrintConsoleStatistics explicitly will force printing of statistics.
+func SuppressConsoleStatistics() {
+ reporting.SuppressConsoleStatistics()
+}
+
+// PrintConsoleStatistics may be called at any time to print assertion statistics.
+// Generally, the best place to do this would be in a TestMain function,
+// after all tests have been run. Something like this:
+//
+// func TestMain(m *testing.M) {
+// convey.SuppressConsoleStatistics()
+// result := m.Run()
+// convey.PrintConsoleStatistics()
+// os.Exit(result)
+// }
+//
+func PrintConsoleStatistics() {
+ reporting.PrintConsoleStatistics()
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go b/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go
new file mode 100644
index 00000000..167c8fb7
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go
@@ -0,0 +1,28 @@
+// Package gotest contains internal functionality. Although this package
+// contains one or more exported names, it is not intended for public
+// consumption. See the examples package for how to use this project.
+package gotest
+
+import (
+ "runtime"
+ "strings"
+)
+
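+// ResolveExternalCaller walks up the call stack until it finds a frame whose
+// file ends in _test.go or _tests.go and returns that frame's file, line and
+// function name; if no such frame exists it returns "", -1, "".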
+func ResolveExternalCaller() (file string, line int, name string) {
+ var caller_id uintptr
+ callers := runtime.Callers(0, callStack)
+
+ for x := 0; x < callers; x++ {
+ caller_id, file, line, _ = runtime.Caller(x)
+ if strings.HasSuffix(file, "_test.go") || strings.HasSuffix(file, "_tests.go") {
+ name = runtime.FuncForPC(caller_id).Name()
+ return
+ }
+ }
+ file, line, name = "", -1, ""
+ return // panic?
+}
+
+const maxStackDepth = 100 // This had better be enough...
+
+var callStack []uintptr = make([]uintptr, maxStackDepth, maxStackDepth)
diff --git a/vendor/github.com/smartystreets/goconvey/convey/init.go b/vendor/github.com/smartystreets/goconvey/convey/init.go
new file mode 100644
index 00000000..cb930a0d
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/init.go
@@ -0,0 +1,81 @@
+package convey
+
+import (
+ "flag"
+ "os"
+
+ "github.com/jtolds/gls"
+ "github.com/smartystreets/assertions"
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+func init() {
+ assertions.GoConveyMode(true)
+
+ declareFlags()
+
+ ctxMgr = gls.NewContextManager()
+}
+
+func declareFlags() {
+ flag.BoolVar(&json, "convey-json", false, "When true, emits results in JSON blocks. Default: 'false'")
+ flag.BoolVar(&silent, "convey-silent", false, "When true, all output from GoConvey is suppressed.")
+ flag.BoolVar(&story, "convey-story", false, "When true, emits story output, otherwise emits dot output. When not provided, this flag mirrors the value of the '-test.v' flag")
+
+ if noStoryFlagProvided() {
+ story = verboseEnabled
+ }
+
+ // FYI: flag.Parse() is called from the testing package.
+}
+
+func noStoryFlagProvided() bool {
+ return !story && !storyDisabled
+}
+
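+// buildReporter picks the console reporter for this run: an explicitly set
+// test reporter wins, otherwise the -convey-* flags and the GOCONVEY_REPORTER
+// environment variable ("json", "silent", "dot" or "story") decide, with the
+// dot reporter as the fallback.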
+func buildReporter() reporting.Reporter {
+ selectReporter := os.Getenv("GOCONVEY_REPORTER")
+
+ switch {
+ case testReporter != nil:
+ return testReporter
+ case json || selectReporter == "json":
+ return reporting.BuildJsonReporter()
+ case silent || selectReporter == "silent":
+ return reporting.BuildSilentReporter()
+ case selectReporter == "dot":
+ // Story is turned on when verbose is set, so we need to check for dot reporter first.
+ return reporting.BuildDotReporter()
+ case story || selectReporter == "story":
+ return reporting.BuildStoryReporter()
+ default:
+ return reporting.BuildDotReporter()
+ }
+}
+
+var (
+ ctxMgr *gls.ContextManager
+
+ // only set by internal tests
+ testReporter reporting.Reporter
+)
+
+var (
+ json bool
+ silent bool
+ story bool
+
+ verboseEnabled = flagFound("-test.v=true")
+ storyDisabled = flagFound("-story=false")
+)
+
+// flagFound parses the command line args manually for flags defined in other
+// packages, like the '-v' flag from the "testing" package.
+func flagFound(flagValue string) bool {
+ for _, arg := range os.Args {
+ if arg == flagValue {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go b/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go
new file mode 100644
index 00000000..777b2a51
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go
@@ -0,0 +1,15 @@
+package convey
+
+import (
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+type nilReporter struct{}
+
+func (self *nilReporter) BeginStory(story *reporting.StoryReport) {}
+func (self *nilReporter) Enter(scope *reporting.ScopeReport) {}
+func (self *nilReporter) Report(report *reporting.AssertionResult) {}
+func (self *nilReporter) Exit() {}
+func (self *nilReporter) EndStory() {}
+func (self *nilReporter) Write(p []byte) (int, error) { return len(p), nil }
+func newNilReporter() *nilReporter { return &nilReporter{} }
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go
new file mode 100644
index 00000000..7bf67dbb
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go
@@ -0,0 +1,16 @@
+package reporting
+
+import (
+ "fmt"
+ "io"
+)
+
+type console struct{}
+
+func (self *console) Write(p []byte) (n int, err error) {
+ return fmt.Print(string(p))
+}
+
+func NewConsole() io.Writer {
+ return new(console)
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go
new file mode 100644
index 00000000..a37d0019
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go
@@ -0,0 +1,5 @@
+// Package reporting contains internal functionality related
+// to console reporting and output. Although this package has
+// exported names, it is not intended for public consumption. See the
+// examples package for how to use this project.
+package reporting
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go
new file mode 100644
index 00000000..47d57c6b
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go
@@ -0,0 +1,40 @@
+package reporting
+
+import "fmt"
+
+type dot struct{ out *Printer }
+
+func (self *dot) BeginStory(story *StoryReport) {}
+
+func (self *dot) Enter(scope *ScopeReport) {}
+
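+// Report prints a single colored marker per assertion: dotError for panics,
+// dotFailure for failed assertions, dotSkip for skipped ones and dotSuccess
+// otherwise, resetting the terminal color afterwards.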
+func (self *dot) Report(report *AssertionResult) {
+ if report.Error != nil {
+ fmt.Print(redColor)
+ self.out.Insert(dotError)
+ } else if report.Failure != "" {
+ fmt.Print(yellowColor)
+ self.out.Insert(dotFailure)
+ } else if report.Skipped {
+ fmt.Print(yellowColor)
+ self.out.Insert(dotSkip)
+ } else {
+ fmt.Print(greenColor)
+ self.out.Insert(dotSuccess)
+ }
+ fmt.Print(resetColor)
+}
+
+func (self *dot) Exit() {}
+
+func (self *dot) EndStory() {}
+
+func (self *dot) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewDotReporter(out *Printer) *dot {
+ self := new(dot)
+ self.out = out
+ return self
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go
new file mode 100644
index 00000000..c396e16b
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go
@@ -0,0 +1,33 @@
+package reporting
+
+type gotestReporter struct{ test T }
+
+func (self *gotestReporter) BeginStory(story *StoryReport) {
+ self.test = story.Test
+}
+
+func (self *gotestReporter) Enter(scope *ScopeReport) {}
+
+func (self *gotestReporter) Report(r *AssertionResult) {
+ if !passed(r) {
+ self.test.Fail()
+ }
+}
+
+func (self *gotestReporter) Exit() {}
+
+func (self *gotestReporter) EndStory() {
+ self.test = nil
+}
+
+func (self *gotestReporter) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewGoTestReporter() *gotestReporter {
+ return new(gotestReporter)
+}
+
+func passed(r *AssertionResult) bool {
+ return r.Error == nil && r.Failure == ""
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go
new file mode 100644
index 00000000..44d080e9
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go
@@ -0,0 +1,94 @@
+package reporting
+
+import (
+ "os"
+ "runtime"
+ "strings"
+)
+
+func init() {
+ if !isColorableTerminal() {
+ monochrome()
+ }
+
+ if runtime.GOOS == "windows" {
+ success, failure, error_ = dotSuccess, dotFailure, dotError
+ }
+}
+
+func BuildJsonReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewJsonReporter(out))
+}
+func BuildDotReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewDotReporter(out),
+ NewProblemReporter(out),
+ consoleStatistics)
+}
+func BuildStoryReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewStoryReporter(out),
+ NewProblemReporter(out),
+ consoleStatistics)
+}
+func BuildSilentReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewSilentProblemReporter(out))
+}
+
+var (
+ newline = "\n"
+ success = "✔"
+ failure = "✘"
+ error_ = "🔥"
+ skip = "⚠"
+ dotSuccess = "."
+ dotFailure = "x"
+ dotError = "E"
+ dotSkip = "S"
+ errorTemplate = "* %s \nLine %d: - %v \n%s\n"
+ failureTemplate = "* %s \nLine %d:\n%s\n"
+)
+
+var (
+ greenColor = "\033[32m"
+ yellowColor = "\033[33m"
+ redColor = "\033[31m"
+ resetColor = "\033[0m"
+)
+
+var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole()))
+
+func SuppressConsoleStatistics() { consoleStatistics.Suppress() }
+func PrintConsoleStatistics() { consoleStatistics.PrintSummary() }
+
+// QuietMode disables all console output symbols. This is only meant to be used
+// for tests that are internal to goconvey where the output is distracting or
+// otherwise not needed in the test output.
+func QuietMode() {
+ success, failure, error_, skip, dotSuccess, dotFailure, dotError, dotSkip = "", "", "", "", "", "", "", ""
+}
+
+func monochrome() {
+ greenColor, yellowColor, redColor, resetColor = "", "", "", ""
+}
+
+func isColorableTerminal() bool {
+ return strings.Contains(os.Getenv("TERM"), "color")
+}
+
+// This interface allows us to pass the *testing.T struct
+// throughout the internals of this tool without ever
+// having to import the "testing" package.
+type T interface {
+ Fail()
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go
new file mode 100644
index 00000000..f8526979
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go
@@ -0,0 +1,88 @@
+// TODO: under unit test
+
+package reporting
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type JsonReporter struct {
+ out *Printer
+ currentKey []string
+ current *ScopeResult
+ index map[string]*ScopeResult
+ scopes []*ScopeResult
+}
+
+func (self *JsonReporter) depth() int { return len(self.currentKey) }
+
+func (self *JsonReporter) BeginStory(story *StoryReport) {}
+
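+// Enter keys each scope by the '|'-joined chain of titles from the root, so a
+// scope is created once per unique chain and reused when re-entered.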
+func (self *JsonReporter) Enter(scope *ScopeReport) {
+ self.currentKey = append(self.currentKey, scope.Title)
+ ID := strings.Join(self.currentKey, "|")
+ if _, found := self.index[ID]; !found {
+ next := newScopeResult(scope.Title, self.depth(), scope.File, scope.Line)
+ self.scopes = append(self.scopes, next)
+ self.index[ID] = next
+ }
+ self.current = self.index[ID]
+}
+
+func (self *JsonReporter) Report(report *AssertionResult) {
+ self.current.Assertions = append(self.current.Assertions, report)
+}
+
+func (self *JsonReporter) Exit() {
+ self.currentKey = self.currentKey[:len(self.currentKey)-1]
+}
+
+func (self *JsonReporter) EndStory() {
+ self.report()
+ self.reset()
+}
+func (self *JsonReporter) report() {
+ scopes := []string{}
+ for _, scope := range self.scopes {
+ serialized, err := json.Marshal(scope)
+ if err != nil {
+ self.out.Println(jsonMarshalFailure)
+ panic(err)
+ }
+ var buffer bytes.Buffer
+ json.Indent(&buffer, serialized, "", " ")
+ scopes = append(scopes, buffer.String())
+ }
+ self.out.Print(fmt.Sprintf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson))
+}
+func (self *JsonReporter) reset() {
+ self.scopes = []*ScopeResult{}
+ self.index = map[string]*ScopeResult{}
+ self.currentKey = nil
+}
+
+func (self *JsonReporter) Write(content []byte) (written int, err error) {
+ self.current.Output += string(content)
+ return len(content), nil
+}
+
+func NewJsonReporter(out *Printer) *JsonReporter {
+ self := new(JsonReporter)
+ self.out = out
+ self.reset()
+ return self
+}
+
+const OpenJson = ">->->OPEN-JSON->->->" // "⌦"
+const CloseJson = "<-<-<-CLOSE-JSON<-<-<" // "⌫"
+const jsonMarshalFailure = `
+
+GOCONVEY_JSON_MARSHALL_FAILURE: There was an error when attempting to convert test results to JSON.
+Please file a bug report and reference the code that caused this failure if possible.
+
+Here's the panic:
+
+`
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go
new file mode 100644
index 00000000..3dac0d4d
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go
@@ -0,0 +1,60 @@
+package reporting
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+type Printer struct {
+ out io.Writer
+ prefix string
+}
+
+func (self *Printer) Println(message string, values ...interface{}) {
+ formatted := self.format(message, values...) + newline
+ self.out.Write([]byte(formatted))
+}
+
+func (self *Printer) Print(message string, values ...interface{}) {
+ formatted := self.format(message, values...)
+ self.out.Write([]byte(formatted))
+}
+
+func (self *Printer) Insert(text string) {
+ self.out.Write([]byte(text))
+}
+
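+// format prepends the printer's current indentation prefix to the message and
+// to every embedded newline, then trims trailing spaces from the result.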
+func (self *Printer) format(message string, values ...interface{}) string {
+ var formatted string
+ if len(values) == 0 {
+ formatted = self.prefix + message
+ } else {
+ formatted = self.prefix + fmt_Sprintf(message, values...)
+ }
+ indented := strings.Replace(formatted, newline, newline+self.prefix, -1)
+ return strings.TrimRight(indented, space)
+}
+
+// Extracting fmt.Sprintf to a separate variable circumvents go vet, which, as of go 1.10, is run with go test.
+var fmt_Sprintf = fmt.Sprintf
+
+func (self *Printer) Indent() {
+ self.prefix += pad
+}
+
+func (self *Printer) Dedent() {
+ if len(self.prefix) >= padLength {
+ self.prefix = self.prefix[:len(self.prefix)-padLength]
+ }
+}
+
+func NewPrinter(out io.Writer) *Printer {
+ self := new(Printer)
+ self.out = out
+ return self
+}
+
+const space = " "
+const pad = space + space
+const padLength = len(pad)
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go
new file mode 100644
index 00000000..9ae493ac
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go
@@ -0,0 +1,80 @@
+package reporting
+
+import "fmt"
+
+type problem struct {
+ silent bool
+ out *Printer
+ errors []*AssertionResult
+ failures []*AssertionResult
+}
+
+func (self *problem) BeginStory(story *StoryReport) {}
+
+func (self *problem) Enter(scope *ScopeReport) {}
+
+func (self *problem) Report(report *AssertionResult) {
+ if report.Error != nil {
+ self.errors = append(self.errors, report)
+ } else if report.Failure != "" {
+ self.failures = append(self.failures, report)
+ }
+}
+
+func (self *problem) Exit() {}
+
+func (self *problem) EndStory() {
+ self.show(self.showErrors, redColor)
+ self.show(self.showFailures, yellowColor)
+ self.prepareForNextStory()
+}
+func (self *problem) show(display func(), color string) {
+ if !self.silent {
+ fmt.Print(color)
+ }
+ display()
+ if !self.silent {
+ fmt.Print(resetColor)
+ }
+ self.out.Dedent()
+}
+func (self *problem) showErrors() {
+ for i, e := range self.errors {
+ if i == 0 {
+ self.out.Println("\nErrors:\n")
+ self.out.Indent()
+ }
+ self.out.Println(errorTemplate, e.File, e.Line, e.Error, e.StackTrace)
+ }
+}
+func (self *problem) showFailures() {
+ for i, f := range self.failures {
+ if i == 0 {
+ self.out.Println("\nFailures:\n")
+ self.out.Indent()
+ }
+ self.out.Println(failureTemplate, f.File, f.Line, f.Failure)
+ }
+}
+
+func (self *problem) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewProblemReporter(out *Printer) *problem {
+ self := new(problem)
+ self.out = out
+ self.prepareForNextStory()
+ return self
+}
+
+func NewSilentProblemReporter(out *Printer) *problem {
+ self := NewProblemReporter(out)
+ self.silent = true
+ return self
+}
+
+func (self *problem) prepareForNextStory() {
+ self.errors = []*AssertionResult{}
+ self.failures = []*AssertionResult{}
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go
new file mode 100644
index 00000000..cce6c5e4
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go
@@ -0,0 +1,39 @@
+package reporting
+
+import "io"
+
+type Reporter interface {
+ BeginStory(story *StoryReport)
+ Enter(scope *ScopeReport)
+ Report(r *AssertionResult)
+ Exit()
+ EndStory()
+ io.Writer
+}
+
+type reporters struct{ collection []Reporter }
+
+func (self *reporters) BeginStory(s *StoryReport) { self.foreach(func(r Reporter) { r.BeginStory(s) }) }
+func (self *reporters) Enter(s *ScopeReport) { self.foreach(func(r Reporter) { r.Enter(s) }) }
+func (self *reporters) Report(a *AssertionResult) { self.foreach(func(r Reporter) { r.Report(a) }) }
+func (self *reporters) Exit() { self.foreach(func(r Reporter) { r.Exit() }) }
+func (self *reporters) EndStory() { self.foreach(func(r Reporter) { r.EndStory() }) }
+
+func (self *reporters) Write(contents []byte) (written int, err error) {
+ self.foreach(func(r Reporter) {
+ written, err = r.Write(contents)
+ })
+ return written, err
+}
+
+func (self *reporters) foreach(action func(Reporter)) {
+ for _, r := range self.collection {
+ action(r)
+ }
+}
+
+func NewReporters(collection ...Reporter) *reporters {
+ self := new(reporters)
+ self.collection = collection
+ return self
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
new file mode 100644
index 00000000..79982854
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
@@ -0,0 +1,2 @@
+#ignore
+-timeout=1s
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go
new file mode 100644
index 00000000..712e6ade
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go
@@ -0,0 +1,179 @@
+package reporting
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/smartystreets/goconvey/convey/gotest"
+)
+
+////////////////// ScopeReport ////////////////////
+
+type ScopeReport struct {
+ Title string
+ File string
+ Line int
+}
+
+func NewScopeReport(title string) *ScopeReport {
+ file, line, _ := gotest.ResolveExternalCaller()
+ self := new(ScopeReport)
+ self.Title = title
+ self.File = file
+ self.Line = line
+ return self
+}
+
+////////////////// ScopeResult ////////////////////
+
+type ScopeResult struct {
+ Title string
+ File string
+ Line int
+ Depth int
+ Assertions []*AssertionResult
+ Output string
+}
+
+func newScopeResult(title string, depth int, file string, line int) *ScopeResult {
+ self := new(ScopeResult)
+ self.Title = title
+ self.Depth = depth
+ self.File = file
+ self.Line = line
+ self.Assertions = []*AssertionResult{}
+ return self
+}
+
+/////////////////// StoryReport /////////////////////
+
+type StoryReport struct {
+ Test T
+ Name string
+ File string
+ Line int
+}
+
+func NewStoryReport(test T) *StoryReport {
+ file, line, name := gotest.ResolveExternalCaller()
+ name = removePackagePath(name)
+ self := new(StoryReport)
+ self.Test = test
+ self.Name = name
+ self.File = file
+ self.Line = line
+ return self
+}
+
+// name comes in looking like "github.com/smartystreets/goconvey/examples.TestName".
+// We only want the stuff after the last '.', which is the name of the test function.
+func removePackagePath(name string) string {
+ parts := strings.Split(name, ".")
+ return parts[len(parts)-1]
+}
+
+/////////////////// FailureView ////////////////////////
+
+// This struct is also declared in github.com/smartystreets/assertions.
+// The json struct tags should be equal in both declarations.
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+
+/////////////////// AssertionResult ////////////////////
+
+type AssertionResult struct {
+ File string
+ Line int
+ Expected string
+ Actual string
+ Failure string
+ Error interface{}
+ StackTrace string
+ Skipped bool
+}
+
+func NewFailureReport(failure string) *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = stackTrace()
+ parseFailure(failure, report)
+ return report
+}
+func parseFailure(failure string, report *AssertionResult) {
+ view := new(FailureView)
+ err := json.Unmarshal([]byte(failure), view)
+ if err == nil {
+ report.Failure = view.Message
+ report.Expected = view.Expected
+ report.Actual = view.Actual
+ } else {
+ report.Failure = failure
+ }
+}
+func NewErrorReport(err interface{}) *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = fullStackTrace()
+ report.Error = fmt.Sprintf("%v", err)
+ return report
+}
+func NewSuccessReport() *AssertionResult {
+ return new(AssertionResult)
+}
+func NewSkipReport() *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = fullStackTrace()
+ report.Skipped = true
+ return report
+}
+
+func caller() (file string, line int) {
+ file, line, _ = gotest.ResolveExternalCaller()
+ return
+}
+
+func stackTrace() string {
+ buffer := make([]byte, 1024*64)
+ n := runtime.Stack(buffer, false)
+ return removeInternalEntries(string(buffer[:n]))
+}
+func fullStackTrace() string {
+ buffer := make([]byte, 1024*64)
+ n := runtime.Stack(buffer, true)
+ return removeInternalEntries(string(buffer[:n]))
+}
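+
+// removeInternalEntries drops stack-trace lines that reference goconvey's own
+// packages (see internalPackages below) so traces point at user test code.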
+func removeInternalEntries(stack string) string {
+ lines := strings.Split(stack, newline)
+ filtered := []string{}
+ for _, line := range lines {
+ if !isExternal(line) {
+ filtered = append(filtered, line)
+ }
+ }
+ return strings.Join(filtered, newline)
+}
+func isExternal(line string) bool {
+ for _, p := range internalPackages {
+ if strings.Contains(line, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// NOTE: any new packages that host goconvey packages will need to be added here!
+// An alternative is to scan the goconvey directory and then exclude stuff like
+// the examples package but that's nasty too.
+var internalPackages = []string{
+ "goconvey/assertions",
+ "goconvey/convey",
+ "goconvey/execution",
+ "goconvey/gotest",
+ "goconvey/reporting",
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go
new file mode 100644
index 00000000..c3ccd056
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go
@@ -0,0 +1,108 @@
+package reporting
+
+import (
+ "fmt"
+ "sync"
+)
+
+func (self *statistics) BeginStory(story *StoryReport) {}
+
+func (self *statistics) Enter(scope *ScopeReport) {}
+
+func (self *statistics) Report(report *AssertionResult) {
+ self.Lock()
+ defer self.Unlock()
+
+ if !self.failing && report.Failure != "" {
+ self.failing = true
+ }
+ if !self.erroring && report.Error != nil {
+ self.erroring = true
+ }
+ if report.Skipped {
+ self.skipped += 1
+ } else {
+ self.total++
+ }
+}
+
+func (self *statistics) Exit() {}
+
+func (self *statistics) EndStory() {
+ self.Lock()
+ defer self.Unlock()
+
+ if !self.suppressed {
+ self.printSummaryLocked()
+ }
+}
+
+func (self *statistics) Suppress() {
+ self.Lock()
+ defer self.Unlock()
+ self.suppressed = true
+}
+
+func (self *statistics) PrintSummary() {
+ self.Lock()
+ defer self.Unlock()
+ self.printSummaryLocked()
+}
+
+func (self *statistics) printSummaryLocked() {
+ self.reportAssertionsLocked()
+ self.reportSkippedSectionsLocked()
+ self.completeReportLocked()
+}
+func (self *statistics) reportAssertionsLocked() {
+ self.decideColorLocked()
+ self.out.Print("\n%d total %s", self.total, plural("assertion", self.total))
+}
+func (self *statistics) decideColorLocked() {
+ if self.failing && !self.erroring {
+ fmt.Print(yellowColor)
+ } else if self.erroring {
+ fmt.Print(redColor)
+ } else {
+ fmt.Print(greenColor)
+ }
+}
+func (self *statistics) reportSkippedSectionsLocked() {
+ if self.skipped > 0 {
+ fmt.Print(yellowColor)
+ self.out.Print(" (one or more sections skipped)")
+ }
+}
+func (self *statistics) completeReportLocked() {
+ fmt.Print(resetColor)
+ self.out.Print("\n")
+ self.out.Print("\n")
+}
+
+func (self *statistics) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewStatisticsReporter(out *Printer) *statistics {
+ self := statistics{}
+ self.out = out
+ return &self
+}
+
+type statistics struct {
+ sync.Mutex
+
+ out *Printer
+ total int
+ failing bool
+ erroring bool
+ skipped int
+ suppressed bool
+}
+
+func plural(word string, count int) string {
+ if count == 1 {
+ return word
+ }
+ return word + "s"
+}
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go
new file mode 100644
index 00000000..9e73c971
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go
@@ -0,0 +1,73 @@
+// TODO: in order for this reporter to be completely honest
+// we need to retrofit to be more like the json reporter such that:
+// 1. it maintains ScopeResult collections, which count assertions
+// 2. it reports only after EndStory(), so that all tick marks
+// are placed near the appropriate title.
+// 3. Under unit test
+
+package reporting
+
+import (
+ "fmt"
+ "strings"
+)
+
+type story struct {
+ out *Printer
+ titlesById map[string]string
+ currentKey []string
+}
+
+func (self *story) BeginStory(story *StoryReport) {}
+
+func (self *story) Enter(scope *ScopeReport) {
+ self.out.Indent()
+
+ self.currentKey = append(self.currentKey, scope.Title)
+ ID := strings.Join(self.currentKey, "|")
+
+ if _, found := self.titlesById[ID]; !found {
+ self.out.Println("")
+ self.out.Print(scope.Title)
+ self.out.Insert(" ")
+ self.titlesById[ID] = scope.Title
+ }
+}
+
+func (self *story) Report(report *AssertionResult) {
+ if report.Error != nil {
+ fmt.Print(redColor)
+ self.out.Insert(error_)
+ } else if report.Failure != "" {
+ fmt.Print(yellowColor)
+ self.out.Insert(failure)
+ } else if report.Skipped {
+ fmt.Print(yellowColor)
+ self.out.Insert(skip)
+ } else {
+ fmt.Print(greenColor)
+ self.out.Insert(success)
+ }
+ fmt.Print(resetColor)
+}
+
+func (self *story) Exit() {
+ self.out.Dedent()
+ self.currentKey = self.currentKey[:len(self.currentKey)-1]
+}
+
+func (self *story) EndStory() {
+ self.titlesById = make(map[string]string)
+ self.out.Println("\n")
+}
+
+func (self *story) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewStoryReporter(out *Printer) *story {
+ self := new(story)
+ self.out = out
+ self.titlesById = make(map[string]string)
+ return self
+}
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 00000000..298f0e26
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 00000000..0c9b04b5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,452 @@
+
+
+A FileSystem Abstraction System for Go
+
+[Build Status](https://travis-ci.org/spf13/afero) [Build status](https://ci.appveyor.com/project/spf13/afero) [GoDoc](https://godoc.org/github.com/spf13/afero) [Join the chat](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+ $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+// or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application before had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
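+
+A minimal sketch of that idea (the package and variable names here are only
+illustrative):
+
+```go
+// Package fsys exposes the application's file system so tests can swap in an
+// in-memory backend.
+package fsys
+
+import "github.com/spf13/afero"
+
+// FS defaults to the real OS file system; tests may replace it with
+// afero.NewMemMapFs().
+var FS = afero.NewOsFs()
+```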
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+They are available via two different approaches. You can either call
+them directly, where the first parameter of each function is the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and can be easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+ appFS := afero.NewMemMapFs()
+ // create test files and directories
+ appFS.MkdirAll("src/a", 0755)
+ afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+ afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+ name := "src/c"
+ _, err := appFS.Stat(name)
+ if os.IsNotExist(err) {
+ t.Errorf("file \"%s\" does not exist.\n", name)
+ }
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and for avoiding unnecessary disk I/O when persistence isn’t
+necessary. It is fully concurrent and will work within go routines
+safely.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp), which can
+be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+File names given to operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
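+
+Continuing that snippet, any name passed to `bp` is resolved beneath the base
+path (an illustrative sketch):
+
+```go
+f, err := bp.Create("/file.txt") // actually creates /base/path/file.txt on the OS filesystem
+if err == nil {
+	f.Close()
+}
+```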
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names: any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The Http package requires a slightly specific version of Open which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(afero.NewOsFs())
+fileserver := http.FileServer(httpFs.Dir("/path/to/files"))
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request is within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of 0
+means "forever", meaning the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+ base := afero.NewOsFs()
+ roBase := afero.NewReadOnlyFs(base)
+ ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+ fh, _ := ufs.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+ * Full compatibility with Windows
+ * Introduction of afero utilities
+ * Test suite rewritten to work cross platform
+ * Normalize paths for MemMapFs
+ * Adding Sync to the file interface
+ * **Breaking Change** Walk and ReadDir have changed parameter order
+ * Moving types used by MemMapFs to a subpackage
+ * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+ * New Walk function similar to filepath.Walk
+ * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+ * MemMapFs.Remove now really deletes the file
+ * InMemoryFile.Readdir and Readdirnames work correctly
+ * InMemoryFile functions lock it for concurrent access
+ * Test suite improvements
+* **0.8.0** 2014.10.28
+ * First public version
+ * Interfaces feel ready for people to build using
+ * Interfaces satisfy all known uses
+ * MemMapFs passes the majority of the OS test suite
+ * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 00000000..f5b5e127
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+
+package afero
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+type Afero struct {
+ Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ io.Writer
+ io.WriterAt
+
+ Name() string
+ Readdir(count int) ([]os.FileInfo, error)
+ Readdirnames(n int) ([]string, error)
+ Stat() (os.FileInfo, error)
+ Sync() error
+ Truncate(size int64) error
+ WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+ // Create creates a file in the filesystem, returning the file and an
+ // error, if any happens.
+ Create(name string) (File, error)
+
+ // Mkdir creates a directory in the filesystem, returning an error if any
+ // happens.
+ Mkdir(name string, perm os.FileMode) error
+
+ // MkdirAll creates a directory path and all parents that do not exist
+ // yet.
+ MkdirAll(path string, perm os.FileMode) error
+
+ // Open opens a file, returning it or an error, if any happens.
+ Open(name string) (File, error)
+
+ // OpenFile opens a file using the given flags and the given mode.
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+ // RemoveAll removes a directory path and any children it contains. It
+ // does not fail if the path does not exist (returns nil).
+ RemoveAll(path string) error
+
+ // Rename renames a file.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file, or an error, if any
+ // happens.
+ Stat(name string) (os.FileInfo, error)
+
+ // The name of this FileSystem.
+ Name() string
+
+ // Chmod changes the mode of the named file to mode.
+ Chmod(name string, mode os.FileMode) error
+
+ // Chtimes changes the access and modification times of the named file.
+ Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
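The Fs interface above is the entire contract, so the same code can run against any backend. A minimal sketch of that idea, using the in-memory and OS backends vendored in this patch; the file name is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

// save writes a greeting through whatever Fs backend it is handed.
func save(fs afero.Fs, name string) error {
	return afero.WriteFile(fs, name, []byte("hello afero"), 0644)
}

func main() {
	fs := afero.NewMemMapFs() // swap for afero.NewOsFs() to hit the real disk
	if err := save(fs, "/tmp/greeting.txt"); err != nil {
		log.Fatal(err)
	}
	data, err := afero.ReadFile(fs, "/tmp/greeting.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}
```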
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 00000000..a633ad50
--- /dev/null
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,15 @@
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+ GOPATH: C:\gopath
+build_script:
+- cmd: >-
+ go version
+
+ go env
+
+ go get -v github.com/spf13/afero/...
+
+ go build github.com/spf13/afero
+test_script:
+- cmd: go test -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 00000000..616ff8ff
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,180 @@
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var _ Lstater = (*BasePathFs)(nil)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// File names passed to operations on this Fs are prepended with the base
+// path before the call is forwarded to the source Fs.
+// Any file name that resolves (after filepath.Clean()) outside this base
+// path is treated as a non-existing file.
+//
+// Note that error messages are not cleaned on return, so they may reveal
+// the real path.
+type BasePathFs struct {
+ source Fs
+ path string
+}
+
+type BasePathFile struct {
+ File
+ path string
+}
+
+func (f *BasePathFile) Name() string {
+ sourcename := f.File.Name()
+ return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+ return &BasePathFs{source: source, path: path}
+}
+
+// RealPath returns the given file name with the base path prepended; for a
+// file outside the base path it returns the given name and an error.
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+ if err := validateBasePathName(name); err != nil {
+ return name, err
+ }
+
+ bpath := filepath.Clean(b.path)
+ path = filepath.Clean(filepath.Join(bpath, name))
+ if !strings.HasPrefix(path, bpath) {
+ return name, os.ErrNotExist
+ }
+
+ return path, nil
+}
+
+func validateBasePathName(name string) error {
+ if runtime.GOOS != "windows" {
+ // Not much to do here;
+ // the virtual file paths all look absolute on *nix.
+ return nil
+ }
+
+ // On Windows a common mistake would be to provide an absolute OS path
+ // We could strip out the base part, but that would not be very portable.
+ if filepath.IsAbs(name) {
+ return os.ErrNotExist
+ }
+
+ return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chtimes", Path: name, Err: err}
+ }
+ return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chmod", Path: name, Err: err}
+ }
+ return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Name() string {
+ return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+ }
+ return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+ if oldname, err = b.RealPath(oldname); err != nil {
+ return &os.PathError{Op: "rename", Path: oldname, Err: err}
+ }
+ if newname, err = b.RealPath(newname); err != nil {
+ return &os.PathError{Op: "rename", Path: newname, Err: err}
+ }
+ return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove_all", Path: name, Err: err}
+ }
+ return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+ }
+ sourcef, err := b.source.OpenFile(name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{sourcef, b.path}, nil
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "open", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "create", Path: name, Err: err}
+ }
+ sourcef, err := b.source.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
+ }
+ if lstater, ok := b.source.(Lstater); ok {
+ return lstater.LstatIfPossible(name)
+ }
+ fi, err := b.source.Stat(name)
+ return fi, false, err
+}
+
+// vim: ts=4 sw=4 noexpandtab nolist syn=go
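A sketch of BasePathFs used as a sandbox, exercising the RealPath behaviour described above; the directory and file names are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	if err := base.MkdirAll("/srv/app/data", 0755); err != nil {
		log.Fatal(err)
	}

	// Every name passed to sandbox is joined onto /srv/app/data first.
	sandbox := afero.NewBasePathFs(base, "/srv/app/data")
	if err := afero.WriteFile(sandbox, "/notes.txt", []byte("scoped"), 0644); err != nil {
		log.Fatal(err)
	}

	// On the source Fs the file lives under the real, prefixed path.
	if _, err := base.Stat("/srv/app/data/notes.txt"); err == nil {
		fmt.Println("visible at /srv/app/data/notes.txt on the source Fs")
	}

	// Names that escape the base path are treated as non-existing.
	if _, err := sandbox.Open("/../../etc/passwd"); err != nil {
		fmt.Println("escape attempt rejected:", err)
	}
}
```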
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 00000000..29a26c67
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,290 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union forwards all write calls to the base file system
+// first. To prevent writing to the base Fs, wrap it in a read-only filter.
+// Note that this also makes the overlay read-only; to write files in the
+// overlay, use the overlay Fs directly, not the union Fs.
+type CacheOnReadFs struct {
+ base Fs
+ layer Fs
+ cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+ return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+ // not present in the overlay, unknown if it exists in the base:
+ cacheMiss cacheState = iota
+ // present in the overlay and in base, base file is newer:
+ cacheStale
+ // present in the overlay - with cache time == 0 it may exist in the base,
+ // with cacheTime > 0 it exists in the base and is same age or newer in the
+ // overlay
+ cacheHit
+ // happens if someone writes directly to the overlay without
+ // going through this union
+ cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+ var lfi, bfi os.FileInfo
+ lfi, err = u.layer.Stat(name)
+ if err == nil {
+ if u.cacheTime == 0 {
+ return cacheHit, lfi, nil
+ }
+ if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+ bfi, err = u.base.Stat(name)
+ if err != nil {
+ return cacheLocal, lfi, nil
+ }
+ if bfi.ModTime().After(lfi.ModTime()) {
+ return cacheStale, bfi, nil
+ }
+ }
+ return cacheHit, lfi, nil
+ }
+
+ if err == syscall.ENOENT || os.IsNotExist(err) {
+ return cacheMiss, nil, nil
+ }
+
+ return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chtimes(name, atime, mtime)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chtimes(name, atime, mtime)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chmod(name, mode)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chmod(name, mode)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheMiss:
+ return u.base.Stat(name)
+ default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+ return fi, nil
+ }
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+ st, _, err := u.cacheStatus(oldname)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Rename(oldname, newname)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(oldname); err != nil {
+ return err
+ }
+ err = u.base.Rename(oldname, newname)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.Remove(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.RemoveAll(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheLocal, cacheHit:
+ default:
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ }
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ bfi, err := u.base.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ lfi, err := u.layer.OpenFile(name, flag, perm)
+ if err != nil {
+ bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+ return nil, err
+ }
+ return &UnionFile{Base: bfi, Layer: lfi}, nil
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch st {
+ case cacheLocal:
+ return u.layer.Open(name)
+
+ case cacheMiss:
+ bfi, err := u.base.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if bfi.IsDir() {
+ return u.base.Open(name)
+ }
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+
+ case cacheStale:
+ if !fi.IsDir() {
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+ }
+ case cacheHit:
+ if !fi.IsDir() {
+ return u.layer.Open(name)
+ }
+ }
+ // the dirs from cacheHit, cacheStale fall down here:
+ bfile, _ := u.base.Open(name)
+ lfile, err := u.layer.Open(name)
+ if err != nil && bfile == nil {
+ return nil, err
+ }
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+ err := u.base.Mkdir(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+ return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+ err := u.base.MkdirAll(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+ bfh, err := u.base.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ lfh, err := u.layer.Create(name)
+ if err != nil {
+ // See the O_TRUNC comment in OpenFile above: should we remove the base file
+ // here? We would then have to remember whether it existed beforehand.
+ bfh.Close()
+ return nil, err
+ }
+ return &UnionFile{Base: bfh, Layer: lfh}, nil
+}
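A sketch of the intended wiring: a slower, authoritative base cached into an in-memory layer for a bounded time. The path read here is purely illustrative and OS-dependent:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewOsFs()      // authoritative but comparatively slow
	layer := afero.NewMemMapFs() // fast in-memory cache

	// Files read through cached are copied into the layer and served from
	// there for up to a minute; a cacheTime of 0 would cache forever.
	cached := afero.NewCacheOnReadFs(base, layer, 1*time.Minute)

	data, err := afero.ReadFile(cached, "/etc/hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes; repeat reads within a minute hit the layer\n", len(data))
}
```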
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 00000000..5728243d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 00000000..968fc278
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,25 @@
+// Copyright © 2016 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 00000000..e8108a85
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,293 @@
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*CopyOnWriteFs)(nil)
+
+// The CopyOnWriteFs is a union filesystem: a read only base file system with
+// a possibly writeable layer on top. Changes to the file system will only
+// be made in the overlay: changing an existing file in the base layer that
+// is not present in the overlay copies the file to the overlay ("changing"
+// also includes calls to e.g. Chtimes() and Chmod()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+ base Fs
+ layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+ return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// Returns true if the file is not in the overlay
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+ if _, err := u.layer.Stat(name); err == nil {
+ return false, nil
+ }
+ _, err := u.base.Stat(name)
+ if err != nil {
+ if oerr, ok := err.(*os.PathError); ok {
+ if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+ return false, nil
+ }
+ }
+ if err == syscall.ENOENT {
+ return false, nil
+ }
+ }
+ return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+ fi, err := u.layer.Stat(name)
+ if err != nil {
+ isNotExist := u.isNotExist(err)
+ if isNotExist {
+ return u.base.Stat(name)
+ }
+ return nil, err
+ }
+ return fi, nil
+}
+
+func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ llayer, ok1 := u.layer.(Lstater)
+ lbase, ok2 := u.base.(Lstater)
+
+ if ok1 {
+ fi, b, err := llayer.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ if ok2 {
+ fi, b, err := lbase.LstatIfPossible(name)
+ if err == nil {
+ return fi, b, nil
+ }
+ if !u.isNotExist(err) {
+ return nil, b, err
+ }
+ }
+
+ fi, err := u.Stat(name)
+
+ return fi, false, err
+}
+
+func (u *CopyOnWriteFs) isNotExist(err error) bool {
+ if e, ok := err.(*os.PathError); ok {
+ err = e.Err
+ }
+ if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+ return true
+ }
+ return false
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+ b, err := u.isBaseFile(oldname)
+ if err != nil {
+ return err
+ }
+ if b {
+ return syscall.EPERM
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+ err := u.layer.Remove(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+ err := u.layer.RemoveAll(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ if b {
+ if err = u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ dir := filepath.Dir(name)
+ isaDir, err := IsDir(u.base, dir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if isaDir {
+ if err = u.layer.MkdirAll(dir, 0777); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ isaDir, err = IsDir(u.layer, dir)
+ if err != nil {
+ return nil, err
+ }
+ if isaDir {
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+ }
+ if b {
+ return u.base.OpenFile(name, flag, perm)
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+// Open handles the nine different possibilities arising from the union,
+// i.e. the combinations of the following states:
+// layer: doesn't exist, exists as a file, or exists as a directory
+// base: doesn't exist, exists as a file, or exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+ // Since the overlay overrides the base we check that first
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If overlay doesn't exist, return the base (base state irrelevant)
+ if b {
+ return u.base.Open(name)
+ }
+
+ // If overlay is a file, return it (base state irrelevant)
+ dir, err := IsDir(u.layer, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ return u.layer.Open(name)
+ }
+
+ // Overlay is a directory, base state now matters.
+ // Base state has 3 states to check but 2 outcomes:
+ // A. It's a file or non-readable in the base (return just the overlay)
+ // B. It's an accessible directory in the base (return a UnionFile)
+
+ // If base is file or nonreadable, return overlay
+ dir, err = IsDir(u.base, name)
+ if !dir || err != nil {
+ return u.layer.Open(name)
+ }
+
+ // Both base & layer are directories
+ // Return union file (if opens are without error)
+ bfile, bErr := u.base.Open(name)
+ lfile, lErr := u.layer.Open(name)
+
+ // If either have errors at this point something is very wrong. Return nil and the errors
+ if bErr != nil || lErr != nil {
+ return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+ }
+
+ return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return ErrFileExists
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+ return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ // This is in line with how os.MkdirAll behaves.
+ return nil
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+ return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
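A sketch of the copy-on-write behaviour described in the type comment: the base holds a pristine file, all modifications land in the overlay, and the base is left untouched. File names and contents are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	if err := afero.WriteFile(base, "/config/default.toml", []byte("answer = 42\n"), 0644); err != nil {
		log.Fatal(err)
	}

	// Writes go to the overlay; the base is only ever read.
	union := afero.NewCopyOnWriteFs(base, afero.NewMemMapFs())

	// Modifying the file copies it into the overlay first, then truncates it there.
	if err := afero.WriteFile(union, "/config/default.toml", []byte("answer = 43\n"), 0644); err != nil {
		log.Fatal(err)
	}

	fromUnion, _ := afero.ReadFile(union, "/config/default.toml")
	fromBase, _ := afero.ReadFile(base, "/config/default.toml")
	fmt.Printf("union sees %q, base still has %q\n", fromUnion, fromBase)
}
```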
diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod
new file mode 100644
index 00000000..08685509
--- /dev/null
+++ b/vendor/github.com/spf13/afero/go.mod
@@ -0,0 +1,3 @@
+module github.com/spf13/afero
+
+require golang.org/x/text v0.3.0
diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum
new file mode 100644
index 00000000..6bad37b2
--- /dev/null
+++ b/vendor/github.com/spf13/afero/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 00000000..c4219368
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type httpDir struct {
+ basePath string
+ fs HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d.basePath)
+ if dir == "" {
+ dir = "."
+ }
+
+ f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type HttpFs struct {
+ source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+ return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+ return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "h HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+ return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+ return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+ return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+ return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+ f, err := h.source.Open(name)
+ if err == nil {
+ if httpfile, ok := f.(http.File); ok {
+ return httpfile, nil
+ }
+ }
+ return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+ return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+ return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+ return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+ return h.source.Stat(name)
+}
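HttpFs adapts any afero.Fs to net/http: the httpDir returned by Dir satisfies http.FileSystem, so it plugs directly into http.FileServer. A sketch serving an in-memory file; the paths and port are illustrative only:

```go
package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := afero.WriteFile(fs, "/public/index.html", []byte("<h1>hello</h1>"), 0644); err != nil {
		log.Fatal(err)
	}

	// Dir returns an http.FileSystem rooted at /public on the wrapped Fs.
	httpFs := afero.NewHttpFs(fs)
	handler := http.FileServer(httpFs.Dir("/public"))

	log.Fatal(http.ListenAndServe(":8080", handler))
}
```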
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 00000000..5c3a3d8f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,230 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+ return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byName(list))
+ return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+ return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // It's a good but not certain bet that FileInfo will tell us exactly how much to
+ // read, so let's try it but be prepared for the answer to be wrong.
+ var n int64
+
+ if fi, err := f.Stat(); err == nil {
+ // Don't preallocate a huge buffer, just in case.
+ if size := fi.Size(); size < 1e9 {
+ n = size
+ }
+ }
+ // As initial capacity for readAll, use n + a little extra in case Size is zero,
+ // and to avoid another allocation after Read has filled the buffer. The readAll
+ // call will read into its allocated internal buffer cheaply. If the size was
+ // wrong, we'll either waste some space off the end or reallocate as needed, but
+ // in the overwhelmingly common case we'll get it just right.
+ return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, capacity))
+ // If the buffer overflows, we will get bytes.ErrTooLarge.
+ // Return that as an error. Any other panic remains.
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+ err = panicErr
+ } else {
+ panic(e)
+ }
+ }()
+ _, err = buf.ReadFrom(r)
+ return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+ f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+ return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+ return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ try := filepath.Join(dir, prefix+nextSuffix())
+ err = fs.Mkdir(try, 0700)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ if err == nil {
+ name = try
+ }
+ break
+ }
+ return
+}
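The Afero wrapper type makes these helpers available as methods on a single value. A sketch using TempDir, TempFile and ReadDir against an in-memory Fs; the prefixes and contents are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	// Embedding an Fs in Afero exposes ReadFile, WriteFile, TempFile, ... as methods.
	appFS := afero.Afero{Fs: afero.NewMemMapFs()}

	dir, err := appFS.TempDir("", "demo")
	if err != nil {
		log.Fatal(err)
	}

	f, err := appFS.TempFile(dir, "scratch")
	if err != nil {
		log.Fatal(err)
	}
	f.WriteString("temporary contents")
	f.Close()

	entries, err := appFS.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range entries {
		fmt.Println(fi.Name(), fi.Size())
	}
}
```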
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 00000000..89c1bfc0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it returns a boolean telling whether Lstat was called.
+type Lstater interface {
+ LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
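Because Lstater is optional, callers check for it with a type assertion and fall back to Stat otherwise. A sketch of that pattern; the helper name and the path are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

// statPreferringLstat uses Lstat when the backend supports it (e.g. OsFs)
// and falls back to Stat otherwise (e.g. MemMapFs).
func statPreferringLstat(fs afero.Fs, name string) error {
	if lst, ok := fs.(afero.Lstater); ok {
		fi, lstatCalled, err := lst.LstatIfPossible(name)
		if err != nil {
			return err
		}
		fmt.Printf("%s: mode=%v (lstat used: %v)\n", name, fi.Mode(), lstatCalled)
		return nil
	}
	fi, err := fs.Stat(name)
	if err != nil {
		return err
	}
	fmt.Printf("%s: mode=%v (plain stat)\n", name, fi.Mode())
	return nil
}

func main() {
	if err := statPreferringLstat(afero.NewOsFs(), "/tmp"); err != nil {
		log.Fatal(err)
	}
}
```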
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 00000000..c18a87fb
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ // Lstat is not supported by all filesystems.
+ if _, err = lstatIfPossible(fs, pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ switch dir {
+ case "":
+ dir = "."
+ case string(filepath.Separator):
+ // nothing
+ default:
+ dir = dir[0 : len(dir)-1] // chop off trailing separator
+ }
+
+ if !hasMeta(dir) {
+ return glob(fs, dir, file, nil)
+ }
+
+ var m []string
+ m, err = Glob(fs, dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(fs, d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.IndexAny(path, "*?[") >= 0
+}
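Glob uses the same pattern syntax as path/filepath.Match and works against any Fs. A sketch over an in-memory tree; the directory layout is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	for _, name := range []string{"/configs/app.yaml", "/configs/db.yaml", "/configs/readme.txt"} {
		if err := afero.WriteFile(fs, name, []byte("x"), 0644); err != nil {
			log.Fatal(err)
		}
	}

	// Only ErrBadPattern can be returned; I/O errors are ignored.
	matches, err := afero.Glob(fs, "/configs/*.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(matches) // [/configs/app.yaml /configs/db.yaml]
}
```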
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 00000000..e104013f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+ Len() int
+ Names() []string
+ Files() []*FileData
+ Add(*FileData)
+ Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+ if d.memDir == nil {
+ d.dir = true
+ d.memDir = &DirMap{}
+ }
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 00000000..03a57ee5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int { return len(m) }
+func (m DirMap) Add(f *FileData) { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+ for _, f := range m {
+ files = append(files, f)
+ }
+ sort.Sort(filesSorter(files))
+ return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int { return len(s) }
+func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+ for x := range m {
+ names = append(names, x)
+ }
+ return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 00000000..7af2fb56
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,317 @@
+// Copyright © 2015 Steve Francia.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+)
+
+import "time"
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+ // atomic requires 64-bit alignment for struct field access
+ at int64
+ readDirCount int64
+ closed bool
+ readOnly bool
+ fileData *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+ return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+ return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+ return f.fileData
+}
+
+type FileData struct {
+ sync.Mutex
+ name string
+ data []byte
+ memDir Dir
+ dir bool
+ mode os.FileMode
+ modtime time.Time
+}
+
+func (d *FileData) Name() string {
+ d.Lock()
+ defer d.Unlock()
+ return d.name
+}
+
+func CreateFile(name string) *FileData {
+ return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+ return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+ f.Lock()
+ f.name = newname
+ f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+ f.Lock()
+ f.mode = mode
+ f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+ f.Lock()
+ setModTime(f, mtime)
+ f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+ f.modtime = mtime
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+ return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+ atomic.StoreInt64(&f.at, 0)
+ atomic.StoreInt64(&f.readDirCount, 0)
+ f.fileData.Lock()
+ f.closed = false
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Close() error {
+ f.fileData.Lock()
+ f.closed = true
+ if !f.readOnly {
+ setModTime(f.fileData, time.Now())
+ }
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Name() string {
+ return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+ if !f.fileData.dir {
+ return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
+ }
+ var outLength int64
+
+ f.fileData.Lock()
+ files := f.fileData.memDir.Files()[f.readDirCount:]
+ if count > 0 {
+ if len(files) < count {
+ outLength = int64(len(files))
+ } else {
+ outLength = int64(count)
+ }
+ if len(files) == 0 {
+ err = io.EOF
+ }
+ } else {
+ outLength = int64(len(files))
+ }
+ f.readDirCount += outLength
+ f.fileData.Unlock()
+
+ res = make([]os.FileInfo, outLength)
+ for i := range res {
+ res[i] = &FileInfo{files[i]}
+ }
+
+ return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ fi, err := f.Readdir(n)
+ names = make([]string, len(fi))
+ for i, f := range fi {
+ _, names[i] = filepath.Split(f.Name())
+ }
+ return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if f.closed {
+ return 0, ErrFileClosed
+ }
+ if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+ return 0, io.EOF
+ }
+ if int(f.at) > len(f.fileData.data) {
+ return 0, io.ErrUnexpectedEOF
+ }
+ if len(f.fileData.data)-int(f.at) >= len(b) {
+ n = len(b)
+ } else {
+ n = len(f.fileData.data) - int(f.at)
+ }
+ copy(b, f.fileData.data[f.at:f.at+int64(n)])
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Read(b)
+}
+
+func (f *File) Truncate(size int64) error {
+ if f.closed {
+ return ErrFileClosed
+ }
+ if f.readOnly {
+ return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ if size < 0 {
+ return ErrOutOfRange
+ }
+ if size > int64(len(f.fileData.data)) {
+ diff := size - int64(len(f.fileData.data))
+ f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+ } else {
+ f.fileData.data = f.fileData.data[0:size]
+ }
+ setModTime(f.fileData, time.Now())
+ return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ if f.closed {
+ return 0, ErrFileClosed
+ }
+ switch whence {
+ case 0:
+ atomic.StoreInt64(&f.at, offset)
+ case 1:
+ atomic.AddInt64(&f.at, int64(offset))
+ case 2:
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+ }
+ return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+ if f.readOnly {
+ return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ n = len(b)
+ cur := atomic.LoadInt64(&f.at)
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ diff := cur - int64(len(f.fileData.data))
+ var tail []byte
+ if n+int(cur) < len(f.fileData.data) {
+ tail = f.fileData.data[n+int(cur):]
+ }
+ if diff > 0 {
+ f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ } else {
+ f.fileData.data = append(f.fileData.data[:cur], b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ }
+ setModTime(f.fileData, time.Now())
+
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+ return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+ return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+ *FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+ s.Lock()
+ _, name := filepath.Split(s.name)
+ s.Unlock()
+ return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+ s.Lock()
+ defer s.Unlock()
+ return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+ if s.IsDir() {
+ return int64(42)
+ }
+ s.Lock()
+ defer s.Unlock()
+ return int64(len(s.data))
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 00000000..09498e70
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,365 @@
+// Copyright © 2014 Steve Francia.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/afero/mem"
+)
+
+type MemMapFs struct {
+ mu sync.RWMutex
+ data map[string]*mem.FileData
+ init sync.Once
+}
+
+func NewMemMapFs() Fs {
+ return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+ m.init.Do(func() {
+ m.data = make(map[string]*mem.FileData)
+ // Root should always exist, right?
+ // TODO: what about windows?
+ m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+ })
+ return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+ name = normalizePath(name)
+ m.mu.Lock()
+ file := mem.CreateFile(name)
+ m.getData()[name] = file
+ m.registerWithParent(file)
+ m.mu.Unlock()
+ return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+ f, err := m.lockfreeOpen(fileName)
+ if err != nil {
+ return err
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ log.Panic("parent of ", f.Name(), " is nil")
+ }
+
+ parent.Lock()
+ mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
+ return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+ pdir, _ := filepath.Split(f.Name())
+ pdir = filepath.Clean(pdir)
+ pfile, err := m.lockfreeOpen(pdir)
+ if err != nil {
+ return nil
+ }
+ return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+ if f == nil {
+ return
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ pdir := filepath.Dir(filepath.Clean(f.Name()))
+ err := m.lockfreeMkdir(pdir, 0777)
+ if err != nil {
+ //log.Println("Mkdir error:", err)
+ return
+ }
+ parent, err = m.lockfreeOpen(pdir)
+ if err != nil {
+ //log.Println("Open after Mkdir error:", err)
+ return
+ }
+ }
+
+ parent.Lock()
+ mem.InitializeDir(parent)
+ mem.AddToMemDir(parent, f)
+ parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+ x, ok := m.getData()[name]
+ if ok {
+ // Only return ErrFileExists if it's a file, not a directory.
+ i := mem.FileInfo{FileData: x}
+ if !i.IsDir() {
+ return ErrFileExists
+ }
+ } else {
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ }
+ return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ _, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if ok {
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ m.mu.Unlock()
+
+ m.Chmod(name, perm|os.ModeDir)
+
+ return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+ err := m.Mkdir(path, perm)
+ if err != nil {
+ if err.(*os.PathError).Err == ErrFileExists {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+ path = filepath.Clean(path)
+
+ switch path {
+ case ".":
+ return FilePathSeparator
+ case "..":
+ return FilePathSeparator
+ default:
+ return path
+ }
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewReadOnlyFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+ }
+ return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+ f, ok := m.getData()[name]
+ if ok {
+ return f, nil
+ } else {
+ return nil, ErrFileNotFound
+ }
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ chmod := false
+ file, err := m.openWrite(name)
+ if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+ file, err = m.Create(name)
+ chmod = true
+ }
+ if err != nil {
+ return nil, err
+ }
+ if flag == os.O_RDONLY {
+ file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+ }
+ if flag&os.O_APPEND > 0 {
+ _, err = file.Seek(0, os.SEEK_END)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+ err = file.Truncate(0)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if chmod {
+ m.Chmod(name, perm)
+ }
+ return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+ name = normalizePath(name)
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if _, ok := m.getData()[name]; ok {
+ err := m.unRegisterWithParent(name)
+ if err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ delete(m.getData(), name)
+ } else {
+ return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+ }
+ return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+ path = normalizePath(path)
+ m.mu.Lock()
+ m.unRegisterWithParent(path)
+ m.mu.Unlock()
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for p := range m.getData() {
+ if strings.HasPrefix(p, path) {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ delete(m.getData(), p)
+ m.mu.Unlock()
+ m.mu.RLock()
+ }
+ }
+ return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+ oldname = normalizePath(oldname)
+ newname = normalizePath(newname)
+
+ if oldname == newname {
+ return nil
+ }
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ if _, ok := m.getData()[oldname]; ok {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ m.unRegisterWithParent(oldname)
+ fileData := m.getData()[oldname]
+ delete(m.getData(), oldname)
+ mem.ChangeFileName(fileData, newname)
+ m.getData()[newname] = fileData
+ m.registerWithParent(fileData)
+ m.mu.Unlock()
+ m.mu.RLock()
+ } else {
+ return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+ }
+ return nil
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+ f, err := m.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi := mem.GetFileInfo(f.(*mem.File).Data())
+ return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetMode(f, mode)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetModTime(f, mtime)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) List() {
+ for _, x := range m.data {
+ y := mem.FileInfo{FileData: x}
+ fmt.Println(x.Name(), y.Size())
+ }
+}
+
+// func debugMemMapList(fs Fs) {
+// if x, ok := fs.(*MemMapFs); ok {
+// x.List()
+// }
+// }
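MemMapFs.OpenFile above honours O_CREATE, O_APPEND and O_TRUNC. A sketch appending to an in-memory log file through those flags; the log path is illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	appendLine := func(line string) {
		// O_CREATE creates the file on first use; O_APPEND seeks to the end
		// before each write, as implemented in MemMapFs.OpenFile above.
		f, err := fs.OpenFile("/var/log/app.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if _, err := f.WriteString(line + "\n"); err != nil {
			log.Fatal(err)
		}
	}

	appendLine("first entry")
	appendLine("second entry")

	data, err := afero.ReadFile(fs, "/var/log/app.log")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(data))
}
```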
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 00000000..13cc1b84
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,101 @@
+// Copyright © 2014 Steve Francia.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "time"
+)
+
+var _ Lstater = (*OsFs)(nil)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details on any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+ return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+ f, e := os.Create(name)
+ if f == nil {
+ // While this looks strange, we need to return a bare (untyped) nil here,
+ // not a nil *os.File, or the returned File interface value won't equal nil.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+ return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+ f, e := os.Open(name)
+ if f == nil {
+ // While this looks strange, we need to return a bare (untyped) nil here,
+ // not a nil *os.File, or the returned File interface value won't equal nil.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, e := os.OpenFile(name, flag, perm)
+ if f == nil {
+ // While this looks strange, we need to return a bare (untyped) nil here,
+ // not a nil *os.File, or the returned File interface value won't equal nil.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Remove(name string) error {
+ return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+ return os.Chmod(name, mode)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
+
+func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fi, err := os.Lstat(name)
+ return fi, true, err
+}
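OsFs is the thinnest backend in the package: every method above delegates straight to the corresponding os function. A rough usage sketch through the Fs interface (not part of the vendored file; the path below is made up):

    package main

    import (
        "fmt"
        "log"

        "github.com/spf13/afero"
    )

    func main() {
        var fs afero.Fs = afero.NewOsFs()

        // Create, write and stat a scratch file via the Fs interface.
        f, err := fs.Create("/tmp/afero-demo.txt")
        if err != nil {
            log.Fatal(err)
        }
        if _, err := f.WriteString("hello"); err != nil {
            log.Fatal(err)
        }
        f.Close()

        fi, err := fs.Stat("/tmp/afero-demo.txt")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(fi.Name(), fi.Size()) // afero-demo.txt 5

        _ = fs.Remove("/tmp/afero-demo.txt")
    }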
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 00000000..18f60a0f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,106 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(fs, path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := lstatIfPossible(fs, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walk(fs, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// if the filesystem supports it, use Lstat, else use fs.Stat
+func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
+ if lfs, ok := fs.(Lstater); ok {
+ fi, _, err := lfs.LstatIfPossible(path)
+ return fi, err
+ }
+ return fs.Stat(path)
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+ return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+ info, err := lstatIfPossible(fs, root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(fs, root, info, walkFn)
+}
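Walk mirrors filepath.Walk but runs against any Fs, preferring Lstat when the backend implements Lstater. A rough sketch over the in-memory backend added earlier in this change (paths are made up):

    package main

    import (
        "fmt"
        "os"

        "github.com/spf13/afero"
    )

    func main() {
        fs := afero.NewMemMapFs()
        fs.MkdirAll("/data/sub", 0755)
        f, _ := fs.Create("/data/sub/a.txt")
        f.Close()

        // Print every path under /data in lexical order.
        err := afero.Walk(fs, "/data", func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            fmt.Println(path, info.IsDir())
            return nil
        })
        if err != nil {
            fmt.Fprintln(os.Stderr, "walk:", err)
        }
    }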
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 00000000..c6376ec3
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,80 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+ source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+ return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+ return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+ return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+ return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ if lsf, ok := r.source.(Lstater); ok {
+ return lsf.LstatIfPossible(name)
+ }
+ fi, err := r.Stat(name)
+ return fi, false, err
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ return nil, syscall.EPERM
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+ return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+ return nil, syscall.EPERM
+}
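ReadOnlyFs passes reads through to the wrapped Fs and answers every mutating call (including OpenFile with any write-oriented flag) with syscall.EPERM. A rough sketch, again using the in-memory backend from earlier in this change:

    package main

    import (
        "fmt"

        "github.com/spf13/afero"
    )

    func main() {
        base := afero.NewMemMapFs()
        f, _ := base.Create("/config.txt")
        f.Close()

        ro := afero.NewReadOnlyFs(base)

        if _, err := ro.Stat("/config.txt"); err == nil {
            fmt.Println("reads pass through")
        }
        if _, err := ro.Create("/other.txt"); err != nil {
            fmt.Println("writes are rejected:", err) // operation not permitted
        }
    }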
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 00000000..9d92dbc0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "time"
+)
+
+// RegexpFs filters files (but not directories) by regular expression. Only
+// files matching the given regexp are allowed; all others get an ENOENT error
+// ("No such file or directory").
+type RegexpFs struct {
+ re *regexp.Regexp
+ source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+ return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+ f File
+ re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+ if r.re == nil {
+ return nil
+ }
+ if r.re.MatchString(name) {
+ return nil
+ }
+ return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+ return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+ dir, err := IsDir(r.source, oldname)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ if err := r.matchesName(oldname); err != nil {
+ return err
+ }
+ if err := r.matchesName(newname); err != nil {
+ return err
+ }
+ return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+ dir, err := IsDir(r.source, p)
+ if err != nil {
+ return err
+ }
+ if !dir {
+ if err := r.matchesName(p); err != nil {
+ return err
+ }
+ }
+ return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ }
+ f, err := r.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+ return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+ return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+ return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+ return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+ return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+ return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+ return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+ return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+ return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+ var rfi []os.FileInfo
+ rfi, err = f.f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range rfi {
+ if i.IsDir() || f.re.MatchString(i.Name()) {
+ fi = append(fi, i)
+ }
+ }
+ return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+ fi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range fi {
+ n = append(n, s.Name())
+ }
+ return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+ return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+ return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+ return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+ return f.f.WriteString(s)
+}
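As the comment at the top of this file says, RegexpFs hides any file whose name does not match the pattern while leaving directories visible. A rough sketch that exposes only *.txt files (pattern and names are made up):

    package main

    import (
        "fmt"
        "regexp"

        "github.com/spf13/afero"
    )

    func main() {
        base := afero.NewMemMapFs()
        for _, name := range []string{"/notes.txt", "/image.png"} {
            f, _ := base.Create(name)
            f.Close()
        }

        filtered := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))

        if _, err := filtered.Stat("/notes.txt"); err == nil {
            fmt.Println("/notes.txt is visible")
        }
        if _, err := filtered.Stat("/image.png"); err != nil {
            fmt.Println("/image.png is hidden:", err) // no such file or directory
        }
    }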
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 00000000..1e78f7d1
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,305 @@
+package afero
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// UnionFile implements the afero.File interface. It is returned when reading
+// a directory that is present at least in the overlay layer, or when opening
+// a file for writing.
+//
+// Calls to Readdir() and Readdirnames() merge the os.FileInfo entries / names
+// from the base and the overlay; for files present in both layers, only the
+// entry from the overlay is used.
+//
+// When opening a file for writing (Create() / OpenFile() with the appropriate
+// flags), the operation is performed in both layers, starting with the
+// overlay. A successful read from the overlay also advances the cursor
+// position in the base layer by the number of bytes read.
+type UnionFile struct {
+ Base File
+ Layer File
+ Merger DirsMerger
+ off int
+ files []os.FileInfo
+}
+
+func (f *UnionFile) Close() error {
+ // first close base, so we have a newer timestamp in the overlay. If we'd close
+ // the overlay first, we'd get a cacheStale the next time we access this file
+ // -> cache would be useless ;-)
+ if f.Base != nil {
+ f.Base.Close()
+ }
+ if f.Layer != nil {
+ return f.Layer.Close()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.Read(s)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ // advance the file position also in the base file, the next
+ // call may be a write at this position (or a seek with SEEK_CUR)
+ if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+ // only overwrite err in case the seek fails: we need to
+ // report an eventual io.EOF to the caller
+ err = seekErr
+ }
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Read(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+ if f.Layer != nil {
+ n, err := f.Layer.ReadAt(s, o)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.ReadAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+ if f.Layer != nil {
+ pos, err = f.Layer.Seek(o, w)
+ if (err == nil || err == io.EOF) && f.Base != nil {
+ _, err = f.Base.Seek(o, w)
+ }
+ return pos, err
+ }
+ if f.Base != nil {
+ return f.Base.Seek(o, w)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.Write(s)
+ if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+ _, err = f.Base.Write(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.Write(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteAt(s, o)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteAt(s, o)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+ if f.Layer != nil {
+ return f.Layer.Name()
+ }
+ return f.Base.Name()
+}
+
+// DirsMerger is how UnionFile weaves two directories together.
+// It takes the FileInfo slices from the layer and the base and returns a
+// single view.
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+ var files = make(map[string]os.FileInfo)
+
+ for _, fi := range lofi {
+ files[fi.Name()] = fi
+ }
+
+ for _, fi := range bofi {
+ if _, exists := files[fi.Name()]; !exists {
+ files[fi.Name()] = fi
+ }
+ }
+
+ rfi := make([]os.FileInfo, len(files))
+
+ i := 0
+ for _, fi := range files {
+ rfi[i] = fi
+ i++
+ }
+
+ return rfi, nil
+
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+ var merge DirsMerger = f.Merger
+ if merge == nil {
+ merge = defaultUnionMergeDirsFn
+ }
+
+ if f.off == 0 {
+ var lfi []os.FileInfo
+ if f.Layer != nil {
+ lfi, err = f.Layer.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var bfi []os.FileInfo
+ if f.Base != nil {
+ bfi, err = f.Base.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ merged, err := merge(lfi, bfi)
+ if err != nil {
+ return nil, err
+ }
+ f.files = append(f.files, merged...)
+ }
+ if c == -1 {
+ return f.files[f.off:], nil
+ }
+ defer func() { f.off += c }()
+ return f.files[f.off:c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+ rfi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, fi := range rfi {
+ names = append(names, fi.Name())
+ }
+ return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+ if f.Layer != nil {
+ return f.Layer.Stat()
+ }
+ if f.Base != nil {
+ return f.Base.Stat()
+ }
+ return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Sync()
+ if err == nil && f.Base != nil {
+ err = f.Base.Sync()
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Sync()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+ if f.Layer != nil {
+ err = f.Layer.Truncate(s)
+ if err == nil && f.Base != nil {
+ err = f.Base.Truncate(s)
+ }
+ return err
+ }
+ if f.Base != nil {
+ return f.Base.Truncate(s)
+ }
+ return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+ if f.Layer != nil {
+ n, err = f.Layer.WriteString(s)
+ if err == nil && f.Base != nil {
+ _, err = f.Base.WriteString(s)
+ }
+ return n, err
+ }
+ if f.Base != nil {
+ return f.Base.WriteString(s)
+ }
+ return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+ bfh, err := base.Open(name)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ // First make sure the directory exists
+ exists, err := Exists(layer, filepath.Dir(name))
+ if err != nil {
+ return err
+ }
+ if !exists {
+ err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the file on the overlay
+ lfh, err := layer.Create(name)
+ if err != nil {
+ return err
+ }
+ n, err := io.Copy(lfh, bfh)
+ if err != nil {
+ // If anything fails, clean up the file
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+
+ bfi, err := bfh.Stat()
+ if err != nil || bfi.Size() != n {
+ layer.Remove(name)
+ lfh.Close()
+ return syscall.EIO
+ }
+
+ err = lfh.Close()
+ if err != nil {
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+ return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
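The Merger field lets a caller swap defaultUnionMergeDirsFn for their own directory-merge policy; the copy-on-write and caching filesystems elsewhere in this package are what normally construct UnionFile values. A rough sketch of a custom merger (building a UnionFile by hand here is purely illustrative):

    package main

    import (
        "os"

        "github.com/spf13/afero"
    )

    // layerOnlyMerge ignores the base listing and returns only the overlay
    // entries. Any func with this shape satisfies afero.DirsMerger.
    func layerOnlyMerge(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
        return lofi, nil
    }

    func main() {
        // base and layer would normally be the same directory opened on two
        // different filesystems; nil placeholders keep the sketch short.
        var base, layer afero.File

        u := &afero.UnionFile{Base: base, Layer: layer, Merger: layerOnlyMerge}
        _ = u // u.Readdir(-1) would now list only the overlay's entries
    }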
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 00000000..4f253f48
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the filepath separator, as defined by filepath.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content to a file at that path.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+ return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
+ if err != nil {
+ if err != os.ErrExist {
+ return err
+ }
+ }
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+// SafeWriteReader is the same as WriteReader but fails if the file already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+ return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
+ if err != nil {
+ return
+ }
+ }
+
+ exists, err := Exists(fs, path)
+ if err != nil {
+ return
+ }
+ if exists {
+ return fmt.Errorf("%v already exists", path)
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+ return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, that subdirectory is created below it,
+// recursively, with mode 0777.
+func GetTempDir(fs Fs, subPath string) string {
+ addSlash := func(p string) string {
+ if FilePathSeparator != p[len(p)-1:] {
+ p = p + FilePathSeparator
+ }
+ return p
+ }
+ dir := addSlash(os.TempDir())
+
+ if subPath != "" {
+ // preserve windows backslash :-(
+ if FilePathSeparator == "\\" {
+ subPath = strings.Replace(subPath, "\\", "____", -1)
+ }
+ dir = dir + UnicodeSanitize((subPath))
+ if FilePathSeparator == "\\" {
+ dir = strings.Replace(dir, "____", "\\", -1)
+ }
+
+ if exists, _ := Exists(fs, dir); exists {
+ return addSlash(dir)
+ }
+
+ err := fs.MkdirAll(dir, 0777)
+ if err != nil {
+ panic(err)
+ }
+ dir = addSlash(dir)
+ }
+ return dir
+}
+
+// Rewrite string to remove non-standard path characters
+func UnicodeSanitize(s string) string {
+ source := []rune(s)
+ target := make([]rune, 0, len(source))
+
+ for _, r := range source {
+ if unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsMark(r) ||
+ r == '.' ||
+ r == '/' ||
+ r == '\\' ||
+ r == '_' ||
+ r == '-' ||
+ r == '%' ||
+ r == ' ' ||
+ r == '#' {
+ target = append(target, r)
+ }
+ }
+
+ return string(target)
+}
+
+// Transform characters with accents into plain forms.
+func NeuterAccents(s string) string {
+ t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+ result, _, _ := transform.String(t, string(s))
+
+ return result
+}
+
+func isMn(r rune) bool {
+ return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+ return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+ return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+ if r == nil || len(subslices) == 0 {
+ return false
+ }
+
+ largestSlice := 0
+
+ for _, sl := range subslices {
+ if len(sl) > largestSlice {
+ largestSlice = len(sl)
+ }
+ }
+
+ if largestSlice == 0 {
+ return false
+ }
+
+ bufflen := largestSlice * 4
+ halflen := bufflen / 2
+ buff := make([]byte, bufflen)
+ var err error
+ var n, i int
+
+ for {
+ i++
+ if i == 1 {
+ n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+ } else {
+ if i != 2 {
+ // shift left to catch overlapping matches
+ copy(buff[:], buff[halflen:])
+ }
+ n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+ }
+
+ if n > 0 {
+ for _, sl := range subslices {
+ if bytes.Contains(buff, sl) {
+ return true
+ }
+ }
+ }
+
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+ return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err == nil && fi.IsDir() {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+ return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+ return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+ if b, _ := Exists(fs, path); !b {
+ return false, fmt.Errorf("%q path does not exist", path)
+ }
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ if fi.IsDir() {
+ f, err := fs.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+ list, err := f.Readdir(-1)
+ if err != nil {
+ return false, err
+ }
+ return len(list) == 0, nil
+ }
+ return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+ return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+ combinedPath := filepath.Join(basePathFs.path, relativePath)
+ if parent, ok := basePathFs.source.(*BasePathFs); ok {
+ return FullBaseFsPath(parent, combinedPath)
+ }
+
+ return combinedPath
+}
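The helpers above compose naturally: WriteReader creates any missing parent directories before writing, and Exists, DirExists, IsEmpty and FileContainsBytes answer the common questions without touching the os package directly. A rough sketch against an in-memory Fs (paths are made up):

    package main

    import (
        "fmt"
        "strings"

        "github.com/spf13/afero"
    )

    func main() {
        fs := afero.NewMemMapFs()

        if err := afero.WriteReader(fs, "/site/content/post.md", strings.NewReader("hello")); err != nil {
            panic(err)
        }

        exists, _ := afero.Exists(fs, "/site/content/post.md")
        isDir, _ := afero.DirExists(fs, "/site/content")
        empty, _ := afero.IsEmpty(fs, "/site/content")
        found, _ := afero.FileContainsBytes(fs, "/site/content/post.md", []byte("hell"))

        fmt.Println(exists, isDir, empty, found) // true true false true
    }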
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 00000000..4527efb9
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 00000000..7ccf8930
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 00000000..e6939397
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[GoDoc](https://godoc.org/github.com/spf13/cast)
+[Build Status](https://travis-ci.org/spf13/cast)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempts to guess what you meant,
+for example you can only convert a string to an int when it is a string
+representation of an int such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for meta data.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you if it successfully
+converted. Using these methods you can tell the difference between when the
+input matched the zero value or when the conversion failed and the zero value
+was returned.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+ cast.ToString("mayonegg") // "mayonegg"
+ cast.ToString(8) // "8"
+ cast.ToString(8.31) // "8.31"
+ cast.ToString([]byte("one time")) // "one time"
+ cast.ToString(nil) // ""
+
+ var foo interface{} = "one more time"
+ cast.ToString(foo) // "one more time"
+
+
+### Example ‘ToInt’:
+
+ cast.ToInt(8) // 8
+ cast.ToInt(8.31) // 8
+ cast.ToInt("8") // 8
+ cast.ToInt(true) // 1
+ cast.ToInt(false) // 0
+
+ var eight interface{} = 8
+ cast.ToInt(eight) // 8
+ cast.ToInt(nil) // 0
+
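The README shows only the zero-value To_____ helpers; the matching To_____E form is what lets callers distinguish a failed conversion from a genuine zero. A short sketch of the error-checking variant (ToIntE is defined in caste.go below):

    package main

    import (
        "fmt"

        "github.com/spf13/cast"
    )

    func main() {
        if n, err := cast.ToIntE("8"); err == nil {
            fmt.Println(n) // 8
        }

        if _, err := cast.ToIntE("not a number"); err != nil {
            fmt.Println("conversion failed:", err)
        }
    }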
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 00000000..9fba638d
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,171 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+ v, _ := ToStringMapIntE(i)
+ return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+ v, _ := ToStringMapInt64E(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 00000000..a4859fb0
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1249 @@
+// Copyright © 2014 Steve Francia