
Encapsulate backends (#22)

* move to a Go module with vendored dependencies; use the golang:alpine build image to ensure Go version 1.13

* vendor deps for backend encapsulation

* encapsulate backend functionality into interfaces, re-implement leveldb as a backend

* organize imports

* modify API tests to use the backend

* rename LevelDBBackend to Backend to avoid stuttering

* remove custom error type for route not found, not needed
adamallred 6 years ago
parent
commit
ba6aa79dd7
100 changed files with 15290 additions and 306 deletions
  1. +4 -11   Dockerfile
  2. +17 -0   backend/backend.go
  3. +193 -0  backend/leveldb/leveldb.go
  4. +16 -15  backend/leveldb/leveldb_iter.go
  5. +49 -22  backend/leveldb/leveldb_test.go
  6. +49 -0   cmd/go/main.go
  7. +0 -222  context/context.go
  8. +9 -0    go.mod
  9. +155 -0  go.sum
  10. +58 -0   internal/routes.go
  11. +0 -36   main.go
  12. +5 -0    vendor/github.com/fsnotify/fsnotify/.editorconfig
  13. +6 -0    vendor/github.com/fsnotify/fsnotify/.gitignore
  14. +30 -0   vendor/github.com/fsnotify/fsnotify/.travis.yml
  15. +52 -0   vendor/github.com/fsnotify/fsnotify/AUTHORS
  16. +317 -0  vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
  17. +77 -0   vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
  18. +28 -0   vendor/github.com/fsnotify/fsnotify/LICENSE
  19. +79 -0   vendor/github.com/fsnotify/fsnotify/README.md
  20. +37 -0   vendor/github.com/fsnotify/fsnotify/fen.go
  21. +66 -0   vendor/github.com/fsnotify/fsnotify/fsnotify.go
  22. +337 -0  vendor/github.com/fsnotify/fsnotify/inotify.go
  23. +187 -0  vendor/github.com/fsnotify/fsnotify/inotify_poller.go
  24. +521 -0  vendor/github.com/fsnotify/fsnotify/kqueue.go
  25. +11 -0   vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
  26. +12 -0   vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
  27. +561 -0  vendor/github.com/fsnotify/fsnotify/windows.go
  28. +16 -0   vendor/github.com/golang/snappy/.gitignore
  29. +15 -0   vendor/github.com/golang/snappy/AUTHORS
  30. +37 -0   vendor/github.com/golang/snappy/CONTRIBUTORS
  31. +27 -0   vendor/github.com/golang/snappy/LICENSE
  32. +107 -0  vendor/github.com/golang/snappy/README
  33. +237 -0  vendor/github.com/golang/snappy/decode.go
  34. +14 -0   vendor/github.com/golang/snappy/decode_amd64.go
  35. +490 -0  vendor/github.com/golang/snappy/decode_amd64.s
  36. +101 -0  vendor/github.com/golang/snappy/decode_other.go
  37. +285 -0  vendor/github.com/golang/snappy/encode.go
  38. +29 -0   vendor/github.com/golang/snappy/encode_amd64.go
  39. +730 -0  vendor/github.com/golang/snappy/encode_amd64.s
  40. +238 -0  vendor/github.com/golang/snappy/encode_other.go
  41. +98 -0   vendor/github.com/golang/snappy/snappy.go
  42. +9 -0    vendor/github.com/hashicorp/hcl/.gitignore
  43. +13 -0   vendor/github.com/hashicorp/hcl/.travis.yml
  44. +354 -0  vendor/github.com/hashicorp/hcl/LICENSE
  45. +18 -0   vendor/github.com/hashicorp/hcl/Makefile
  46. +125 -0  vendor/github.com/hashicorp/hcl/README.md
  47. +19 -0   vendor/github.com/hashicorp/hcl/appveyor.yml
  48. +729 -0  vendor/github.com/hashicorp/hcl/decoder.go
  49. +3 -0    vendor/github.com/hashicorp/hcl/go.mod
  50. +2 -0    vendor/github.com/hashicorp/hcl/go.sum
  51. +11 -0   vendor/github.com/hashicorp/hcl/hcl.go
  52. +219 -0  vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
  53. +52 -0   vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
  54. +17 -0   vendor/github.com/hashicorp/hcl/hcl/parser/error.go
  55. +532 -0  vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
  56. +789 -0  vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
  57. +66 -0   vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
  58. +652 -0  vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
  59. +241 -0  vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
  60. +46 -0   vendor/github.com/hashicorp/hcl/hcl/token/position.go
  61. +219 -0  vendor/github.com/hashicorp/hcl/hcl/token/token.go
  62. +117 -0  vendor/github.com/hashicorp/hcl/json/parser/flatten.go
  63. +313 -0  vendor/github.com/hashicorp/hcl/json/parser/parser.go
  64. +451 -0  vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
  65. +46 -0   vendor/github.com/hashicorp/hcl/json/token/position.go
  66. +118 -0  vendor/github.com/hashicorp/hcl/json/token/token.go
  67. +38 -0   vendor/github.com/hashicorp/hcl/lex.go
  68. +39 -0   vendor/github.com/hashicorp/hcl/parse.go
  69. +6 -0    vendor/github.com/magiconair/properties/.gitignore
  70. +12 -0   vendor/github.com/magiconair/properties/.travis.yml
  71. +139 -0  vendor/github.com/magiconair/properties/CHANGELOG.md
  72. +25 -0   vendor/github.com/magiconair/properties/LICENSE
  73. +129 -0  vendor/github.com/magiconair/properties/README.md
  74. +289 -0  vendor/github.com/magiconair/properties/decode.go
  75. +156 -0  vendor/github.com/magiconair/properties/doc.go
  76. +1 -0    vendor/github.com/magiconair/properties/go.mod
  77. +34 -0   vendor/github.com/magiconair/properties/integrate.go
  78. +407 -0  vendor/github.com/magiconair/properties/lex.go
  79. +292 -0  vendor/github.com/magiconair/properties/load.go
  80. +95 -0   vendor/github.com/magiconair/properties/parser.go
  81. +833 -0  vendor/github.com/magiconair/properties/properties.go
  82. +31 -0   vendor/github.com/magiconair/properties/rangecheck.go
  83. +8 -0    vendor/github.com/mitchellh/mapstructure/.travis.yml
  84. +21 -0   vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
  85. +21 -0   vendor/github.com/mitchellh/mapstructure/LICENSE
  86. +46 -0   vendor/github.com/mitchellh/mapstructure/README.md
  87. +217 -0  vendor/github.com/mitchellh/mapstructure/decode_hooks.go
  88. +50 -0   vendor/github.com/mitchellh/mapstructure/error.go
  89. +1 -0    vendor/github.com/mitchellh/mapstructure/go.mod
  90. +1149 -0 vendor/github.com/mitchellh/mapstructure/mapstructure.go
  91. +2 -0    vendor/github.com/pelletier/go-toml/.gitignore
  92. +23 -0   vendor/github.com/pelletier/go-toml/.travis.yml
  93. +21 -0   vendor/github.com/pelletier/go-toml/LICENSE
  94. +131 -0  vendor/github.com/pelletier/go-toml/README.md
  95. +164 -0  vendor/github.com/pelletier/go-toml/benchmark.json
  96. +32 -0   vendor/github.com/pelletier/go-toml/benchmark.sh
  97. +244 -0  vendor/github.com/pelletier/go-toml/benchmark.toml
  98. +121 -0  vendor/github.com/pelletier/go-toml/benchmark.yml
  99. +23 -0   vendor/github.com/pelletier/go-toml/doc.go
  100. +29 -0   vendor/github.com/pelletier/go-toml/example-crlf.toml

+ 4 - 11
Dockerfile

@@ -1,15 +1,8 @@
-FROM alpine
+FROM golang:alpine
 
-ENV GOPATH /go
-COPY . /go/src/github.com/kellegous/go
-RUN apk update \
-  && apk add go git musl-dev \
-  && go get github.com/kellegous/go \
-  && apk del go git musl-dev \
-  && rm -rf /var/cache/apk/* \
-  && rm -rf /go/src /go/pkg \
-  && mkdir /data
+COPY . /src
+RUN cd /src/cmd/go && go build -mod=vendor -o /usr/bin/go
 
 EXPOSE 8067
 
-CMD ["/go/bin/go", "--data=/data"]
+CMD ["/usr/bin/go", "--data=/data"]

+ 17 - 0
backend/backend.go

@@ -0,0 +1,17 @@
+package backend
+
+import (
+	"context"
+
+	"github.com/kellegous/go/internal"
+)
+
+type Backend interface {
+	Close() error
+	Get(ctx context.Context, id string) (*internal.Route, error)
+	Put(ctx context.Context, key string, route *internal.Route) error
+	Del(ctx context.Context, id string) error
+	GetAll(ctx context.Context) (map[string]internal.Route, error)
+	List(ctx context.Context, start string) (internal.RouteIterator, error)
+	NextID(ctx context.Context) (uint64, error)
+}

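With this interface in place, everything above the storage layer can be written against backend.Backend instead of a concrete store. A minimal sketch of a caller, assuming only what the interface above declares (the resolve helper and its package placement are hypothetical, not part of this commit):

package web // hypothetical placement; any consumer of the interface looks the same

import (
	"context"

	"github.com/kellegous/go/backend"
)

// resolve looks up a shortcut by name against any Backend implementation;
// the caller never touches leveldb (or any other store) directly.
func resolve(ctx context.Context, b backend.Backend, name string) (string, error) {
	rt, err := b.Get(ctx, name) // yields internal.ErrRouteNotFound when absent
	if err != nil {
		return "", err
	}
	return rt.URL, nil
}
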
+ 193 - 0
backend/leveldb/leveldb.go

@@ -0,0 +1,193 @@
+package leveldb
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+
+	"github.com/kellegous/go/internal"
+)
+
+const (
+	routesDbFilename = "routes.db"
+	idLogFilename    = "id"
+)
+
+// Backend provides access to the leveldb store.
+type Backend struct {
+	// Path contains the location on disk where this DB exists.
+	path string
+	db   *leveldb.DB
+	lck  sync.Mutex
+	id   uint64
+}
+
+// Commit the given ID to the data store.
+func commit(filename string, id uint64) error {
+	w, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+
+	if err := binary.Write(w, binary.LittleEndian, id); err != nil {
+		return err
+	}
+
+	return w.Sync()
+}
+
+// Load the current ID from the data store.
+func load(filename string) (uint64, error) {
+	if _, err := os.Stat(filename); err != nil {
+		return 0, commit(filename, 0)
+	}
+
+	r, err := os.Open(filename)
+	if err != nil {
+		return 0, err
+	}
+	defer r.Close()
+
+	var id uint64
+	if err := binary.Read(r, binary.LittleEndian, &id); err != nil {
+		return 0, err
+	}
+
+	return id, nil
+}
+
+// New instantiates a new Backend
+func New(path string) (*Backend, error) {
+	backend := Backend{
+		path: path,
+	}
+
+	if _, err := os.Stat(backend.path); err != nil {
+		if err := os.MkdirAll(path, os.ModePerm); err != nil {
+			return nil, err
+		}
+	}
+
+	// open the database
+	db, err := leveldb.OpenFile(filepath.Join(backend.path, routesDbFilename), nil)
+	if err != nil {
+		return nil, err
+	}
+	backend.db = db
+
+	id, err := load(filepath.Join(backend.path, idLogFilename))
+	if err != nil {
+		return nil, err
+	}
+	backend.id = id
+
+	return &backend, nil
+}
+
+// Close the resources associated with this backend.
+func (backend *Backend) Close() error {
+	return backend.db.Close()
+}
+
+// Get retrieves a shortcut from the data store.
+func (backend *Backend) Get(ctx context.Context, name string) (*internal.Route, error) {
+	val, err := backend.db.Get([]byte(name), nil)
+	if err != nil {
+		if errors.Is(err, leveldb.ErrNotFound) {
+			return nil, internal.ErrRouteNotFound
+		}
+		return nil, err
+	}
+
+	rt := &internal.Route{}
+	if err := rt.Read(bytes.NewBuffer(val)); err != nil {
+		return nil, err
+	}
+
+	return rt, nil
+}
+
+// Put stores a new shortcut in the data store.
+func (backend *Backend) Put(ctx context.Context, key string, rt *internal.Route) error {
+	var buf bytes.Buffer
+	if err := rt.Write(&buf); err != nil {
+		return err
+	}
+
+	return backend.db.Put([]byte(key), buf.Bytes(), &opt.WriteOptions{Sync: true})
+}
+
+// Del removes an existing shortcut from the data store.
+func (backend *Backend) Del(ctx context.Context, key string) error {
+	return backend.db.Delete([]byte(key), &opt.WriteOptions{Sync: true})
+}
+
+// List returns an iterator over all routes, starting at the key prefix start (the empty string lists everything).
+func (backend *Backend) List(ctx context.Context, start string) (internal.RouteIterator, error) {
+	return &RouteIterator{
+		it: backend.db.NewIterator(&util.Range{
+			Start: []byte(start),
+			Limit: nil,
+		}, nil),
+	}, nil
+}
+
+// GetAll gets everything in the db to dump it out for backup purposes
+func (backend *Backend) GetAll(ctx context.Context) (map[string]internal.Route, error) {
+	golinks := map[string]internal.Route{}
+	iter := backend.db.NewIterator(nil, nil)
+	defer iter.Release()
+
+	for iter.Next() {
+		key := iter.Key()
+		val := iter.Value()
+		rt := &internal.Route{}
+		if err := rt.Read(bytes.NewBuffer(val)); err != nil {
+			return nil, err
+		}
+		golinks[string(key[:])] = *rt
+	}
+
+	if err := iter.Error(); err != nil {
+		return nil, err
+	}
+
+	return golinks, nil
+}
+
+func (backend *Backend) commit(id uint64) error {
+	w, err := os.Create(filepath.Join(backend.path, idLogFilename))
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+
+	if err := binary.Write(w, binary.LittleEndian, id); err != nil {
+		return err
+	}
+
+	return w.Sync()
+}
+
+// NextID generates the next numeric ID to be used for an auto-named shortcut.
+func (backend *Backend) NextID(ctx context.Context) (uint64, error) {
+	backend.lck.Lock()
+	defer backend.lck.Unlock()
+
+	backend.id++
+
+	if err := commit(filepath.Join(backend.path, idLogFilename), backend.id); err != nil {
+		return 0, err
+	}
+
+	return backend.id, nil
+}

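Taken together, New, Put, Get, and Close can be exercised end to end. A hypothetical smoke test of the backend as committed above (the data path and shortcut name are illustrative only):

package main

import (
	"context"
	"log"
	"time"

	"github.com/kellegous/go/backend/leveldb"
	"github.com/kellegous/go/internal"
)

func main() {
	// Open (or create) a store in a scratch directory; the path is illustrative.
	b, err := leveldb.New("/tmp/golinks-data")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	ctx := context.Background()
	rt := &internal.Route{URL: "https://golang.org/doc/", Time: time.Now()}
	if err := b.Put(ctx, "docs", rt); err != nil {
		log.Fatal(err)
	}

	got, err := b.Get(ctx, "docs")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("docs -> %s", got.URL)
}
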
+ 16 - 15
context/iter.go → backend/leveldb/leveldb_iter.go

@@ -1,22 +1,23 @@
-package context
+package leveldb
 
 import (
 	"bytes"
 
+	"github.com/kellegous/go/internal"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 )
 
-// Iter allows iteration of the named routes in the store.
-type Iter struct {
+// RouteIterator allows iteration of the named routes in the store.
+type RouteIterator struct {
 	it   iterator.Iterator
 	name string
-	rt   *Route
+	rt   *internal.Route
 	err  error
 }
 
-func (i *Iter) decode() error {
-	rt := &Route{}
-	if err := rt.read(bytes.NewBuffer(i.it.Value())); err != nil {
+func (i *RouteIterator) decode() error {
+	rt := &internal.Route{}
+	if err := rt.Read(bytes.NewBuffer(i.it.Value())); err != nil {
 		return err
 	}
 
@@ -26,12 +27,12 @@ func (i *Iter) decode() error {
 }
 
 // Valid indicates whether the current values of the iterator are valid.
-func (i *Iter) Valid() bool {
+func (i *RouteIterator) Valid() bool {
 	return i.it.Valid() && i.err == nil
 }
 
 // Next advances the iterator to the next value.
-func (i *Iter) Next() bool {
+func (i *RouteIterator) Next() bool {
 	i.name = ""
 	i.rt = nil
 
@@ -48,11 +49,11 @@ func (i *Iter) Next() bool {
 }
 
 // Seek ...
-func (i *Iter) Seek(cur []byte) bool {
+func (i *RouteIterator) Seek(cur string) bool {
 	i.name = ""
 	i.rt = nil
 
-	v := i.it.Seek(cur)
+	v := i.it.Seek([]byte(cur))
 
 	if !i.it.Valid() {
 		return v
@@ -66,7 +67,7 @@ func (i *Iter) Seek(cur []byte) bool {
 }
 
 // Error returns any active error that has stopped the iterator.
-func (i *Iter) Error() error {
+func (i *RouteIterator) Error() error {
 	if err := i.it.Error(); err != nil {
 		return err
 	}
@@ -75,16 +76,16 @@ func (i *Iter) Error() error {
 }
 
 // Name is the name of the current route.
-func (i *Iter) Name() string {
+func (i *RouteIterator) Name() string {
 	return i.name
 }
 
 // Route is the current route.
-func (i *Iter) Route() *Route {
+func (i *RouteIterator) Route() *internal.Route {
 	return i.rt
 }
 
 // Release disposes of the resources in the iterator.
-func (i *Iter) Release() {
+func (i *RouteIterator) Release() {
 	i.it.Release()
 }

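The renamed iterator is consumed the same way regardless of which backend produced it. A hypothetical walk over every route, assuming only the internal.RouteIterator interface from this commit (the dumpRoutes helper and its package are illustrative):

package tools // hypothetical; any package holding admin helpers would do

import (
	"context"
	"log"

	"github.com/kellegous/go/backend"
)

// dumpRoutes walks every route in key order. It relies only on the
// internal.RouteIterator interface, never on the leveldb iterator itself.
func dumpRoutes(ctx context.Context, b backend.Backend) error {
	it, err := b.List(ctx, "") // the empty string starts at the first key
	if err != nil {
		return err
	}
	defer it.Release()

	for it.Next() {
		log.Printf("%s -> %s", it.Name(), it.Route().URL)
	}
	return it.Error()
}
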
+ 49 - 22
context/context_test.go → backend/leveldb/leveldb_test.go

@@ -1,6 +1,7 @@
-package context
+package leveldb
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -8,7 +9,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/kellegous/go/internal"
 )
 
 func TestGetPut(t *testing.T) {
@@ -18,26 +19,29 @@ func TestGetPut(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	ctx, err := Open(filepath.Join(tmp, "data"))
+	backend, err := New(filepath.Join(tmp, "data"))
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer ctx.Close()
+	defer backend.Close()
 
-	if _, err := ctx.Get("not_found"); err != leveldb.ErrNotFound {
-		t.Fatalf("expected ErrNotFound, got \"%v\"", err)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+
+	if _, err := backend.Get(ctx, "not_found"); err != internal.ErrRouteNotFound {
+		t.Fatalf("expected ErrRouteNotFound, got \"%v\"", err)
 	}
 
-	a := &Route{
+	a := &internal.Route{
 		URL:  "http://www.kellegous.com/",
 		Time: time.Now(),
 	}
 
-	if err := ctx.Put("key", a); err != nil {
+	if err := backend.Put(ctx, "key", a); err != nil {
 		t.Fatal(err)
 	}
 
-	b, err := ctx.Get("key")
+	b, err := backend.Get(ctx, "key")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -58,15 +62,18 @@ func TestNextID(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	ctx, err := Open(filepath.Join(tmp, "data"))
+	backend, err := New(filepath.Join(tmp, "data"))
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer ctx.Close()
+	defer backend.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
 
 	var e uint64 = 1
 	for i := 0; i < 501; i++ {
-		r, err := ctx.NextID()
+		r, err := backend.NextID(ctx)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -86,12 +93,15 @@ func TestEmptyList(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	ctx, err := Open(filepath.Join(tmp, "data"))
+	backend, err := New(filepath.Join(tmp, "data"))
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	it := ctx.List(nil)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+
+	it, err := backend.List(ctx, "")
 	defer it.Release()
 
 	if it.Valid() {
@@ -107,9 +117,9 @@ func TestEmptyList(t *testing.T) {
 	}
 }
 
-func putRoutes(ctx *Context, names ...string) error {
+func putRoutes(ctx context.Context, backend *Backend, names ...string) error {
 	for _, name := range names {
-		if err := ctx.Put(name, &Route{
+		if err := backend.Put(ctx, name, &internal.Route{
 			URL:  fmt.Sprintf("http://%s/", name),
 			Time: time.Unix(0, 420),
 		}); err != nil {
@@ -119,7 +129,7 @@ func putRoutes(ctx *Context, names ...string) error {
 	return nil
 }
 
-func mustBeIterOf(t *testing.T, iter *Iter, names ...string) {
+func mustBeIterOf(t *testing.T, iter internal.RouteIterator, names ...string) {
 	defer iter.Release()
 
 	if iter.Valid() {
@@ -172,16 +182,33 @@ func TestList(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	ctx, err := Open(filepath.Join(tmp, "data"))
+	backend, err := New(filepath.Join(tmp, "data"))
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := putRoutes(ctx, "a", "c", "d"); err != nil {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+
+	if err := putRoutes(ctx, backend, "a", "c", "d"); err != nil {
+		t.Fatal(err)
+	}
+
+	iter, err := backend.List(ctx, "")
+	if err != nil {
 		t.Fatal(err)
 	}
+	mustBeIterOf(t, iter, "a", "c", "d")
 
-	mustBeIterOf(t, ctx.List(nil), "a", "c", "d")
-	mustBeIterOf(t, ctx.List([]byte{'b'}), "c", "d")
-	mustBeIterOf(t, ctx.List([]byte{'z'}))
+	iter, err = backend.List(ctx, "b")
+	if err != nil {
+		t.Fatal(err)
+	}
+	mustBeIterOf(t, iter, "c", "d")
+
+	iter, err = backend.List(ctx, "z")
+	if err != nil {
+		t.Fatal(err)
+	}
+	mustBeIterOf(t, iter)
 }

+ 49 - 0
cmd/go/main.go

@@ -0,0 +1,49 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+
+	"github.com/kellegous/go/backend"
+	"github.com/kellegous/go/backend/leveldb"
+	"github.com/kellegous/go/web"
+)
+
+func main() {
+	pflag.String("addr", ":8067", "default bind address")
+	pflag.Bool("admin", false, "allow admin-level requests")
+	pflag.String("version", "", "version string")
+	pflag.String("backend", "leveldb", "backing store to use. Only 'leveldb' currently supported.")
+	pflag.String("data", "data", "The location of the leveldb data directory")
+
+	pflag.Parse()
+
+	if err := viper.BindPFlags(pflag.CommandLine); err != nil {
+		log.Panic(err)
+	}
+
+	// allow env vars to set pflags
+	viper.AutomaticEnv()
+	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+
+	var backend backend.Backend
+
+	switch viper.GetString("backend") {
+	case "leveldb":
+		var err error
+		backend, err = leveldb.New(viper.GetString("data"))
+		if err != nil {
+			log.Panic(err)
+		}
+	default:
+		log.Panic(fmt.Sprintf("unknown backend %s", viper.GetString("backend")))
+	}
+
+	defer backend.Close()
+
+	log.Panic(web.ListenAndServe(backend))
+}

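Because the flags are bound to viper with AutomaticEnv and a "-" to "_" key replacer, each flag can also be supplied through the environment. A small standalone sketch of that behavior, with illustrative values (not part of this commit):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	pflag.String("data", "data", "The location of the leveldb data directory")
	pflag.Parse()
	_ = viper.BindPFlags(pflag.CommandLine)

	viper.AutomaticEnv()
	viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))

	// The environment overrides the flag default: setting DATA has the
	// same effect as passing --data on the command line.
	os.Setenv("DATA", "/var/lib/golinks") // illustrative value
	fmt.Println(viper.GetString("data"))  // prints /var/lib/golinks
}
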
+ 0 - 222
context/context.go

@@ -1,222 +0,0 @@
-package context
-
-import (
-	"bytes"
-	"encoding/binary"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"time"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/opt"
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-const (
-	routesDbFilename = "routes.db"
-	idLogFilename    = "id"
-)
-
-// Route is the value part of a shortcut.
-type Route struct {
-	URL  string    `json:"url"`
-	Time time.Time `json:"time"`
-}
-
-// Serialize this Route into the given Writer.
-func (o *Route) write(w io.Writer) error {
-	if err := binary.Write(w, binary.LittleEndian, o.Time.UnixNano()); err != nil {
-		return err
-	}
-
-	if _, err := w.Write([]byte(o.URL)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Deserialize this Route from the given Reader.
-func (o *Route) read(r io.Reader) error {
-	var t int64
-	if err := binary.Read(r, binary.LittleEndian, &t); err != nil {
-		return err
-	}
-
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return err
-	}
-
-	o.URL = string(b)
-	o.Time = time.Unix(0, t)
-	return nil
-}
-
-// Context provides access to the data store.
-type Context struct {
-	path string
-	db   *leveldb.DB
-	lck  sync.Mutex
-	id   uint64
-}
-
-// Commit the given ID to the data store.
-func commit(filename string, id uint64) error {
-	w, err := os.Create(filename)
-	if err != nil {
-		return err
-	}
-	defer w.Close()
-
-	if err := binary.Write(w, binary.LittleEndian, id); err != nil {
-		return err
-	}
-
-	return w.Sync()
-}
-
-// Load the current ID from the data store.
-func load(filename string) (uint64, error) {
-	if _, err := os.Stat(filename); err != nil {
-		return 0, commit(filename, 0)
-	}
-
-	r, err := os.Open(filename)
-	if err != nil {
-		return 0, err
-	}
-	defer r.Close()
-
-	var id uint64
-	if err := binary.Read(r, binary.LittleEndian, &id); err != nil {
-		return 0, err
-	}
-
-	return id, nil
-}
-
-// Open the context using path as the data store location.
-func Open(path string) (*Context, error) {
-	if _, err := os.Stat(path); err != nil {
-		if err := os.MkdirAll(path, os.ModePerm); err != nil {
-			return nil, err
-		}
-	}
-
-	// open the database
-	db, err := leveldb.OpenFile(filepath.Join(path, routesDbFilename), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	id, err := load(filepath.Join(path, idLogFilename))
-	if err != nil {
-		return nil, err
-	}
-
-	return &Context{
-		path: path,
-		db:   db,
-		id:   id,
-	}, nil
-}
-
-// Close the resources associated with this context.
-func (c *Context) Close() error {
-	return c.db.Close()
-}
-
-// Get retreives a shortcut from the data store.
-func (c *Context) Get(name string) (*Route, error) {
-	val, err := c.db.Get([]byte(name), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	rt := &Route{}
-	if err := rt.read(bytes.NewBuffer(val)); err != nil {
-		return nil, err
-	}
-
-	return rt, nil
-}
-
-// Put stores a new shortcut in the data store.
-func (c *Context) Put(key string, rt *Route) error {
-	var buf bytes.Buffer
-	if err := rt.write(&buf); err != nil {
-		return err
-	}
-
-	return c.db.Put([]byte(key), buf.Bytes(), &opt.WriteOptions{Sync: true})
-}
-
-// Del removes an existing shortcut from the data store.
-func (c *Context) Del(key string) error {
-	return c.db.Delete([]byte(key), &opt.WriteOptions{Sync: true})
-}
-
-// List all routes in an iterator, starting with the key prefix of start (which can also be nil).
-func (c *Context) List(start []byte) *Iter {
-	return &Iter{
-		it: c.db.NewIterator(&util.Range{
-			Start: start,
-			Limit: nil,
-		}, nil),
-	}
-}
-
-// GetAll gets everything in the db to dump it out for backup purposes
-func (c *Context) GetAll() (map[string]Route, error) {
-	golinks := map[string]Route{}
-	iter := c.db.NewIterator(nil, nil)
-	defer iter.Release()
-
-	for iter.Next() {
-		key := iter.Key()
-		val := iter.Value()
-		rt := &Route{}
-		if err := rt.read(bytes.NewBuffer(val)); err != nil {
-			return nil, err
-		}
-		golinks[string(key[:])] = *rt
-	}
-
-	if err := iter.Error(); err != nil {
-		return nil, err
-	}
-
-	return golinks, nil
-}
-
-func (c *Context) commit(id uint64) error {
-	w, err := os.Create(filepath.Join(c.path, idLogFilename))
-	if err != nil {
-		return err
-	}
-	defer w.Close()
-
-	if err := binary.Write(w, binary.LittleEndian, id); err != nil {
-		return err
-	}
-
-	return w.Sync()
-}
-
-// NextID generates the next numeric ID to be used for an auto-named shortcut.
-func (c *Context) NextID() (uint64, error) {
-	c.lck.Lock()
-	defer c.lck.Unlock()
-
-	c.id++
-
-	if err := commit(filepath.Join(c.path, idLogFilename), c.id); err != nil {
-		return 0, err
-	}
-
-	return c.id, nil
-}

+ 9 - 0
go.mod

@@ -0,0 +1,9 @@
+module github.com/kellegous/go
+
+go 1.13
+
+require (
+	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.5.0
+	github.com/syndtr/goleveldb v1.0.0
+)

+ 155 - 0
go.sum

@@ -0,0 +1,155 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.5.0 h1:GpsTwfsQ27oS/Aha/6d1oD7tpKIqWnOA6tgOX9HHkt4=
+github.com/spf13/viper v1.5.0/go.mod h1:AkYRkVJF8TkSG/xet6PzXX+l39KhhXa2pdqVSxnTcn4=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

+ 58 - 0
internal/routes.go

@@ -0,0 +1,58 @@
+package internal
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"io/ioutil"
+	"time"
+)
+
+// Route is the value part of a shortcut.
+type Route struct {
+	URL  string    `json:"url"`
+	Time time.Time `json:"time"`
+}
+
+// RouteIterator allows iteration of the named routes in the store.
+type RouteIterator interface {
+	Valid() bool
+	Next() bool
+	Seek(string) bool
+	Error() error
+	Name() string
+	Route() *Route
+	Release()
+}
+
+var ErrRouteNotFound = errors.New("route not found")
+
+// Serialize this Route into the given Writer.
+func (o *Route) Write(w io.Writer) error {
+	if err := binary.Write(w, binary.LittleEndian, o.Time.UnixNano()); err != nil {
+		return err
+	}
+
+	if _, err := w.Write([]byte(o.URL)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Deserialize this Route from the given Reader.
+func (o *Route) Read(r io.Reader) error {
+	var t int64
+	if err := binary.Read(r, binary.LittleEndian, &t); err != nil {
+		return err
+	}
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	o.URL = string(b)
+	o.Time = time.Unix(0, t)
+	return nil
+}

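The wire format is eight bytes of little-endian UnixNano followed by the raw URL bytes; Read consumes the remainder of the stream as the URL, so each record must arrive in its own reader. A minimal round-trip sketch of the exported Write/Read pair above:

package main

import (
	"bytes"
	"fmt"
	"log"
	"time"

	"github.com/kellegous/go/internal"
)

func main() {
	in := internal.Route{URL: "http://example.com/", Time: time.Unix(0, 420)}

	// Encode: 8 bytes of little-endian UnixNano, then the raw URL bytes.
	var buf bytes.Buffer
	if err := in.Write(&buf); err != nil {
		log.Fatal(err)
	}

	// Decode: read the timestamp, then treat the rest of the stream as the URL.
	var out internal.Route
	if err := out.Read(&buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.URL, out.Time.Equal(in.Time)) // http://example.com/ true
}
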
+ 0 - 36
main.go

@@ -1,36 +0,0 @@
-package main
-
-import (
-	"flag"
-	"log"
-
-	"github.com/kellegous/go/context"
-	"github.com/kellegous/go/web"
-)
-
-var version string
-
-func getVersion() string {
-	if version == "" {
-		return "none"
-	}
-	return version
-}
-
-func main() {
-	flagData := flag.String("data", "data",
-		"The location to use for the data store")
-	flagAddr := flag.String("addr", ":8067",
-		"The address that the HTTP server will bind")
-	flagAdmin := flag.Bool("admin", false,
-		"If allowing admin level requests")
-	flag.Parse()
-
-	ctx, err := context.Open(*flagData)
-	if err != nil {
-		log.Panic(err)
-	}
-	defer ctx.Close()
-
-	log.Panic(web.ListenAndServe(*flagAddr, *flagAdmin, getVersion(), ctx))
-}

+ 5 - 0
vendor/github.com/fsnotify/fsnotify/.editorconfig

@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4

+ 6 - 0
vendor/github.com/fsnotify/fsnotify/.gitignore

@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project

+ 30 - 0
vendor/github.com/fsnotify/fsnotify/.travis.yml

@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+  - 1.8.x
+  - 1.9.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+
+before_script:
+  - go get -u github.com/golang/lint/golint
+
+script:
+  - go test -v --race ./...
+
+after_script:
+  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+  - test -z "$(golint ./...     | tee /dev/stderr)"
+  - go vet ./...
+
+os:
+  - linux
+  - osx
+
+notifications:
+  email: false

+ 52 - 0
vendor/github.com/fsnotify/fsnotify/AUTHORS

@@ -0,0 +1,52 @@
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L <aaron@bettercoder.net>
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>

+ 317 - 0
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md

@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+    * add low-level functions
+    * only need to store flags on directories
+    * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+    * done can be an unbuffered channel
+    * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in  rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+    * current implementation doesn't take advantage of OS for efficiency
+    * provides little benefit over filtering events as they are received, but has  extra bookkeeping and mutexes
+    * no tests for the current implementation
+    * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant  [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21

+ 77 - 0
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md

@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+  * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+  * Update the CHANGELOG
+  * Tag a version, which will become available through gopkg.in.
+ 
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy. 
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password).
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

+ 28 - 0
vendor/github.com/fsnotify/fsnotify/LICENSE

@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 79 - 0
vendor/github.com/fsnotify/fsnotify/README.md

@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter   |OS        |Status    |
+|----------|----------|----------|
+|inotify   |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue    |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents  |macOS         |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN       |Solaris 11    |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify  |Linux 2.6.37+ | |
+|USN Journals |Windows    |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling   |*All*         |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). 
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
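+
+For example, a copy-based vendor layout might be produced like this (a sketch assuming a GOPATH checkout; the paths are illustrative):
+
+```console
+mkdir -p vendor/github.com/fsnotify vendor/golang.org/x
+cp -r $GOPATH/src/github.com/fsnotify/fsnotify vendor/github.com/fsnotify/
+cp -r $GOPATH/src/golang.org/x/sys vendor/golang.org/x/
+```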
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
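+
+For a quick sense of the API, here is a minimal sketch (illustrative only, not the maintained example; the watched path `/tmp/foo` is an assumption and error handling is kept short):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+func main() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer watcher.Close()
+
+	// Read events and errors from the watcher's channels. As the FAQ below
+	// notes, this currently has to happen in a separate goroutine.
+	go func() {
+		for {
+			select {
+			case event := <-watcher.Events:
+				log.Println("event:", event)
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					log.Println("modified file:", event.Name)
+				}
+			case err := <-watcher.Errors:
+				log.Println("error:", err)
+			}
+		}
+	}()
+
+	if err := watcher.Add("/tmp/foo"); err != nil { // /tmp/foo is an assumed path
+		log.Fatal(err)
+	}
+	select {} // block so the watcher keeps running (sketch only)
+}
+```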
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created (see the commands below):
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching it results in a "no space left on device" error.
+* BSD / OSX: the sysctl variables "kern.maxfiles" and "kern.maxfilesperproc" contain the limits; reaching them results in a "too many open files" error.
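+
+On Linux, for example, you can inspect the current limit and raise it with sysctl (a sketch; the value shown is illustrative, not a recommendation):
+
+```console
+cat /proc/sys/fs/inotify/max_user_watches
+sudo sysctl fs.inotify.max_user_watches=524288
+```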
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+

+ 37 - 0
vendor/github.com/fsnotify/fsnotify/fen.go

@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}

+ 66 - 0
vendor/github.com/fsnotify/fsnotify/fsnotify.go

@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")

+ 337 - 0
vendor/github.com/fsnotify/fsnotify/inotify.go

@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; either way we need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below, e.g. the readEvents() goroutine receives
+	// IN_IGNORED, so EINVAL means that the wd is being rm_watch()ed or its file was
+	// removed by another thread and we have not yet received the IN_IGNORED event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// EOF was received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}

+ 187 - 0
vendor/github.com/fsnotify/fsnotify/inotify_poller.go

@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(0)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events written to
+	// the buffer, or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// wake writes a byte to the pipe to wake up the poller.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}

+ 521 - 0
vendor/github.com/fsnotify/fsnotify/kqueue.go

@@ -0,0 +1,521 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+
+	// copy paths to remove while locked
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	for _, name := range pathsToRemove {
+		w.Remove(name)
+	}
+
+	// send a "quit" message to the reader goroutine
+	close(w.done)
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow Symlinks
+		// Unfortunately, Linux can add bogus symlinks to the watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only with NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			break loop
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				break loop
+			}
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on a recursively watched folder and receive a
+				// modification event first, but the folder has been deleted and the
+				// delete event arrives later.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// The only way the previous loop can break is if w.done was closed, so we need a non-blocking send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		select {
+		case w.Events <- newCreateEvent(filePath):
+		case <-w.done:
+			return
+		}
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}

+ 11 - 0
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go

@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY

+ 12 - 0
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go

@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY

+ 561 - 0
vendor/github.com/fsnotify/fsnotify/windows.go

@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+		e.Op |= Create
+	}
+	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+		e.Op |= Remove
+	}
+	if mask&sysFSMODIFY == sysFSMODIFY {
+		e.Op |= Write
+	}
+	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+		e.Op |= Rename
+	}
+	if mask&sysFSATTRIB == sysFSATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+	}
+	ino = &inode{
+		handle: h,
+		volume: fi.VolumeSerialNumber,
+		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+	}
+	return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+	if i := m[ino.volume]; i != nil {
+		return i[ino.index]
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+	i := m[ino.volume]
+	if i == nil {
+		i = make(indexMap)
+		m[ino.volume] = i
+	}
+	i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	if flags&sysFSONLYDIR != 0 && pathname != dir {
+		return nil
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watchEntry := w.watches.get(ino)
+	w.mu.Unlock()
+	if watchEntry == nil {
+		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+			syscall.CloseHandle(ino.handle)
+			return os.NewSyscallError("CreateIoCompletionPort", e)
+		}
+		watchEntry = &watch{
+			ino:   ino,
+			path:  dir,
+			names: make(map[string]uint64),
+		}
+		w.mu.Lock()
+		w.watches.set(ino, watchEntry)
+		w.mu.Unlock()
+		flags |= provisional
+	} else {
+		syscall.CloseHandle(ino.handle)
+	}
+	if pathname == dir {
+		watchEntry.mask |= flags
+	} else {
+		watchEntry.names[filepath.Base(pathname)] |= flags
+	}
+	if err = w.startRead(watchEntry); err != nil {
+		return err
+	}
+	if pathname == dir {
+		watchEntry.mask &= ^provisional
+	} else {
+		watchEntry.names[filepath.Base(pathname)] &= ^provisional
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+	if watch == nil {
+		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+	return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	if e := syscall.CancelIo(watch.ino.handle); e != nil {
+		w.Errors <- os.NewSyscallError("CancelIo", e)
+		w.deleteWatch(watch)
+	}
+	mask := toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= toWindowsFlags(m)
+	}
+	if mask == 0 {
+		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+			w.Errors <- os.NewSyscallError("CloseHandle", e)
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+	if e != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", e)
+		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+	var (
+		n, key uint32
+		ov     *syscall.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+		watch := (*watch)(unsafe.Pointer(ov))
+
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+				var err error
+				if e := syscall.CloseHandle(w.port); e != nil {
+					err = os.NewSyscallError("CloseHandle", e)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch e {
+		case syscall.ERROR_MORE_DATA:
+			if watch == nil {
+				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sysFSACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sysFSMODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sysFSATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sysFSCREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sysFSDELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sysFSMODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sysFSMOVEDFROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sysFSMOVEDTO
+	}
+	return 0
+}

+ 16 - 0
vendor/github.com/golang/snappy/.gitignore

@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K

+ 15 - 0
vendor/github.com/golang/snappy/AUTHORS

@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>

+ 37 - 0
vendor/github.com/golang/snappy/CONTRIBUTORS

@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>

+ 27 - 0
vendor/github.com/golang/snappy/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 107 - 0
vendor/github.com/golang/snappy/README

@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
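
Before the implementation files below, a block-format round trip using this package's exported Encode and Decode is just a few lines (sketch):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hello snappy "), 100)

	// Encode writes a uvarint length header followed by the compressed block.
	compressed := snappy.Encode(nil, src)

	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(src), "->", len(compressed), "bytes; round trip ok:",
		bytes.Equal(src, decompressed))
}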

+ 237 - 0
vendor/github.com/golang/snappy/decode.go

@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
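
decodedLen above reads the uvarint header that prefixes every block. A small worked example of that header, using only encoding/binary from the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A snappy block begins with the uncompressed length as a uvarint.
	// 300 = 0b1_0010_1100, so it encodes as two bytes: 0xac (low 7 bits
	// plus a continuation bit) then 0x02 (the remaining bits).
	header := []byte{0xac, 0x02}
	v, n := binary.Uvarint(header)
	fmt.Println(v, n) // 300 2
}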

+ 14 - 0
vendor/github.com/golang/snappy/decode_amd64.go

@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int

+ 490 - 0
vendor/github.com/golang/snappy/decode_amd64.s

@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together means that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
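
The slowForwardCopy commentary above describes doubling a short repeating pattern until offset >= 8. A pure-Go sketch of that technique (illustrative only, not code from this package):

package main

import "fmt"

// forwardCopy copies length bytes from dst[d-offset:] to dst[d:], always
// running forwards so that overlapping ranges repeat the pattern, as
// Snappy copy tags require.
func forwardCopy(dst []byte, d, offset, length int) {
	// Double the pattern until offset >= 8; d-offset stays unchanged on
	// each step, exactly as noted in makeOffsetAtLeast8 above.
	for offset < 8 && length > offset {
		copy(dst[d:d+offset], dst[d-offset:d])
		d += offset
		length -= offset
		offset *= 2
	}
	// Finish with a forward byte-by-byte copy (the asm uses 8-byte chunks).
	for i := 0; i < length; i++ {
		dst[d+i] = dst[d+i-offset]
	}
}

func main() {
	dst := make([]byte, 10)
	dst[0], dst[1] = 'a', 'b'
	forwardCopy(dst, 2, 2, 8)
	fmt.Println(string(dst)) // ababababab
}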

+ 101 - 0
vendor/github.com/golang/snappy/decode_other.go

@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+		// the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		for end := d + length; d != end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
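
The decode loop above dispatches on the low two bits of each tag byte. A toy dispatcher showing those cases (the tag constants are the format's fixed values, as defined in this package's snappy.go):

package main

import "fmt"

// The low two bits of every snappy tag byte select the element type.
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

func main() {
	for _, tag := range []byte{0x10, 0x05, 0x3e, 0xff} {
		switch tag & 0x03 {
		case tagLiteral:
			// The upper six bits hold len(lit)-1, or 60..63 for longer forms.
			fmt.Printf("0x%02x: literal, x=%d\n", tag, tag>>2)
		case tagCopy1:
			fmt.Printf("0x%02x: copy1, length=%d\n", tag, 4+int(tag>>2&0x7))
		case tagCopy2:
			fmt.Printf("0x%02x: copy2, length=%d\n", tag, 1+int(tag>>2))
		case tagCopy4:
			fmt.Printf("0x%02x: copy4, length=%d\n", tag, 1+int(tag>>2))
		}
	}
}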

+ 285 - 0
vendor/github.com/golang/snappy/encode.go

@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
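
The framed Writer above pairs with the Reader from decode.go. A stream round trip using the exported NewBufferedWriter and NewReader (sketch):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// NewBufferedWriter buffers input, so Close (or Flush) is required to
	// guarantee every chunk reaches the underlying writer.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("hello, framed snappy stream")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r := snappy.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}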

+ 29 - 0
vendor/github.com/golang/snappy/encode_amd64.go

@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)

+ 730 - 0
vendor/github.com/golang/snappy/encode_amd64.s

@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls give 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET

+ 238 - 0
vendor/github.com/golang/snappy/encode_other.go

@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
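+//
+// For example, emitLiteral(dst, []byte("abc")) writes the tag byte 0x08
+// (n = 2, tagLiteral) followed by "abc", and returns 4.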
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
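+//
+// For example, emitCopy(dst, 1, 4) writes the 2-byte tagCopy1 chunk
+// 0x01 0x01 (length 4, offset 1), the encoding of four repeated bytes.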
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}

+ 98 - 0
vendor/github.com/golang/snappy/snappy.go

@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The Decode and Encode functions implement the
+// block format; the Reader and Writer types implement the stream format.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
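+//
+// A minimal block-format round trip looks like:
+//
+//	compressed := snappy.Encode(nil, data)
+//	decompressed, err := snappy.Decode(nil, compressed)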
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
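+
+// For example, the chunk tag byte 0x08 has l == 0 (a literal tag) and
+// m == 2, so the next 1 + m == 3 bytes are literal bytes.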
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
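+// Per that spec, the CRC-32C value is not stored directly: it is rotated
+// right by 15 bits and a constant is added, so that data which contains
+// embedded checksums is less likely to defeat the check.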
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}

+ 9 - 0
vendor/github.com/hashicorp/hcl/.gitignore

@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test

+ 13 - 0
vendor/github.com/hashicorp/hcl/.travis.yml

@@ -0,0 +1,13 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.x
+  - tip
+
+branches:
+  only:
+    - master
+
+script: make test

+ 354 - 0
vendor/github.com/hashicorp/hcl/LICENSE

@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+

+ 18 - 0
vendor/github.com/hashicorp/hcl/Makefile

@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go get -t ./...
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps

+ 125 - 0
vendor/github.com/hashicorp/hcl/README.md

@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools,
+specifically targeting DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question about HCL is: why not JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+    Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as a hexadecimal. If it is prefixed with 0, it is
+    treated as an octal. Numbers can be in scientific notation: "1e10".
+
+  * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives,
+    other arrays, and objects. As an alternative, lists
+    of objects can be created with repeated blocks, using
+    this structure:
+
+    ```hcl
+    service {
+        key = "value"
+    }
+
+    service {
+        key = "value"
+    }
+    ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+  "variable": {
+      "ami": {
+          "description": "the AMI to use"
+        }
+    }
+}
+```
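+
+In Go, input like the above can be decoded with this library's `Decode`
+function into a struct or map. A minimal sketch (the `config` struct shape
+here is illustrative, not part of the library):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl"
+)
+
+type config struct {
+	Variable map[string]struct {
+		Description string `hcl:"description"`
+	} `hcl:"variable"`
+}
+
+func main() {
+	input := `
+variable "ami" {
+  description = "the AMI to use"
+}
+`
+
+	var c config
+	if err := hcl.Decode(&c, input); err != nil {
+		panic(err)
+	}
+	fmt.Println(c.Variable["ami"].Description) // the AMI to use
+}
+```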
+
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based on.
+
+  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+    in pure Go (no goyacc) and support for a printer.

+ 19 - 0
vendor/github.com/hashicorp/hcl/appveyor.yml

@@ -0,0 +1,19 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf false
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...

+ 729 - 0
vendor/github.com/hashicorp/hcl/decoder.go

@@ -0,0 +1,729 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag used to configure HCL decoding of structures.
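+//
+// For example, a leading name overrides the field name used for matching,
+// and the ",key" option fills a string field with the item's key (such as
+// a block label):
+//
+//	type Service struct {
+//		Name string `hcl:",key"`
+//		Port int    `hcl:"port"`
+//	}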
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we really decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
+	if k.Kind() != reflect.Interface {
+		d.stack = append(d.stack, k.Kind())
+
+		// Schedule a pop
+		defer func() {
+			d.stack = d.stack[:len(d.stack)-1]
+		}()
+	}
+
+	switch k.Kind() {
+	case reflect.Bool:
+		return d.decodeBool(name, node, result)
+	case reflect.Float32, reflect.Float64:
+		return d.decodeFloat(name, node, result)
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		return d.decodeInt(name, node, result)
+	case reflect.Interface:
+		// When we see an interface, we make our own thing
+		return d.decodeInterface(name, node, result)
+	case reflect.Map:
+		return d.decodeMap(name, node, result)
+	case reflect.Ptr:
+		return d.decodePtr(name, node, result)
+	case reflect.Slice:
+		return d.decodeSlice(name, node, result)
+	case reflect.String:
+		return d.decodeString(name, node, result)
+	case reflect.Struct:
+		return d.decodeStruct(name, node, result)
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+		}
+	}
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.BOOL {
+			v, err := strconv.ParseBool(n.Token.Text)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+			v, err := strconv.ParseFloat(n.Token.Text, 64)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		case token.STRING:
+			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+	// When we see an ast.Node, we retain the value to enable deferred decoding.
+	// Very useful in situations where we want to preserve ast.Node information
+	// like Pos
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For testing types, ObjectType should just be treated as a list. We
+	// set this to a temporary var because we want to pass in the real node.
+	testNode := node
+	if ot, ok := node.(*ast.ObjectType); ok {
+		testNode = ot.List
+	}
+
+	switch n := testNode.(type) {
+	case *ast.ObjectList:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the AST so it decodes properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+	item, ok := node.(*ast.ObjectItem)
+	if !ok {
+		return node
+	}
+
+	elemType := result.Type()
+
+	// our target type must be a struct
+	switch elemType.Kind() {
+	case reflect.Ptr:
+		switch elemType.Elem().Kind() {
+		case reflect.Struct:
+			//OK
+		default:
+			return node
+		}
+	case reflect.Struct:
+		//OK
+	default:
+		return node
+	}
+
+	// A list value will have a key and field name. If it had more fields,
+	// it wouldn't have been flattened.
+	if len(item.Keys) != 2 {
+		return node
+	}
+
+	keyToken := item.Keys[0].Token
+	item.Keys = item.Keys[1:]
+
+	// we need to un-flatten the ast enough to decode
+	newNode := &ast.ObjectItem{
+		Keys: []*ast.ObjectKey{
+			&ast.ObjectKey{
+				Token: keyToken,
+			},
+		},
+		Val: &ast.ObjectType{
+			List: &ast.ObjectList{
+				Items: []*ast.ObjectItem{item},
+			},
+		},
+	}
+
+	return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+			return nil
+		case token.STRING, token.HEREDOC:
+			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+	}
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+	var item *ast.ObjectItem
+	if it, ok := node.(*ast.ObjectItem); ok {
+		item = it
+		node = it.Val
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok && item != nil {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	list, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+		}
+	}
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = result
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+			// Ignore fields with tag name "-"
+			if tagParts[0] == "-" {
+				continue
+			}
+
+			if fieldType.Anonymous {
+				fieldKind := fieldType.Type.Kind()
+				if fieldKind != reflect.Struct {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: unsupported type to struct: %s",
+							fieldType.Name, fieldKind),
+					}
+				}
+
+				// We have an embedded field. We "squash" the fields down
+				// if specified in the tag.
+				squash := false
+				for _, tag := range tagParts[1:] {
+					if tag == "squash" {
+						squash = true
+						break
+					}
+				}
+
+				if squash {
+					structs = append(
+						structs, result.FieldByName(fieldType.Name))
+					continue
+				}
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	usedKeys := make(map[string]struct{})
+	decodedFields := make([]string, 0, len(fields))
+	decodedFieldsVal := make([]reflect.Value, 0)
+	unusedKeysVal := make([]reflect.Value, 0)
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(tagName)
+		tagParts := strings.SplitN(tagValue, ",", 2)
+		if len(tagParts) >= 2 {
+			switch tagParts[1] {
+			case "decodedFields":
+				decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+				continue
+			case "key":
+				if item == nil {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+							name, fieldName),
+					}
+				}
+
+				fieldValue.SetString(item.Keys[0].Token.Value().(string))
+				continue
+			case "unusedKeys":
+				unusedKeysVal = append(unusedKeysVal, fieldValue)
+				continue
+			}
+		}
+
+		if tagParts[0] != "" {
+			fieldName = tagParts[0]
+		}
+
+		// Determine the element we'll use to decode. If it is a single
+		// match (only object with the field), then we decode it exactly.
+		// If it is a prefix match, then we decode the matches.
+		filter := list.Filter(fieldName)
+
+		prefixMatches := filter.Children()
+		matches := filter.Elem()
+		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+			continue
+		}
+
+		// Track the used key
+		usedKeys[fieldName] = struct{}{}
+
+		// Create the field name and decode. We range over the elements
+		// because we actually want the value.
+		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		if len(prefixMatches.Items) > 0 {
+			if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+				return err
+			}
+		}
+		for _, match := range matches.Items {
+			var decodeNode ast.Node = match.Val
+			if ot, ok := decodeNode.(*ast.ObjectType); ok {
+				decodeNode = &ast.ObjectList{Items: ot.List.Items}
+			}
+
+			if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+				return err
+			}
+		}
+
+		decodedFields = append(decodedFields, field.Name)
+	}
+
+	if len(decodedFieldsVal) > 0 {
+		// Sort it so that it is deterministic
+		sort.Strings(decodedFields)
+
+		for _, v := range decodedFieldsVal {
+			v.Set(reflect.ValueOf(decodedFields))
+		}
+	}
+
+	return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+	var nodeContainer struct {
+		Node ast.Node
+	}
+	value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+	return value.Type()
+}

+ 3 - 0
vendor/github.com/hashicorp/hcl/go.mod

@@ -0,0 +1,3 @@
+module github.com/hashicorp/hcl
+
+require github.com/davecgh/go-spew v1.1.1

+ 2 - 0
vendor/github.com/hashicorp/hcl/go.sum

@@ -0,0 +1,2 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

+ 11 - 0
vendor/github.com/hashicorp/hcl/hcl.go

@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
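+//
+// A minimal sketch of both routes, assuming this package's Parse function
+// (which returns an *ast.File):
+//
+//	// Decode directly from a string:
+//	var out map[string]interface{}
+//	if err := hcl.Decode(&out, `port = 8080`); err != nil {
+//		// handle error
+//	}
+//
+//	// Or parse to an AST first, then decode the AST:
+//	file, err := hcl.Parse(`port = 8080`)
+//	if err == nil {
+//		err = hcl.DecodeObject(&out, file)
+//	}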
+package hcl

+ 219 - 0
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go

@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+	node()
+	Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+	Node     Node            // usually a *ObjectList
+	Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+	return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+	Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+	o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
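+//
+// For example, given items with keys ["service", "web"] and
+// ["service", "db"], Filter("service") returns items with keys ["web"]
+// and ["db"]; Children of that result yields both items and Elem yields
+// none.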
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item in the list
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+	// Keys is one element long if the item is an assignment. If it's a
+	// nested object it can be longer than one. In that case Assign is
+	// invalid, as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// Assign contains the position of "=", if any.
+	Assign token.Pos
+
+	// Val is the item itself. It can be an object, list, number, bool or a
+	// string. If the key length is larger than one, Val can only be of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL List type.
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type.
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //-style, #-style, or /*-style comment.
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
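Filter, Children, and Elem are the main ways to navigate an ObjectList, so a short sketch of the Filter flow is worth having next to the definitions. It assumes parser.Parse as the entry point; the service blocks are invented:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := `
service "web" {
  port = 80
}

service "api" {
  port = 8080
}
`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		log.Fatal(err)
	}

	list, ok := f.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("file node is not an *ast.ObjectList")
	}

	// Filter keeps the items whose keys start with "service" and strips that
	// prefix, so the remaining first key is the service name.
	for _, item := range list.Filter("service").Items {
		fmt.Println(item.Keys[0].Token.Value()) // web, then api
	}
}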

+ 52 - 0
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go

@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the
+// returned bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the node that was passed in.
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	fn(nil)
+	return rewritten
+}
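A small sketch of Walk in use, counting literal values in a parsed file. One subtlety from the implementation above: Walk also calls fn(nil) after visiting a node's children, so the callback must tolerate a nil node. The input is invented:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte(`
a = 1
b = "two"
c = [3, 4]
`))
	if err != nil {
		log.Fatal(err)
	}

	count := 0
	ast.Walk(f, func(n ast.Node) (ast.Node, bool) {
		// The nil call after each node's children simply fails this type
		// assertion, so no special casing is needed.
		if _, ok := n.(*ast.LiteralType); ok {
			count++
		}
		return n, true // keep the node unchanged and keep walking
	})

	fmt.Println("literals:", count) // 4: 1, "two", 3, 4
}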

+ 17 - 0
vendor/github.com/hashicorp/hcl/hcl/parser/error.go

@@ -0,0 +1,17 @@
+package parser
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+	Pos token.Pos
+	Err error
+}
+
+func (e *PosError) Error() string {
+	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}

+ 532 - 0
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go

@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/scanner"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	// Normalize all line endings: the scanner and output only work with
+	// "\n" line endings, so without this we could end up with dangling
+	// "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the source held by the parser and returns the abstract
+// syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+	}
+
+	f.Node, err = p.objectList(false)
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	f.Comments = p.comments
+	return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because we might want to use already
+		// collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// object lists can be optionally comma-delimited e.g. when a list of maps
+		// is being expressed, so a comma is allowed here - it's simply consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
+	}
+	return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+	endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.tok.Text); i++ {
+			if p.tok.Text[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+	p.tok = p.sc.Scan()
+	return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.tok.Pos.Line
+
+	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// We have keys with no value, and we're likely in an object
+		// (since RBrace ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine. See
+		// objectType which uses p.tok.Type to check if we're done with
+		// the object.
+		p.tok.Type = token.EOF
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	if p.leadComment != nil {
+		o.LeadComment = p.leadComment
+		p.leadComment = nil
+	}
+
+	switch p.tok.Type {
+	case token.ASSIGN:
+		o.Assign = p.tok.Pos
+		o.Val, err = p.object()
+		if err != nil {
+			return nil, err
+		}
+	case token.LBRACE:
+		o.Val, err = p.objectType()
+		if err != nil {
+			return nil, err
+		}
+	default:
+		keyStr := make([]string, 0, len(keys))
+		for _, k := range keys {
+			keyStr = append(keyStr, k.Token.Text)
+		}
+
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// key=#comment
+	// val
+	if p.lineComment != nil {
+		o.LineComment, p.lineComment = p.lineComment, nil
+	}
+
+	// do a look-ahead for line comment
+	p.scan()
+	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+		o.LineComment = p.lineComment
+		p.lineComment = nil
+	}
+	p.unscan()
+	return o, nil
+}
+
+// objectKey parses an object key and returns a list of ObjectKey ASTs
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			// It is very important to also return the keys here as well as
+			// the error. This is because we need to be able to tell if we
+			// did parse keys prior to finding the EOF, or if we just found
+			// a bare EOF.
+			return keys, errEofToken
+		case token.ASSIGN:
+			// assignment or object only, but not nested objects. this is not
+			// allowed: `foo bar = {}`
+			if keyCount > 1 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+				}
+			}
+
+			if keyCount == 0 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: errors.New("no object keys found!"),
+				}
+			}
+
+			return keys, nil
+		case token.LBRACE:
+			var err error
+
+			// If we have no keys, then it is a syntax error, i.e. {{}} is not
+			// allowed.
+			if len(keys) == 0 {
+				err = &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+				}
+			}
+
+			// object
+			return keys, err
+		case token.IDENT, token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{Token: p.tok})
+		case token.ILLEGAL:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
+		default:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+			}
+		}
+	}
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.COMMENT:
+		// implement comment
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, &PosError{
+		Pos: tok.Pos,
+		Err: fmt.Errorf("Unknown token: %+v", tok),
+	}
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{
+		Lbrace: p.tok.Pos,
+	}
+
+	l, err := p.objectList(true)
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
+	}
+
+	o.List = l
+	o.Rbrace = p.tok.Pos // advanced via parseObjectList
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{
+		Lbrack: p.tok.Pos,
+	}
+
+	needComma := false
+	for {
+		tok := p.scan()
+		if needComma {
+			switch tok.Type {
+			case token.COMMA, token.RBRACK:
+			default:
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error parsing list, expected comma or list end, got: %s",
+						tok.Type),
+				}
+			}
+		}
+		switch tok.Type {
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
+			l.Add(node)
+			needComma = true
+		case token.COMMA:
+			// get next list item or we are at the end
+			// do a look-ahead for line comment
+			p.scan()
+			if p.lineComment != nil && len(l.List) > 0 {
+				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+				if ok {
+					lit.LineComment = p.lineComment
+					l.List[len(l.List)-1] = lit
+					p.lineComment = nil
+				}
+			}
+			p.unscan()
+
+			needComma = false
+			continue
+		case token.LBRACE:
+			// Looks like a nested object, so parse it out
+			node, err := p.objectType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse object within list: %s", err),
+				}
+			}
+			l.Add(node)
+			needComma = true
+		case token.LBRACK:
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
+		case token.RBRACK:
+			// finished
+			l.Rbrack = p.tok.Pos
+			return l, nil
+		default:
+			return nil, &PosError{
+				Pos: tok.Pos,
+				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+			}
+		}
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok,
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	// Otherwise read the next token from the scanner and save it to the buffer
+	// in case we unscan later.
+	prev := p.tok
+	p.tok = p.sc.Scan()
+
+	if p.tok.Type == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+		// p.tok.Pos.Line, prev.Pos.Line, endline)
+		if p.tok.Pos.Line == prev.Pos.Line {
+			// The comment is on the same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.tok.Pos.Line != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok.Type == token.COMMENT {
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+			switch p.tok.Type {
+			case token.RBRACE, token.RBRACK:
+				// Do not count for these cases
+			default:
+				// The next token is following on the line immediately after the
+				// comment group, thus the last comment group is a lead comment.
+				p.leadComment = comment
+			}
+		}
+
+	}
+
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
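For callers driving the parser directly, a minimal sketch of the error path: malformed input typically comes back as a *PosError, so the message already carries the offending position. The bad input below is contrived:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// A bare "=" has no key in front of it, which trips the
	// "no object keys found!" branch in objectKey above.
	if _, err := parser.Parse([]byte(`= "value"`)); err != nil {
		fmt.Println("parse failed:", err) // position included via PosError
	}
}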

+ 789 - 0
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go

@@ -0,0 +1,789 @@
+package printer
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+	blank    = byte(' ')
+	newline  = byte('\n')
+	tab      = byte('\t')
+	infinity = 1 << 30 // offset or line
+)
+
+var (
+	unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+	cfg  Config
+	prev token.Pos
+
+	comments           []*ast.CommentGroup // may be nil, contains all comments
+	standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+	enableTrace bool
+	indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int           { return len(b) }
+func (b ByPosition) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not lead or line comments.
+func (p *printer) collectComments(node ast.Node) {
+	// first collect all comments. These are already stored in
+	// ast.File.Comments.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.File:
+			p.comments = t.Comments
+			return nn, false
+		}
+		return nn, true
+	})
+
+	standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+	for _, c := range p.comments {
+		standaloneComments[c.Pos()] = c
+	}
+
+	// next remove all lead and line comments from the overall comment map.
+	// This leaves us with the standalone comments: comments which are not
+	// assigned to any kind of node.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.LiteralType:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		case *ast.ObjectItem:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		}
+
+		return nn, true
+	})
+
+	for _, c := range standaloneComments {
+		p.standaloneComments = append(p.standaloneComments, c)
+	}
+
+	sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates the printable HCL output and returns it.
+func (p *printer) output(n interface{}) []byte {
+	var buf bytes.Buffer
+
+	switch t := n.(type) {
+	case *ast.File:
+		// File doesn't trace so we add the tracing here
+		defer un(trace(p, "File"))
+		return p.output(t.Node)
+	case *ast.ObjectList:
+		defer un(trace(p, "ObjectList"))
+
+		var index int
+		for {
+			// Determine the location of the next actual non-comment
+			// item. If we're at the end, the next item is at "infinity"
+			var nextItem token.Pos
+			if index != len(t.Items) {
+				nextItem = t.Items[index].Pos()
+			} else {
+				nextItem = token.Pos{Offset: infinity, Line: infinity}
+			}
+
+			// Go through the standalone comments in the file and print out
+			// the comments that should be printed for this object item.
+			for _, c := range p.standaloneComments {
+				// Go through all the comments in the group. The group
+				// should be printed together, not separated by double newlines.
+				printed := false
+				newlinePrinted := false
+				for _, comment := range c.List {
+					// We only care about comments after the previous item
+					// we've printed so that comments are printed in the
+					// correct locations (between two objects for example).
+					// And before the next item.
+					if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+						// If we hit the end, add newlines so we can print the
+						// comment. We don't do this if prev is invalid, which
+						// means the beginning of the file, since the first
+						// comment should be on the first line.
+						if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+							buf.Write([]byte{newline, newline})
+							newlinePrinted = true
+						}
+
+						// Write the actual comment.
+						buf.WriteString(comment.Text)
+						buf.WriteByte(newline)
+
+						// Set printed to true to note that we printed something
+						printed = true
+					}
+				}
+
+				// If we're not at the last item, write a new line so
+				// that there is a newline separating this comment from
+				// the next object.
+				if printed && index != len(t.Items) {
+					buf.WriteByte(newline)
+				}
+			}
+
+			if index == len(t.Items) {
+				break
+			}
+
+			buf.Write(p.output(t.Items[index]))
+			if index != len(t.Items)-1 {
+				// Always write a newline to separate us from the next item
+				buf.WriteByte(newline)
+
+				// Need to determine if we're going to separate the next item
+				// with a blank line. The logic here is simple, though there
+				// are a few conditions:
+				//
+				//   1. The next object is more than one line away anyways,
+				//      so we need an empty line.
+				//
+				//   2. The next object is not a "single line" object, so
+				//      we need an empty line.
+				//
+				//   3. This current object is not a single line object,
+				//      so we need an empty line.
+				current := t.Items[index]
+				next := t.Items[index+1]
+				if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+					!p.isSingleLineObject(next) ||
+					!p.isSingleLineObject(current) {
+					buf.WriteByte(newline)
+				}
+			}
+			index++
+		}
+	case *ast.ObjectKey:
+		buf.WriteString(t.Token.Text)
+	case *ast.ObjectItem:
+		p.prev = t.Pos()
+		buf.Write(p.objectItem(t))
+	case *ast.LiteralType:
+		buf.Write(p.literalType(t))
+	case *ast.ListType:
+		buf.Write(p.list(t))
+	case *ast.ObjectType:
+		buf.Write(p.objectType(t))
+	default:
+		fmt.Printf(" unknown type: %T\n", n)
+	}
+
+	return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+	result := []byte(lit.Token.Text)
+	switch lit.Token.Type {
+	case token.HEREDOC:
+		// Clear the trailing newline from heredocs
+		if result[len(result)-1] == '\n' {
+			result = result[:len(result)-1]
+		}
+
+		// Poison lines 2+ so that we don't indent them
+		result = p.heredocIndent(result)
+	case token.STRING:
+		// If this is a multiline string, poison lines 2+ so we don't
+		// indent them.
+		if bytes.IndexRune(result, '\n') >= 0 {
+			result = p.heredocIndent(result)
+		}
+	}
+
+	return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object item
+// starts with one or more keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+	defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+	var buf bytes.Buffer
+
+	if o.LeadComment != nil {
+		for _, comment := range o.LeadComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	// If key and val are on different lines, treat line comments like lead comments.
+	if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	for i, k := range o.Keys {
+		buf.WriteString(k.Token.Text)
+		buf.WriteByte(blank)
+
+		// reach end of key
+		if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+			buf.WriteString("=")
+			buf.WriteByte(blank)
+		}
+	}
+
+	buf.Write(p.output(o.Val))
+
+	if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+		buf.WriteByte(blank)
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with an opening brace and ends with a closing brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+	defer un(trace(p, "ObjectType"))
+	var buf bytes.Buffer
+	buf.WriteString("{")
+
+	var index int
+	var nextItem token.Pos
+	var commented, newlinePrinted bool
+	for {
+		// Determine the location of the next actual non-comment
+		// item. If we're at the end, the next item is the closing brace
+		if index != len(o.List.Items) {
+			nextItem = o.List.Items[index].Pos()
+		} else {
+			nextItem = o.Rbrace
+		}
+
+		// Go through the standalone comments in the file and print out
+		// the comments that should be printed for this object item.
+		for _, c := range p.standaloneComments {
+			printed := false
+			var lastCommentPos token.Pos
+			for _, comment := range c.List {
+				// We only care about comments after the previous item
+				// we've printed so that comments are printed in the
+				// correct locations (between two objects for example).
+				// And before the next item.
+				if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+					// If there are standalone comments and the initial newline has not
+					// been printed yet, do it now.
+					if !newlinePrinted {
+						newlinePrinted = true
+						buf.WriteByte(newline)
+					}
+
+					// add newline if it's between other printed nodes
+					if index > 0 {
+						commented = true
+						buf.WriteByte(newline)
+					}
+
+					// Store this position
+					lastCommentPos = comment.Pos()
+
+					// output the comment itself
+					buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+					// Set printed to true to note that we printed something
+					printed = true
+
+					/*
+						if index != len(o.List.Items) {
+							buf.WriteByte(newline) // do not print on the end
+						}
+					*/
+				}
+			}
+
+			// Stuff to do if we had comments
+			if printed {
+				// Always write a newline
+				buf.WriteByte(newline)
+
+				// If there is another item in the object and our comment
+				// didn't hug it directly, then make sure there is a blank
+				// line separating them.
+				if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+					buf.WriteByte(newline)
+				}
+			}
+		}
+
+		if index == len(o.List.Items) {
+			p.prev = o.Rbrace
+			break
+		}
+
+		// At this point we are sure that it's not a totally empty block: print
+		// the initial newline if it hasn't been printed yet by the previous
+		// block about standalone comments.
+		if !newlinePrinted {
+			buf.WriteByte(newline)
+			newlinePrinted = true
+		}
+
+		// check if we have adjacent one-liner items. If yes, we are going to
+		// align the comments.
+		var aligned []*ast.ObjectItem
+		for _, item := range o.List.Items[index:] {
+			// we don't group one line lists
+			if len(o.List.Items) == 1 {
+				break
+			}
+
+			// one means a one-liner without any lead comment,
+			// two means a one-liner with a lead comment, and
+			// anything larger is not treated as a one-liner
+			cur := lines(string(p.objectItem(item)))
+			if cur > 2 {
+				break
+			}
+
+			curPos := item.Pos()
+
+			nextPos := token.Pos{}
+			if index != len(o.List.Items)-1 {
+				nextPos = o.List.Items[index+1].Pos()
+			}
+
+			prevPos := token.Pos{}
+			if index != 0 {
+				prevPos = o.List.Items[index-1].Pos()
+			}
+
+			// fmt.Println("DEBUG ----------------")
+			// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+			// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+			// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+			if curPos.Line+1 == nextPos.Line {
+				aligned = append(aligned, item)
+				index++
+				continue
+			}
+
+			if curPos.Line-1 == prevPos.Line {
+				aligned = append(aligned, item)
+				index++
+
+				// finish if we have a new line or comment next. This happens
+				// if the next item is not adjacent
+				if curPos.Line+1 != nextPos.Line {
+					break
+				}
+				continue
+			}
+
+			break
+		}
+
+		// put newlines if the items are between other non-aligned items.
+		// newlines are also added if there is a standalone comment already, so
+		// check it too
+		if !commented && index != len(aligned) {
+			buf.WriteByte(newline)
+		}
+
+		if len(aligned) >= 1 {
+			p.prev = aligned[len(aligned)-1].Pos()
+
+			items := p.alignedItems(aligned)
+			buf.Write(p.indent(items))
+		} else {
+			p.prev = o.List.Items[index].Pos()
+
+			buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+			index++
+		}
+
+		buf.WriteByte(newline)
+	}
+
+	buf.WriteString("}")
+	return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+	var buf bytes.Buffer
+
+	// find the longest key and value length, needed for alignment
+	var longestKeyLen int // longest key length
+	var longestValLen int // longest value length
+	for _, item := range items {
+		key := len(item.Keys[0].Token.Text)
+		val := len(p.output(item.Val))
+
+		if key > longestKeyLen {
+			longestKeyLen = key
+		}
+
+		if val > longestValLen {
+			longestValLen = val
+		}
+	}
+
+	for i, item := range items {
+		if item.LeadComment != nil {
+			for _, comment := range item.LeadComment.List {
+				buf.WriteString(comment.Text)
+				buf.WriteByte(newline)
+			}
+		}
+
+		for i, k := range item.Keys {
+			keyLen := len(k.Token.Text)
+			buf.WriteString(k.Token.Text)
+			for i := 0; i < longestKeyLen-keyLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			// reach end of key
+			if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+				buf.WriteString("=")
+				buf.WriteByte(blank)
+			}
+		}
+
+		val := p.output(item.Val)
+		valLen := len(val)
+		buf.Write(val)
+
+		if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+			for i := 0; i < longestValLen-valLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range item.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		// do not print for the last item
+		if i != len(items)-1 {
+			buf.WriteByte(newline)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+	if p.isSingleLineList(l) {
+		return p.singleLineList(l)
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("[")
+	buf.WriteByte(newline)
+
+	var longestLine int
+	for _, item := range l.List {
+		// for now we assume that the list only contains literal types
+		if lit, ok := item.(*ast.LiteralType); ok {
+			lineLen := len(lit.Token.Text)
+			if lineLen > longestLine {
+				longestLine = lineLen
+			}
+		}
+	}
+
+	haveEmptyLine := false
+	for i, item := range l.List {
+		// If we have a lead comment, then we want to write that first
+		leadComment := false
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+			leadComment = true
+
+			// Ensure an empty line before every element with a
+			// lead comment (except the first item in a list).
+			if !haveEmptyLine && i != 0 {
+				buf.WriteByte(newline)
+			}
+
+			for _, comment := range lit.LeadComment.List {
+				buf.Write(p.indent([]byte(comment.Text)))
+				buf.WriteByte(newline)
+			}
+		}
+
+		// also indent each line
+		val := p.output(item)
+		curLen := len(val)
+		buf.Write(p.indent(val))
+
+		// if this item is a heredoc, then we output the comma on
+		// the next line. This is the only case where this happens.
+		comma := []byte{','}
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+			comma = p.indent(comma)
+		}
+
+		buf.Write(comma)
+
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+			// if the next item doesn't have any comments, do not align
+			buf.WriteByte(blank) // align one space
+			for i := 0; i < longestLine-curLen; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range lit.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		buf.WriteByte(newline)
+
+		// Ensure an empty line after every element with a
+		// lead comment (except the first item in a list).
+		haveEmptyLine = leadComment && i != len(l.List)-1
+		if haveEmptyLine {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * the list consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+	for _, item := range l.List {
+		if item.Pos().Line != l.Lbrack.Line {
+			return false
+		}
+
+		lit, ok := item.(*ast.LiteralType)
+		if !ok {
+			return false
+		}
+
+		if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+			return false
+		}
+
+		if lit.LineComment != nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+	buf := &bytes.Buffer{}
+
+	buf.WriteString("[")
+	for i, item := range l.List {
+		if i != 0 {
+			buf.WriteString(", ")
+		}
+
+		// Output the item itself
+		buf.Write(p.output(item))
+
+		// The heredoc marker needs to be at the end of line.
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// indent indents each non-empty line of the given buffer
+func (p *printer) indent(buf []byte) []byte {
+	var prefix []byte
+	if p.cfg.SpacesWidth != 0 {
+		for i := 0; i < p.cfg.SpacesWidth; i++ {
+			prefix = append(prefix, blank)
+		}
+	} else {
+		prefix = []byte{tab}
+	}
+
+	var res []byte
+	bol := true
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, prefix...)
+		}
+
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+	var res []byte
+	for i := 0; i < len(buf); i++ {
+		skip := len(buf)-i <= len(unindent)
+		if !skip {
+			skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+		}
+		if skip {
+			res = append(res, buf[i])
+			continue
+		}
+
+		// We have a marker. We have to backtrack here and clean out
+		// any whitespace ahead of our tombstone up to a \n
+		for j := len(res) - 1; j >= 0; j-- {
+			if res[j] == '\n' {
+				break
+			}
+
+			res = res[:j]
+		}
+
+		// Skip the entire unindent marker
+		i += len(unindent) - 1
+	}
+
+	return res
+}
+
+// heredocIndent marks all the 2nd and further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+	var res []byte
+	bol := false
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, unindent...)
+		}
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+//   * has no lead comments (those force multiple lines)
+//   * has no assignment
+//   * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+	// If there is a lead comment, can't be one line
+	if val.LeadComment != nil {
+		return false
+	}
+
+	// If there is assignment, we always break by line
+	if val.Assign.IsValid() {
+		return false
+	}
+
+	// If it isn't an object type, then it's not a single line object
+	ot, ok := val.Val.(*ast.ObjectType)
+	if !ok {
+		return false
+	}
+
+	// If the object has no items, it is single line!
+	return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+	endline := 1
+	for i := 0; i < len(txt); i++ {
+		if txt[i] == '\n' {
+			endline++
+		}
+	}
+	return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	i := 2 * p.indentTrace
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+	p.printTrace(msg, "(")
+	p.indentTrace++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+	p.indentTrace--
+	p.printTrace(")")
+}

+ 66 - 0
vendor/github.com/hashicorp/hcl/hcl/printer/printer.go

@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+	"bytes"
+	"io"
+	"text/tabwriter"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+	SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+	SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+	p := &printer{
+		cfg:                *c,
+		comments:           make([]*ast.CommentGroup, 0),
+		standaloneComments: make([]*ast.CommentGroup, 0),
+		// enableTrace:        true,
+	}
+
+	p.collectComments(node)
+
+	if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+		return err
+	}
+
+	// flush tabwriter, if any
+	var err error
+	if tw, _ := output.(*tabwriter.Writer); tw != nil {
+		err = tw.Flush()
+	}
+
+	return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+	return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+	node, err := parser.Parse(src)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if err := DefaultConfig.Fprint(&buf, node); err != nil {
+		return nil, err
+	}
+
+	// Add trailing newline to result
+	buf.WriteString("\n")
+	return buf.Bytes(), nil
+}
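Format is the simplest entry point into this package: parse, pretty-print with DefaultConfig (two-space indentation), and append a trailing newline. A quick sketch over invented, badly laid out input:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	// Invented input with messy spacing and layout.
	messy := []byte(`server "web" {   port=80
enabled    = true }`)

	formatted, err := printer.Format(messy)
	if err != nil {
		log.Fatal(err)
	}
	// Expect normalized indentation and aligned assignments.
	fmt.Printf("%s", formatted)
}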

+ 652 - 0
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go

@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner. If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to streaming
+	// read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPosition always starts with 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == utf8.RuneError && size == 1 {
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	if ch == '\x00' {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
+	if ch == '\uE123' {
+		s.err("unicode code point U+E123 reserved for internal use")
+		return utf8.RuneError
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moves the offset by one (the size of
+	// the rune, actually), though we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		tok = token.IDENT
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '#', '/':
+			tok = token.COMMENT
+			s.scanComment(ch)
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '<':
+			tok = token.HEREDOC
+			s.scanHeredoc()
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case '=':
+			tok = token.ASSIGN
+		case '+':
+			tok = token.ADD
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				tok = token.SUB
+			}
+		default:
+			s.err("illegal char")
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+func (s *Scanner) scanComment(ch rune) {
+	// single line comments
+	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		if ch == '/' && s.peek() != '/' {
+			s.err("expected '/' for comment")
+			return
+		}
+
+		ch = s.next()
+		for ch != '\n' && ch >= 0 && ch != eof {
+			ch = s.next()
+		}
+		if ch != eof && ch >= 0 {
+			s.unread()
+		}
+		return
+	}
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+	if ch == '/' {
+		s.next()
+		ch = s.next() // read character after "/*"
+	}
+
+	// look for /* - style comments
+	for {
+		if ch < 0 || ch == eof {
+			s.err("comment not terminated")
+			break
+		}
+
+		ch0 := ch
+		ch = s.next()
+		if ch0 == '*' && ch == '/' {
+			break
+		}
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	if ch == '0' {
+		// check for hexadecimal, octal or float
+		ch = s.next()
+		if ch == 'x' || ch == 'X' {
+			// hexadecimal
+			ch = s.next()
+			found := false
+			for isHexadecimal(ch) {
+				ch = s.next()
+				found = true
+			}
+
+			if !found {
+				s.err("illegal hexadecimal number")
+			}
+
+			if ch != eof {
+				s.unread()
+			}
+
+			return token.NUMBER
+		}
+
+		// now it's either something like: 0421 (octal) or 0.1231 (float)
+		illegalOctal := false
+		for isDecimal(ch) {
+			ch = s.next()
+			if ch == '8' || ch == '9' {
+				// this is just a possibility. For example 0159 is illegal, but
+				// 0159.23 is valid. So we mark a possible illegal octal. If
+				// the next character is not a period, we'll print the error.
+				illegalOctal = true
+			}
+		}
+
+		if ch == 'e' || ch == 'E' {
+			ch = s.scanExponent(ch)
+			return token.FLOAT
+		}
+
+		if ch == '.' {
+			ch = s.scanFraction(ch)
+
+			if ch == 'e' || ch == 'E' {
+				ch = s.next()
+				ch = s.scanExponent(ch)
+			}
+			return token.FLOAT
+		}
+
+		if illegalOctal {
+			s.err("illegal octal number")
+		}
+
+		if ch != eof {
+			s.unread()
+		}
+		return token.NUMBER
+	}
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// Special newline handling.
+		if ch == '\n' {
+			// Math is fast, so we first compare the byte counts to see if we have a chance
+			// of seeing the same identifier - if the length is less than the number of bytes
+			// in the identifier, this cannot be a valid terminator.
+			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base, n times. For example an
+// octal notation \184 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start && ch != eof {
+		// we scanned all digits, put the last non digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If the function is
+// not defined, it prints the error to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
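The scanner is also usable on its own. A short sketch that tokenizes one line and stops at token.EOF; the input is invented:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	s := scanner.New([]byte(`port = 8080 // listener`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Type, Text, and Pos are the fields set by Scan above.
		fmt.Printf("%-8v %q at %v\n", tok.Type, tok.Text, tok.Pos)
	}
}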

+ 241 - 0
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go

@@ -0,0 +1,241 @@
+package strconv
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning the
+// string value that s quotes. Unlike the standard library version, the body
+// of a ${...} interpolation is copied through without being unescaped.
+func Unquote(s string) (t string, err error) {
+	n := len(s)
+	if n < 2 {
+		return "", ErrSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", ErrSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote != '"' {
+		return "", ErrSyntax
+	}
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+		return "", ErrSyntax
+	}
+
+	// Is it trivial?  Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+		switch quote {
+		case '"':
+			return s, nil
+		case '\'':
+			r, size := utf8.DecodeRuneInString(s)
+			if size == len(s) && (r != utf8.RuneError || size != 1) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		// If we're starting a '${}' then let it through un-unquoted.
+		// Specifically: we don't unquote any characters within the `${}`
+		// section.
+		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+			buf = append(buf, '$', '{')
+			s = s[2:]
+
+			// Continue reading until we find the closing brace, copying as-is
+			braces := 1
+			for len(s) > 0 && braces > 0 {
+				r, size := utf8.DecodeRuneInString(s)
+				if r == utf8.RuneError {
+					return "", ErrSyntax
+				}
+
+				s = s[size:]
+
+				n := utf8.EncodeRune(runeTmp[:], r)
+				buf = append(buf, runeTmp[:n]...)
+
+				switch r {
+				case '{':
+					braces++
+				case '}':
+					braces--
+				}
+			}
+			if braces != 0 {
+				return "", ErrSyntax
+			}
+			if len(s) == 0 {
+				// If there's no string left, we're done!
+				break
+			} else {
+				// If there's more left, we need to pop back up to the top of the loop
+				// in case there's another interpolation in this string.
+				continue
+			}
+		}
+
+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
+		c, multibyte, ss, err := unquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+		if quote == '\'' && len(s) != 0 {
+			// single-quoted must be single character
+			return "", ErrSyntax
+		}
+	}
+	return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == quote && (quote == '\'' || quote == '"'):
+		err = ErrSyntax
+		return
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// hard case: c is backslash
+	if len(s) <= 1 {
+		err = ErrSyntax
+		return
+	}
+	c := s[1]
+	s = s[2:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case 'x', 'u', 'U':
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = ErrSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = ErrSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = ErrSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = ErrSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"':
+		if c != quote {
+			err = ErrSyntax
+			return
+		}
+		value = rune(c)
+	default:
+		err = ErrSyntax
+		return
+	}
+	tail = s
+	return
+}

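A short usage sketch for the exported `Unquote` above (the inputs are illustrative): it unescapes like the standard library, except that the body of a `${...}` interpolation is copied through verbatim so the escapes inside it survive for HCL's interpolation engine.

```go
package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// Ordinary escapes are interpreted as usual.
	s, err := hclstrconv.Unquote(`"a\tb"`)
	fmt.Printf("%q %v\n", s, err) // "a\tb" <nil>

	// The contents of a ${...} interpolation pass through unmodified,
	// including the escaped quotes.
	s, err = hclstrconv.Unquote(`"${file(\"x\")}"`)
	fmt.Printf("%q %v\n", s, err) // "${file(\\\"x\\\")}" <nil>
}
```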
+ 46 - 0
vendor/github.com/hashicorp/hcl/hcl/token/position.go

@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}

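For reference, a tiny sketch of the four `Pos.String` output forms documented above; the filename and coordinates are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	fmt.Println(token.Pos{Filename: "main.hcl", Line: 3, Column: 7}) // main.hcl:3:7
	fmt.Println(token.Pos{Line: 3, Column: 7})                       // 3:7
	fmt.Println(token.Pos{Filename: "main.hcl"})                     // main.hcl (invalid: Line is 0)
	fmt.Println(token.Pos{})                                         // -
}
```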
+ 219 - 0
vendor/github.com/hashicorp/hcl/hcl/token/token.go

@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+	JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+	COMMENT
+
+	identifier_beg
+	IDENT // literals
+	literal_beg
+	NUMBER  // 12345
+	FLOAT   // 123.45
+	BOOL    // true,false
+	STRING  // "abc"
+	HEREDOC // <<FOO\nbar\nFOO
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+
+	RBRACK // ]
+	RBRACE // }
+
+	ASSIGN // =
+	ADD    // +
+	SUB    // -
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF:     "EOF",
+	COMMENT: "COMMENT",
+
+	IDENT:  "IDENT",
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+
+	LBRACK:  "LBRACK",
+	LBRACE:  "LBRACE",
+	COMMA:   "COMMA",
+	PERIOD:  "PERIOD",
+	HEREDOC: "HEREDOC",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+
+	ASSIGN: "ASSIGN",
+	ADD:    "ADD",
+	SUB:    "SUB",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token: its
+// position, type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+	switch t.Type {
+	case BOOL:
+		if t.Text == "true" {
+			return true
+		} else if t.Text == "false" {
+			return false
+		}
+
+		panic("unknown bool value: " + t.Text)
+	case FLOAT:
+		v, err := strconv.ParseFloat(t.Text, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return float64(v)
+	case NUMBER:
+		v, err := strconv.ParseInt(t.Text, 0, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return int64(v)
+	case IDENT:
+		return t.Text
+	case HEREDOC:
+		return unindentHeredoc(t.Text)
+	case STRING:
+		// Determine the Unquote method to use. If it came from JSON,
+		// then we need to use the built-in unquote since we have to
+		// escape interpolations there.
+		f := hclstrconv.Unquote
+		if t.JSON {
+			f = strconv.Unquote
+		}
+
+		// This case occurs if json null is used
+		if t.Text == "" {
+			return ""
+		}
+
+		v, err := f(t.Text)
+		if err != nil {
+			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+		}
+
+		return v
+	default:
+		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+	}
+}
+
+// unindentHeredoc returns the string content of a HEREDOC. If the heredoc
+// starts with <<, the content is returned as-is. If it starts with <<- and
+// every line is at least as indented as the terminating marker, that hanging
+// indent is stripped from each line.
+func unindentHeredoc(heredoc string) string {
+	// We need to find the end of the marker
+	idx := strings.IndexByte(heredoc, '\n')
+	if idx == -1 {
+		panic("heredoc doesn't contain newline")
+	}
+
+	unindent := heredoc[2] == '-'
+
+	// We can optimize if the heredoc isn't marked for indentation
+	if !unindent {
+		return string(heredoc[idx+1 : len(heredoc)-idx+1])
+	}
+
+	// We need to unindent each line based on the indentation level of the marker
+	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+	whitespacePrefix := lines[len(lines)-1]
+
+	isIndented := true
+	for _, v := range lines {
+		if strings.HasPrefix(v, whitespacePrefix) {
+			continue
+		}
+
+		isIndented = false
+		break
+	}
+
+	// If all lines are not at least as indented as the terminating mark, return the
+	// heredoc as is, but trim the leading space from the marker on the final line.
+	if !isIndented {
+		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+	}
+
+	unindentedLines := make([]string, len(lines))
+	for k, v := range lines {
+		if k == len(lines)-1 {
+			unindentedLines[k] = ""
+			break
+		}
+
+		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+	}
+
+	return strings.Join(unindentedLines, "\n")
+}

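A quick sketch of `Token.Value` above, which converts the literal text according to the token type; the literals are illustrative. Note that `NUMBER` is parsed with base 0, so `0x` and `0` prefixes are honored.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	fmt.Println(token.Token{Type: token.NUMBER, Text: "0x2A"}.Value()) // int64(42)
	fmt.Println(token.Token{Type: token.BOOL, Text: "true"}.Value())   // true
	// The HCL unquoter turns \t into a real tab here.
	fmt.Println(token.Token{Type: token.STRING, Text: `"a\tb"`}.Value())
}
```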
+ 117 - 0
vendor/github.com/hashicorp/hcl/json/parser/flatten.go

@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens
+func flattenObjects(node ast.Node) {
+	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+		// We only care about lists, because this is what we modify
+		list, ok := n.(*ast.ObjectList)
+		if !ok {
+			return n, true
+		}
+
+		// Rebuild the item list
+		items := make([]*ast.ObjectItem, 0, len(list.Items))
+		frontier := make([]*ast.ObjectItem, len(list.Items))
+		copy(frontier, list.Items)
+		for len(frontier) > 0 {
+			// Pop the current item
+			n := len(frontier)
+			item := frontier[n-1]
+			frontier = frontier[:n-1]
+
+			switch v := item.Val.(type) {
+			case *ast.ObjectType:
+				items, frontier = flattenObjectType(v, item, items, frontier)
+			case *ast.ListType:
+				items, frontier = flattenListType(v, item, items, frontier)
+			default:
+				items = append(items, item)
+			}
+		}
+
+		// Reverse the list since the frontier model runs things backwards
+		for i := len(items)/2 - 1; i >= 0; i-- {
+			opp := len(items) - 1 - i
+			items[i], items[opp] = items[opp], items[i]
+		}
+
+		// Done! Set the original items
+		list.Items = items
+		return n, true
+	})
+}
+
+func flattenListType(
+	ot *ast.ListType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this list must also be objects!
+	for _, subitem := range ot.List {
+		if _, ok := subitem.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match; go through all the items and flatten
+	for _, elem := range ot.List {
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        item.Keys,
+			Assign:      item.Assign,
+			Val:         elem,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
+
+func flattenObjectType(
+	ot *ast.ObjectType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list has no items we do not have to flatten anything
+	if ot.List.Items == nil {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this object must also be objects!
+	for _, subitem := range ot.List.Items {
+		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match; go through all the items and flatten
+	for _, subitem := range ot.List.Items {
+		// Copy the new key
+		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+		copy(keys, item.Keys)
+		copy(keys[len(item.Keys):], subitem.Keys)
+
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        keys,
+			Assign:      item.Assign,
+			Val:         subitem.Val,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}

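The loop above pops work off the end of a `frontier` stack, so completed items accumulate in reverse and `flattenObjects` reverses them once at the end. A self-contained sketch of the same pattern on a toy tree (the `node` type is hypothetical, standing in for `*ast.ObjectItem`):

```go
package main

import "fmt"

// node stands in for an AST item: either a leaf value or a list of children.
type node struct {
	val      string
	children []*node
}

func flatten(root []*node) []string {
	var out []string
	frontier := make([]*node, len(root))
	copy(frontier, root)
	for len(frontier) > 0 {
		// Pop the last item, exactly like flattenObjects does.
		n := len(frontier)
		item := frontier[n-1]
		frontier = frontier[:n-1]

		if len(item.children) > 0 {
			// Push children back onto the frontier instead of recursing.
			frontier = append(frontier, item.children...)
			continue
		}
		out = append(out, item.val)
	}

	// The stack emitted items backwards; reverse once to restore order.
	for i := len(out)/2 - 1; i >= 0; i-- {
		opp := len(out) - 1 - i
		out[i], out[opp] = out[opp], out[i]
	}
	return out
}

func main() {
	tree := []*node{
		{val: "a"},
		{children: []*node{{val: "b"}, {val: "c"}}},
		{val: "d"},
	}
	fmt.Println(flatten(tree)) // [a b c d]
}
```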
+ 313 - 0
vendor/github.com/hashicorp/hcl/json/parser/parser.go

@@ -0,0 +1,313 @@
+package parser
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+	"github.com/hashicorp/hcl/json/scanner"
+	"github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = fmt.Errorf("%s: %s", pos, msg)
+	}
+
+	// The root must be an object in JSON
+	object, err := p.object()
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// We make our final node an object list so it is more HCL compatible
+	f.Node = object.List
+
+	// Flatten it, which finds patterns and turns them into more HCL-like
+	// AST trees.
+	flattenObjects(f.Node)
+
+	return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because the caller might want to use
+		// the already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// Check for a followup comma. If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// If we have a zero keycount it means that we never got
+			// an object key, i.e. `{ :`. This is a syntax error.
+			if keyCount == 0 {
+				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+			}
+
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			return nil, errors.New("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of value, such as number, bool, string, object
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root JSON object, which must begin with a left brace.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not a RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists? Even though it's
+			// written in README of HCL, it's not a part of the grammar
+			// (not defined in parse.y)
+		case token.RBRACK:
+			// finished
+			return l, nil
+		default:
+			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+		}
+
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok.HCLToken(),
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	p.tok = p.sc.Scan()
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}

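A minimal usage sketch for this parser (the configuration literal is illustrative). `Parse` requires the root to be a JSON object and exposes it as an HCL-compatible `*ast.ObjectList`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	f, err := parser.Parse([]byte(`{"port": 8080, "tags": ["a", "b"]}`))
	if err != nil {
		log.Fatal(err)
	}
	// The root node is the flattened, HCL-compatible object list.
	fmt.Printf("root: %T\n", f.Node)
}
```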
+ 451 - 0
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go

@@ -0,0 +1,451 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end  position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner.  If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader compatible type
+	// (*bytes.Buffer), so in the future we could easily switch to a streaming
+	// reader.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPosition always starts with 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns eof (rune(0))
+// if an error occurs or io.EOF is reached.
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() already moved the offset by one rune
+	// (by its size in bytes, actually), but we are interested in the starting
+	// point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// If the number has more than one digit and started with a zero, error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune, which is used to determine whether the number
+// continues as a fraction or an exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example an
+// octal notation \184 would yield in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all the digits; put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr instead.
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}

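A small sketch of driving the scanner directly, looping until `token.EOF`; the input is illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"a": [1, true]}`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// e.g. LBRACE, STRING "a", COLON, LBRACK, NUMBER 1, COMMA, BOOL true, ...
		fmt.Printf("%-7s %q at %s\n", tok.Type, tok.Text, tok.Pos)
	}
}
```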
+ 46 - 0
vendor/github.com/hashicorp/hcl/json/token/position.go

@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}

+ 118 - 0
vendor/github.com/hashicorp/hcl/json/token/token.go

@@ -0,0 +1,118 @@
+package token
+
+import (
+	"fmt"
+	"strconv"
+
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+
+	identifier_beg
+	literal_beg
+	NUMBER // 12345
+	FLOAT  // 123.45
+	BOOL   // true,false
+	STRING // "abc"
+	NULL   // null
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+	COLON  // :
+
+	RBRACK // ]
+	RBRACE // }
+
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF: "EOF",
+
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+	NULL:   "NULL",
+
+	LBRACK: "LBRACK",
+	LBRACE: "LBRACE",
+	COMMA:  "COMMA",
+	PERIOD: "PERIOD",
+	COLON:  "COLON",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token: its
+// position, type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}

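A brief sketch of the `HCLToken` mapping above: JSON `null` becomes an empty HCL `STRING`, while JSON strings carry `JSON: true` so that `Value()` later unquotes them with the standard library rules rather than the HCL ones.

```go
package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	null := jsontoken.Token{Type: jsontoken.NULL, Text: "null"}
	str := jsontoken.Token{Type: jsontoken.STRING, Text: `"x"`}
	fmt.Printf("%+v\n", null.HCLToken()) // Type:STRING Text:"" JSON:false
	fmt.Printf("%+v\n", str.HCLToken())  // Type:STRING Text:"\"x\"" JSON:true
}
```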
+ 38 - 0
vendor/github.com/hashicorp/hcl/lex.go

@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}

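The detection rule above boils down to: skip leading whitespace and treat the input as JSON iff the first significant rune is `{`. A standalone sketch of the same check (re-implemented here because `lexMode` is unexported):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// isJSON mirrors the lexMode rule: JSON iff the first non-space rune is '{'.
func isJSON(src string) bool {
	trimmed := strings.TrimLeftFunc(src, unicode.IsSpace)
	return strings.HasPrefix(trimmed, "{")
}

func main() {
	fmt.Println(isJSON(`  {"a": 1}`)) // true  -> JSON parser
	fmt.Println(isJSON("a = 1\n"))    // false -> HCL parser
}
```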
+ 39 - 0
vendor/github.com/hashicorp/hcl/parse.go

@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hclParser "github.com/hashicorp/hcl/hcl/parser"
+	jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}

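A usage sketch for the exported entry point, feeding it equivalent content in both syntaxes; the snippets are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// Parse accepts either syntax; dispatch is the lexMode check above.
	for _, src := range []string{`port = 8080`, `{"port": 8080}`} {
		f, err := hcl.Parse(src)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("parsed %q -> root %T\n", src, f.Node)
	}
}
```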
+ 6 - 0
vendor/github.com/magiconair/properties/.gitignore

@@ -0,0 +1,6 @@
+*.sublime-project
+*.sublime-workspace
+*.un~
+*.swp
+.idea/
+*.iml

+ 12 - 0
vendor/github.com/magiconair/properties/.travis.yml

@@ -0,0 +1,12 @@
+language: go
+go:
+    - 1.4.x
+    - 1.5.x
+    - 1.6.x
+    - 1.7.x
+    - 1.8.x
+    - 1.9.x
+    - "1.10.x"
+    - "1.11.x"
+    - "1.12.x"
+    - tip

+ 139 - 0
vendor/github.com/magiconair/properties/CHANGELOG.md

@@ -0,0 +1,139 @@
+## Changelog
+
+### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019
+
+ * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request
+
+   This patch ensures that in `LoadURL` the response body is always closed.
+
+   Thanks to [@liubog2008](https://github.com/liubog2008) for the patch.
+
+### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018
+
+ * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading
+
+   This adds the option to disable property expansion during loading.
+
+   Thanks to [@kmala](https://github.com/kmala) for the patch.
+
+### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018
+
+ * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.
+
+   See PR for an example.
+
+   Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018
+
+ * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value
+
+   Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail
+   with a `circular reference error`.
+
+   Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017
+
+ * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces
+
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+
+   Thanks to [@mgurov](https://github.com/mgurov) for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017
+
+ * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
+ * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
+### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from a UTF-8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+   [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+  filenames and value expressions
+* Fixed bug where value expressions were not at the
+  start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
+
+* Initial release

+ 25 - 0
vendor/github.com/magiconair/properties/LICENSE

@@ -0,0 +1,25 @@
+goproperties - properties file decoder for Go
+
+Copyright (c) 2013-2018 - Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 129 - 0
vendor/github.com/magiconair/properties/README.md

@@ -0,0 +1,129 @@
+[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases)
+[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties)
+[![CircleCI Status](https://img.shields.io/circleci/project/github/magiconair/properties.svg?label=circle+ci&style=flat-square)](https://circleci.com/gh/magiconair/properties)
+[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE)
+[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+# Overview
+
+#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) for why.
+
+properties is a Go library for reading and writing properties files.
+
+It supports reading from multiple files or URLs and Spring style recursive
+property expansion of expressions like `${key}` to their corresponding value.
+Value expressions can refer to other keys like in `${key}` or to environment
+variables like in `${USER}`.  Filenames can also contain environment variables
+like in `/home/${USER}/myapp.properties`.
+
+Properties can be decoded into structs, maps, arrays and values through
+struct tags.
+
+Comments and the order of keys are preserved. Comments can be modified
+and can be written to the output.
+
+The properties library supports both ISO-8859-1 and UTF-8 encoded data.
+
+Starting from version 1.3.0 the behavior of the MustXXX() functions is
+configurable by providing a custom `ErrorHandler` function. The default has
+changed from `panic` to `log.Fatal` but this is configurable and custom
+error handling functions can be provided. See the package documentation for
+details.
+
+Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+## Getting Started
+
+```go
+import (
+	"flag"
+	"log"
+	"time"
+
+	"github.com/magiconair/properties"
+)
+
+func main() {
+	// init from a file
+	p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
+
+	// or multiple files
+	p = properties.MustLoadFiles([]string{
+			"${HOME}/config.properties",
+			"${HOME}/config-${USER}.properties",
+		}, properties.UTF8, true)
+
+	// or from a map
+	p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+	// or from a string
+	p = properties.MustLoadString("key=value\nabc=def")
+
+	// or from a URL
+	p = properties.MustLoadURL("http://host/path")
+
+	// or from multiple URLs
+	p = properties.MustLoadURLs([]string{
+			"http://host/config",
+			"http://host/config-${USER}",
+		}, true)
+
+	// or from flags
+	p.MustFlag(flag.CommandLine)
+
+	// get values through getters
+	host := p.MustGetString("host")
+	port := p.GetInt("port", 8080)
+
+	// or through Decode
+	type Config struct {
+		Host    string        `properties:"host"`
+		Port    int           `properties:"port,default=9000"`
+		Accept  []string      `properties:"accept,default=image/png;image/gif"`
+		Timeout time.Duration `properties:"timeout,default=5s"`
+	}
+	var cfg Config
+	if err := p.Decode(&cfg); err != nil {
+		log.Fatal(err)
+	}
+}
+
+```
+
+## Installation and Upgrade
+
+```
+$ go get -u github.com/magiconair/properties
+```
+
+## License
+
+2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
+
+## ToDo
+
+* Dump contents with passwords and secrets obscured
+
+## Updated Git tags
+
+#### 13 Feb 2018
+
+I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags
+and I've only recently learned that this doesn't play well with `git describe` 😞
+
+I have replaced all lightweight tags with signed tags using this script which should
+retain the commit date, name and email address. Please run `git pull --tags` to update them.
+
+Worst case you have to reclone the repo.
+
+```shell
+#!/bin/bash
+tag=$1
+echo "Updating $tag"
+date=$(git show ${tag}^0 --format=%aD | head -1)
+email=$(git show ${tag}^0 --format=%aE | head -1)
+name=$(git show ${tag}^0 --format=%aN | head -1)
+GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag}
+```
+
+I apologize for the inconvenience.
+
+Frank
+

+ 289 - 0
vendor/github.com/magiconair/properties/decode.go

@@ -0,0 +1,289 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses x recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a comma separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+//     // Field is ignored.
+//     Field int `properties:"-"`
+//
+//     // Field is assigned value of 'Field'.
+//     Field int
+//
+//     // Field is assigned value of 'myName'.
+//     Field int `properties:"myName"`
+//
+//     // Field is assigned value of key 'myName' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:"myName,default=15"`
+//
+//     // Field is assigned value of key 'Field' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:",default=15"`
+//
+//     // Field is assigned value of key 'date' and the date
+//     // is in format 2006-01-02
+//     Field time.Time `properties:"date,layout=2006-01-02"`
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas.
+//     Field []string
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas and has a default
+//     // value ["a", "b", "c"] if the key does not exist.
+//     Field []string `properties:",default=a;b;c"`
+//
+//     // Field is decoded recursively with "Field." as key prefix.
+//     Field SomeStruct
+//
+//     // Field is decoded recursively with "myName." as key prefix.
+//     Field SomeStruct `properties:"myName"`
+//
+//     // Field is decoded recursively with "Field." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string
+//
+//     // Field is decoded recursively with "myName." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+	t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+	if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+		return fmt.Errorf("not a pointer to struct: %s", t)
+	}
+	if err := dec(p, "", nil, nil, v); err != nil {
+		return err
+	}
+	return nil
+}
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+	t := v.Type()
+
+	// value returns the property value for key or the default if provided.
+	value := func() (string, error) {
+		if val, ok := p.Get(key); ok {
+			return val, nil
+		}
+		if def != nil {
+			return *def, nil
+		}
+		return "", fmt.Errorf("missing required key %s", key)
+	}
+
+	// conv converts a string to a value of the given type.
+	conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+		var v interface{}
+
+		switch {
+		case isDuration(t):
+			v, err = time.ParseDuration(s)
+
+		case isTime(t):
+			layout := opts["layout"]
+			if layout == "" {
+				layout = time.RFC3339
+			}
+			v, err = time.Parse(layout, s)
+
+		case isBool(t):
+			v, err = boolVal(s), nil
+
+		case isString(t):
+			v, err = s, nil
+
+		case isFloat(t):
+			v, err = strconv.ParseFloat(s, 64)
+
+		case isInt(t):
+			v, err = strconv.ParseInt(s, 10, 64)
+
+		case isUint(t):
+			v, err = strconv.ParseUint(s, 10, 64)
+
+		default:
+			return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+		}
+		if err != nil {
+			return reflect.Zero(t), err
+		}
+		return reflect.ValueOf(v).Convert(t), nil
+	}
+
+	// keydef returns the property key and the default value based on the
+	// name of the struct field and the options in the tag.
+	keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+		_key, _opts := parseTag(f.Tag.Get("properties"))
+
+		var _def *string
+		if d, ok := _opts["default"]; ok {
+			_def = &d
+		}
+		if _key != "" {
+			return _key, _def, _opts
+		}
+		return f.Name, _def, _opts
+	}
+
+	switch {
+	case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+		s, err := value()
+		if err != nil {
+			return err
+		}
+		val, err := conv(s, t)
+		if err != nil {
+			return err
+		}
+		v.Set(val)
+
+	case isPtr(t):
+		return dec(p, key, def, opts, v.Elem())
+
+	case isStruct(t):
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			fk, def, opts := keydef(t.Field(i))
+			if !fv.CanSet() {
+				return fmt.Errorf("cannot set %s", t.Field(i).Name)
+			}
+			if fk == "-" {
+				continue
+			}
+			if key != "" {
+				fk = key + "." + fk
+			}
+			if err := dec(p, fk, def, opts, fv); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case isArray(t):
+		val, err := value()
+		if err != nil {
+			return err
+		}
+		vals := split(val, ";")
+		a := reflect.MakeSlice(t, 0, len(vals))
+		for _, s := range vals {
+			val, err := conv(s, t.Elem())
+			if err != nil {
+				return err
+			}
+			a = reflect.Append(a, val)
+		}
+		v.Set(a)
+
+	case isMap(t):
+		valT := t.Elem()
+		m := reflect.MakeMap(t)
+		for postfix := range p.FilterStripPrefix(key + ".").m {
+			pp := strings.SplitN(postfix, ".", 2)
+			mk, mv := pp[0], reflect.New(valT)
+			if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+				return err
+			}
+			m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+		}
+		v.Set(m)
+
+	default:
+		return fmt.Errorf("unsupported type %s", t)
+	}
+	return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+	var a []string
+	for _, v := range strings.Split(s, sep) {
+		if v = strings.TrimSpace(v); v != "" {
+			a = append(a, v)
+		}
+	}
+	return a
+}
+
+// parseTag parses a "key,k=v,k=v,..." tag into the key and an options map
+func parseTag(tag string) (key string, opts map[string]string) {
+	opts = map[string]string{}
+	for i, s := range strings.Split(tag, ",") {
+		if i == 0 {
+			key = s
+			continue
+		}
+
+		pp := strings.SplitN(s, "=", 2)
+		if len(pp) == 1 {
+			opts[pp[0]] = ""
+		} else {
+			opts[pp[0]] = pp[1]
+		}
+	}
+	return key, opts
+}
+
+func isArray(t reflect.Type) bool    { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool     { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
+func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool   { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool   { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+	return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+	return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+	return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}

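For orientation, a minimal, self-contained sketch of driving the `Decode` API above. The struct, keys, and values are illustrative only, and the import path is assumed to be the canonical `github.com/magiconair/properties`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/magiconair/properties"
)

// cfg exercises the tag forms documented above: renamed keys,
// defaults, durations, and semicolon-separated slice defaults.
type cfg struct {
	Host    string        `properties:"host,default=localhost"`
	Port    int           `properties:"port,default=8080"`
	Timeout time.Duration `properties:"timeout,default=5s"`
	Tags    []string      `properties:",default=a;b;c"`
}

func main() {
	p := properties.MustLoadString("host = example.com\nport = 9000\n")
	var c cfg
	if err := p.Decode(&c); err != nil { // must be a pointer to struct
		panic(err)
	}
	fmt.Printf("%+v\n", c)
	// {Host:example.com Port:9000 Timeout:5s Tags:[a b c]}
}
```
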
+ 156 - 0
vendor/github.com/magiconair/properties/doc.go

@@ -0,0 +1,156 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+//   p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+//   f1 := "/etc/myapp/myapp.conf"
+//   f2 := "/home/${USER}/myapp.conf"
+//   p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+//   ! this is a comment
+//   # and so is this
+//
+//   # the following expressions are equal
+//   key value
+//   key=value
+//   key:value
+//   key = value
+//   key : value
+//   key = val\
+//         ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+//   # standard property
+//   key = value
+//
+//   # property expansion: key2 = value
+//   key2 = ${key}
+//
+//   # recursive expansion: key3 = value
+//   key3 = ${key2}
+//
+//   # circular reference (error)
+//   key = ${key}
+//
+//   # malformed expression (error)
+//   key = ${ke
+//
+//   # refers to the users' home dir
+//   home = ${HOME}
+//
+//   # local key takes precedence over env var: u = foo
+//   USER = foo
+//   u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+//   p := properties.NewProperties()
+//   p.Prefix = "#["
+//   p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+//   # Returns true if the value is either "1", "on", "yes" or "true"
+//   # Returns false for every other value and the default value if
+//   # the key does not exist.
+//   v = p.GetBool("key", false)
+//
+//   # Returns the value if the key exists and the format conversion
+//   # was successful. Otherwise, the default value is returned.
+//   v = p.GetInt64("key", 999)
+//   v = p.GetUint64("key", 999)
+//   v = p.GetFloat64("key", 123.0)
+//   v = p.GetString("key", "def")
+//   v = p.GetDuration("key", 999)
+//
+// As an alternative, properties may be applied with the standard
+// library's flag implementation at any time.
+//
+//   # Standard configuration
+//   v = flag.Int("key", 999, "help message")
+//   flag.Parse()
+//
+//   # Merge p into the flag set
+//   p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error set a different ErrorHandler before
+// you use the Properties package.
+//
+//   properties.ErrorHandler = properties.PanicHandler
+//
+//   # Will panic instead of logging an error
+//   p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+//   properties.ErrorHandler = func(err error) {
+//       fmt.Println(err)
+//       os.Exit(1)
+//   }
+//
+//   # Will write to stdout and then exit
+//   p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+//   type S struct {
+//       A string        `properties:"a,default=foo"`
+//       D time.Duration `properties:"timeout,default=5s"`
+//       E time.Time     `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+//   }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+//
+package properties

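A short sketch of the expansion and typed-getter behavior described in the package comment above (key names illustrative):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString(
		"key = value\n" +
			"key2 = ${key}\n" + // expanded recursively on Get
			"flag = yes\n")

	fmt.Println(p.MustGetString("key2"))  // value
	fmt.Println(p.GetBool("flag", false)) // true: "1", "on", "yes" or "true"
	fmt.Println(p.GetInt("missing", 42))  // 42: default, key absent
}
```
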
+ 1 - 0
vendor/github.com/magiconair/properties/go.mod

@@ -0,0 +1 @@
+module github.com/magiconair/properties

+ 34 - 0
vendor/github.com/magiconair/properties/integrate.go

@@ -0,0 +1,34 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets flags that are skipped by dst.Parse when p contains
+// the respective key for flag.Flag.Name.
+//
+// Its use is recommended with command line arguments as in:
+// 	flag.Parse()
+// 	p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+	m := make(map[string]*flag.Flag)
+	dst.VisitAll(func(f *flag.Flag) {
+		m[f.Name] = f
+	})
+	dst.Visit(func(f *flag.Flag) {
+		delete(m, f.Name) // overridden
+	})
+
+	for name, f := range m {
+		v, ok := p.Get(name)
+		if !ok {
+			continue
+		}
+
+		if err := f.Value.Set(v); err != nil {
+			ErrorHandler(err)
+		}
+	}
+}

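A sketch of `MustFlag` in use: flags that were not passed on the command line pick up values from the properties. The flag name and property value are illustrative:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	port := flag.Int("port", 999, "listen port")
	flag.Parse()

	// Only flags absent from the command line are overridden.
	p := properties.MustLoadString("port = 8080\n")
	p.MustFlag(flag.CommandLine)

	fmt.Println(*port) // 8080, unless -port was given explicitly
}
```
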
+ 407 - 0
vendor/github.com/magiconair/properties/lex.go

@@ -0,0 +1,407 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the template/text/parser package
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ itemType // The type of this item.
+	pos int      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+	itemError itemType = iota // error occurred; value is text of error
+	itemEOF
+	itemKey     // a key
+	itemValue   // a value
+	itemComment // a comment
+)
+
+// eof is a sentinel rune marking the end of the input
+const eof = -1
+
+// whitespace is the set of permitted whitespace characters: space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	input   string    // the string being scanned
+	state   stateFn   // the next lexing function to enter
+	pos     int       // current position in the input
+	start   int       // start position of this item
+	width   int       // width of last rune read from input
+	lastPos int       // position of most recent item returned by nextItem
+	runes   []rune    // scanned runes for this item
+	items   chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+	if l.pos >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = w
+	l.pos += l.width
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+	l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+	i := item{t, l.start, string(l.runes)}
+	l.items <- i
+	l.start = l.pos
+	l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+	l.start = l.pos
+}
+
+// appendRune appends the rune to the current value
+func (l *lexer) appendRune(r rune) {
+	l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+	if strings.ContainsRune(valid, l.next()) {
+		return true
+	}
+	l.backup()
+	return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+	for strings.ContainsRune(valid, l.next()) {
+	}
+	l.backup()
+}
+
+// acceptRunUntil consumes a run of runes up to a terminator.
+func (l *lexer) acceptRunUntil(term rune) {
+	for term != l.next() {
+	}
+	l.backup()
+}
+
+// isNotEmpty returns true if the current parsed text is not empty.
+func (l *lexer) isNotEmpty() bool {
+	return l.pos > l.start
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+	return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+	return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+	i := <-l.items
+	l.lastPos = i.pos
+	return i
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+	l := &lexer{
+		input: input,
+		items: make(chan item),
+		runes: make([]rune, 0, 32),
+	}
+	go l.run()
+	return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+	for l.state = lexBeforeKey(l); l.state != nil; {
+		l.state = l.state(l)
+	}
+}
+
+// state functions
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+	switch r := l.next(); {
+	case isEOF(r):
+		l.emit(itemEOF)
+		return nil
+
+	case isEOL(r):
+		l.ignore()
+		return lexBeforeKey
+
+	case isComment(r):
+		return lexComment
+
+	case isWhitespace(r):
+		l.ignore()
+		return lexBeforeKey
+
+	default:
+		l.backup()
+		return lexKey
+	}
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.ignore()
+	for {
+		switch r := l.next(); {
+		case isEOF(r):
+			l.ignore()
+			l.emit(itemEOF)
+			return nil
+		case isEOL(r):
+			l.emit(itemComment)
+			return lexBeforeKey
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+	var r rune
+
+Loop:
+	for {
+		switch r = l.next(); {
+
+		case isEscape(r):
+			err := l.scanEscapeSequence()
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+
+		case isEndOfKey(r):
+			l.backup()
+			break Loop
+
+		case isEOF(r):
+			break Loop
+
+		default:
+			l.appendRune(r)
+		}
+	}
+
+	if len(l.runes) > 0 {
+		l.emit(itemKey)
+	}
+
+	if isEOF(r) {
+		l.emit(itemEOF)
+		return nil
+	}
+
+	return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.accept(":=")
+	l.acceptRun(whitespace)
+	l.ignore()
+	return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+	for {
+		switch r := l.next(); {
+		case isEscape(r):
+			if isEOL(l.peek()) {
+				l.next()
+				l.acceptRun(whitespace)
+			} else {
+				err := l.scanEscapeSequence()
+				if err != nil {
+					return l.errorf(err.Error())
+				}
+			}
+
+		case isEOL(r):
+			l.emit(itemValue)
+			l.ignore()
+			return lexBeforeKey
+
+		case isEOF(r):
+			l.emit(itemValue)
+			l.emit(itemEOF)
+			return nil
+
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+	switch r := l.next(); {
+
+	case isEscapedCharacter(r):
+		l.appendRune(decodeEscapedCharacter(r))
+		return nil
+
+	case atUnicodeLiteral(r):
+		return l.scanUnicodeLiteral()
+
+	case isEOF(r):
+		return fmt.Errorf("premature EOF")
+
+	// silently drop the escape character and append the rune as is
+	default:
+		l.appendRune(r)
+		return nil
+	}
+}
+
+// scanUnicodeLiteral scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+	// scan the digits
+	d := make([]rune, 4)
+	for i := 0; i < 4; i++ {
+		d[i] = l.next()
+		if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+			return fmt.Errorf("invalid unicode literal")
+		}
+	}
+
+	// decode the digits into a rune
+	r, err := strconv.ParseInt(string(d), 16, 0)
+	if err != nil {
+		return err
+	}
+
+	l.appendRune(rune(r))
+	return nil
+}
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+	switch r {
+	case 'f':
+		return '\f'
+	case 'n':
+		return '\n'
+	case 'r':
+		return '\r'
+	case 't':
+		return '\t'
+	default:
+		return r
+	}
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+	return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+	return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+	return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+	return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+	return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+	return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+	return strings.ContainsRune(whitespace, r)
+}

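The escape handling in this lexer is easiest to see end to end. In the sketch below the expected outputs follow from the rules above: `\uXXXX` literals are decoded, and a backslash before the newline continues the value:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString(
		"greeting = gr\\u00fc\\u00dfe\n" + // unicode literals
			"multi = first \\\n        second\n") // line continuation

	fmt.Println(p.MustGetString("greeting")) // grüße
	fmt.Println(p.MustGetString("multi"))    // first second
}
```
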
+ 292 - 0
vendor/github.com/magiconair/properties/load.go

@@ -0,0 +1,292 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+	// utf8Default is a private placeholder for the zero value of Encoding to
+	// ensure that it has the correct meaning. UTF8 is the default encoding but
+	// was assigned a non-zero value which cannot be changed without breaking
+	// existing code. Clients should continue to use the public constants.
+	utf8Default Encoding = iota
+
+	// UTF8 interprets the input data as UTF-8.
+	UTF8
+
+	// ISO_8859_1 interprets the input data as ISO-8859-1.
+	ISO_8859_1
+)
+
+type Loader struct {
+	// Encoding determines how the data from files and byte buffers
+	// is interpreted. For URLs the Content-Type header is used
+	// to determine the encoding of the data.
+	Encoding Encoding
+
+	// DisableExpansion configures the property expansion of the
+	// returned property object. When set to true, the property values
+	// will not be expanded and the Property object will not be checked
+	// for invalid expansion expressions.
+	DisableExpansion bool
+
+	// IgnoreMissing configures whether missing files or URLs which return
+	// 404 are reported as errors. When set to true, missing files and 404
+	// status codes are not reported as errors.
+	IgnoreMissing bool
+}
+
+// LoadBytes reads a buffer into a Properties struct.
+func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
+	return l.loadBytes(buf, l.Encoding)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into
+// a Properties struct. If IgnoreMissing is true then a 404 status code or
+// missing file will not be reported as error. Encoding sets the encoding for
+// files. For the URLs see LoadURL for the Content-Type header and the
+// encoding.
+func (l *Loader) LoadAll(names []string) (*Properties, error) {
+	all := NewProperties()
+	for _, name := range names {
+		n, err := expandName(name)
+		if err != nil {
+			return nil, err
+		}
+
+		var p *Properties
+		switch {
+		case strings.HasPrefix(n, "http://"):
+			p, err = l.LoadURL(n)
+		case strings.HasPrefix(n, "https://"):
+			p, err = l.LoadURL(n)
+		default:
+			p, err = l.LoadFile(n)
+		}
+		if err != nil {
+			return nil, err
+		}
+		all.Merge(p)
+	}
+
+	all.DisableExpansion = l.DisableExpansion
+	if all.DisableExpansion {
+		return all, nil
+	}
+	return all, all.check()
+}
+
+// LoadFile reads a file into a Properties struct.
+// If IgnoreMissing is true then a missing file will not be
+// reported as error.
+func (l *Loader) LoadFile(filename string) (*Properties, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		if l.IgnoreMissing && os.IsNotExist(err) {
+			LogPrintf("properties: %s not found. skipping", filename)
+			return NewProperties(), nil
+		}
+		return nil, err
+	}
+	return l.loadBytes(data, l.Encoding)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, 'iso-8859-1' or 'latin1' the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func (l *Loader) LoadURL(url string) (*Properties, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == 404 && l.IgnoreMissing {
+		LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
+		return NewProperties(), nil
+	}
+
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+	}
+
+	ct := resp.Header.Get("Content-Type")
+	var enc Encoding
+	switch strings.ToLower(ct) {
+	case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
+		enc = ISO_8859_1
+	case "", "text/plain; charset=utf-8":
+		enc = UTF8
+	default:
+		return nil, fmt.Errorf("properties: invalid content type %s", ct)
+	}
+
+	return l.loadBytes(body, enc)
+}
+
+func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
+	p, err := parse(convert(buf, enc))
+	if err != nil {
+		return nil, err
+	}
+	p.DisableExpansion = l.DisableExpansion
+	if p.DisableExpansion {
+		return p, nil
+	}
+	return p, p.check()
+}
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+	l := &Loader{Encoding: enc}
+	return l.LoadBytes(buf)
+}
+
+// LoadString reads a UTF-8 string into a Properties struct.
+func LoadString(s string) (*Properties, error) {
+	l := &Loader{Encoding: UTF8}
+	return l.LoadBytes([]byte(s))
+}
+
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+	p := NewProperties()
+	for k, v := range m {
+		p.Set(k, v)
+	}
+	return p
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+	l := &Loader{Encoding: enc}
+	return l.LoadAll([]string{filename})
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(filenames)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+// See Loader#LoadURL for details.
+func LoadURL(url string) (*Properties, error) {
+	l := &Loader{Encoding: UTF8}
+	return l.LoadAll([]string{url})
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If IgnoreMissing is true then a 404 status code will
+// not be reported as error. See Loader#LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(urls)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding.
+func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(names)
+}
+
+// MustLoadString reads a UTF-8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+	return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+	return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+	return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+	return must(LoadURLs(urls, ignoreMissing))
+}
+
+// MustLoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding. It panics on error.
+func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadAll(names, enc, ignoreMissing))
+}
+
+func must(p *Properties, err error) *Properties {
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return p
+}
+
+// expandName expands ${ENV_VAR} expressions in a name.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as error.
+func expandName(name string) (string, error) {
+	return expand(name, []string{}, "${", "}", make(map[string]string))
+}
+
+// convert interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded
+// string. For ISO-8859-1 we can convert each byte straight into a rune since
+// the first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+	switch enc {
+	case utf8Default, UTF8:
+		return string(buf)
+	case ISO_8859_1:
+		runes := make([]rune, len(buf))
+		for i, b := range buf {
+			runes[i] = rune(b)
+		}
+		return string(runes)
+	default:
+		ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+	}
+	panic("ErrorHandler should exit")
+}

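A sketch of the `Loader` above with `IgnoreMissing` enabled; the file paths are hypothetical, and `${HOME}` is expanded by `expandName` before loading:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	l := &properties.Loader{Encoding: properties.UTF8, IgnoreMissing: true}

	// Missing files (or URLs returning 404) are skipped, not errors.
	p, err := l.LoadAll([]string{"/etc/myapp/myapp.conf", "${HOME}/myapp.conf"})
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Len(), "properties loaded")
}
```
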
+ 95 - 0
vendor/github.com/magiconair/properties/parser.go

@@ -0,0 +1,95 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type parser struct {
+	lex *lexer
+}
+
+func parse(input string) (properties *Properties, err error) {
+	p := &parser{lex: lex(input)}
+	defer p.recover(&err)
+
+	properties = NewProperties()
+	key := ""
+	comments := []string{}
+
+	for {
+		token := p.expectOneOf(itemComment, itemKey, itemEOF)
+		switch token.typ {
+		case itemEOF:
+			goto done
+		case itemComment:
+			comments = append(comments, token.val)
+			continue
+		case itemKey:
+			key = token.val
+			if _, ok := properties.m[key]; !ok {
+				properties.k = append(properties.k, key)
+			}
+		}
+
+		token = p.expectOneOf(itemValue, itemEOF)
+		if len(comments) > 0 {
+			properties.c[key] = comments
+			comments = []string{}
+		}
+		switch token.typ {
+		case itemEOF:
+			properties.m[key] = ""
+			goto done
+		case itemValue:
+			properties.m[key] = token.val
+		}
+	}
+
+done:
+	return properties, nil
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+	format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
+	panic(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expect(expected itemType) (token item) {
+	token = p.lex.nextItem()
+	if token.typ != expected {
+		p.unexpected(token)
+	}
+	return token
+}
+
+func (p *parser) expectOneOf(expected ...itemType) (token item) {
+	token = p.lex.nextItem()
+	for _, v := range expected {
+		if token.typ == v {
+			return token
+		}
+	}
+	p.unexpected(token)
+	panic("unexpected token")
+}
+
+func (p *parser) unexpected(token item) {
+	p.errorf(token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		*errp = e.(error)
+	}
+	return
+}

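The parser attaches the comment lines preceding a key to that key. A small sketch of observing this through the public API (key and comments illustrative):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("# primary host\n! legacy alias\nhost = localhost\n")

	fmt.Println(p.GetComments("host")) // [primary host legacy alias]
	fmt.Println(p.GetComment("host"))  // legacy alias (the last comment)
}
```
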
+ 833 - 0
vendor/github.com/magiconair/properties/properties.go

@@ -0,0 +1,833 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow configuring the newline character. Therefore, on Windows LF is used.
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+const maxExpansionDepth = 64
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the function prototype for logging errors.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf defines a log handler which uses log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+	log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+	panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+	// Pre-/Postfix for property expansion.
+	Prefix  string
+	Postfix string
+
+	// DisableExpansion controls the expansion of properties on Get()
+	// and the check for circular references on Set(). When set to
+	// true Properties behaves like a simple key/value store and does
+	// not check for circular references on Get() or on Set().
+	DisableExpansion bool
+
+	// Stores the key/value pairs
+	m map[string]string
+
+	// Stores the comments per key.
+	c map[string][]string
+
+	// Stores the keys in order of appearance.
+	k []string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+	return &Properties{
+		Prefix:  "${",
+		Postfix: "}",
+		m:       map[string]string{},
+		c:       map[string][]string{},
+		k:       []string{},
+	}
+}
+
+// Load reads a buffer into the given Properties struct.
+func (p *Properties) Load(buf []byte, enc Encoding) error {
+	l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
+	newProperties, err := l.LoadBytes(buf)
+	if err != nil {
+		return err
+	}
+	p.Merge(newProperties)
+	return nil
+}
+
+// Get returns the expanded value for the given key if it exists.
+// Otherwise, ok is false.
+func (p *Properties) Get(key string) (value string, ok bool) {
+	v, ok := p.m[key]
+	if p.DisableExpansion {
+		return v, ok
+	}
+	if !ok {
+		return "", false
+	}
+
+	expanded, err := p.expand(key, v)
+
+	// we guarantee that the expanded value is free of
+	// circular references and malformed expressions
+	// so we panic if we still get an error here.
+	if err != nil {
+		ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
+	}
+
+	return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if it exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+	p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+	comments, ok := p.c[key]
+	if !ok || len(comments) == 0 {
+		return ""
+	}
+	return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+	if comments, ok := p.c[key]; ok {
+		return comments
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key.
+func (p *Properties) SetComment(key, comment string) {
+	p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+func (p *Properties) SetComments(key string, comments []string) {
+	if comments == nil {
+		delete(p.c, key)
+		return
+	}
+	p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getBool(key string) (value bool, err error) {
+	if v, ok := p.Get(key); ok {
+		return boolVal(v), nil
+	}
+	return false, invalidKeyError(key)
+}
+
+func boolVal(v string) bool {
+	v = strings.ToLower(v)
+	return v == "1" || v == "true" || v == "yes" || v == "on"
+}
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as a time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as a time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		return def
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		ErrorHandler(invalidKeyError(key))
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseFloat(v, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getInt64(key string) (value int64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if it exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	return def
+}
+
+// MustGetString returns the expanded value for the given key if it exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if re.MatchString(k) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	n := len(prefix)
+	for _, k := range p.k {
+		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+			// TODO(fs): this function should probably return an error but the signature is fixed
+			pp.Set(k[n:], p.m[k])
+		}
+	}
+	return pp
+}
+
+// Len returns the number of keys.
+func (p *Properties) Len() int {
+	return len(p.m)
+}
+
+// Keys returns all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+	keys := make([]string, len(p.k))
+	copy(keys, p.k)
+	return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+	if key == "" {
+		return "", false, nil
+	}
+
+	// if expansion is disabled we allow circular references
+	if p.DisableExpansion {
+		prev, ok = p.Get(key)
+		p.m[key] = value
+		if !ok {
+			p.k = append(p.k, key)
+		}
+		return prev, ok, nil
+	}
+
+	// to check for a circular reference we temporarily need
+	// to set the new value. If there is an error then revert
+	// to the previous state. Only if all tests are successful
+	// then we add the key to the p.k list.
+	prev, ok = p.Get(key)
+	p.m[key] = value
+
+	// now check for a circular reference
+	_, err = p.expand(key, value)
+	if err != nil {
+
+		// revert to the previous state
+		if ok {
+			p.m[key] = prev
+		} else {
+			delete(p.m, key)
+		}
+
+		return "", false, err
+	}
+
+	if !ok {
+		p.k = append(p.k, key)
+	}
+
+	return prev, ok, nil
+}
+
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
+	return err
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
+	prev, ok, err := p.Set(key, value)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+	var s string
+	for _, key := range p.k {
+		value, _ := p.Get(key)
+		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+	}
+	return s
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+	return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+	var x int
+
+	for _, key := range p.k {
+		value := p.m[key]
+
+		if prefix != "" {
+			if comments, ok := p.c[key]; ok {
+				// don't print comments if they are all empty
+				allEmpty := true
+				for _, c := range comments {
+					if c != "" {
+						allEmpty = false
+						break
+					}
+				}
+
+				if !allEmpty {
+					// add a blank line between entries but not at the top
+					if len(comments) > 0 && n > 0 {
+						x, err = fmt.Fprintln(w)
+						if err != nil {
+							return
+						}
+						n += x
+					}
+
+					for _, c := range comments {
+						x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
+						if err != nil {
+							return
+						}
+						n += x
+					}
+				}
+			}
+		}
+
+		x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
+		if err != nil {
+			return
+		}
+		n += x
+	}
+	return
+}
+
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+	m := make(map[string]string)
+	for k, v := range p.m {
+		m[k] = v
+	}
+	return m
+}
+
+// FilterFunc returns a copy of the properties which includes only the values
+// that passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+	pp := NewProperties()
+outer:
+	for k, v := range p.m {
+		for _, f := range filters {
+			if !f(k, v) {
+				continue outer
+			}
+		}
+		// copy the pair only after every filter has passed
+		pp.Set(k, v)
+	}
+	return pp
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments.
+func (p *Properties) Delete(key string) {
+	delete(p.m, key)
+	delete(p.c, key)
+	newKeys := []string{}
+	for _, k := range p.k {
+		if k != key {
+			newKeys = append(newKeys, k)
+		}
+	}
+	p.k = newKeys
+}
+
+// Merge merges properties, comments and keys from other *Properties into p.
+func (p *Properties) Merge(other *Properties) {
+	for k, v := range other.m {
+		p.m[k] = v
+	}
+	for k, v := range other.c {
+		p.c[k] = v
+	}
+
+outer:
+	for _, otherKey := range other.k {
+		for _, key := range p.k {
+			if otherKey == key {
+				continue outer
+			}
+		}
+		p.k = append(p.k, otherKey)
+	}
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+	for key, value := range p.m {
+		if _, err := p.expand(key, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Properties) expand(key, input string) (string, error) {
+	// no pre/postfix -> nothing to expand
+	if p.Prefix == "" && p.Postfix == "" {
+		return input, nil
+	}
+
+	return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
+// The function keeps track of the keys that were already expanded and stops if it
+// detects a circular reference or a malformed expression of the form '(prefix)key'.
+func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) {
+	if len(keys) > maxExpansionDepth {
+		return "", fmt.Errorf("expansion too deep")
+	}
+
+	for {
+		start := strings.Index(s, prefix)
+		if start == -1 {
+			return s, nil
+		}
+
+		keyStart := start + len(prefix)
+		keyLen := strings.Index(s[keyStart:], postfix)
+		if keyLen == -1 {
+			return "", fmt.Errorf("malformed expression")
+		}
+
+		end := keyStart + keyLen + len(postfix) - 1
+		key := s[keyStart : keyStart+keyLen]
+
+		// fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
+
+		for _, k := range keys {
+			if key == k {
+				return "", fmt.Errorf("circular reference")
+			}
+		}
+
+		val, ok := values[key]
+		if !ok {
+			val = os.Getenv(key)
+		}
+		newVal, err := expand(val, append(keys, key), prefix, postfix, values)
+		if err != nil {
+			return "", err
+		}
+		s = s[:start] + newVal + s[end+1:]
+	}
+}
+
+// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
+func encode(s string, special string, enc Encoding) string {
+	switch enc {
+	case UTF8:
+		return encodeUtf8(s, special)
+	case ISO_8859_1:
+		return encodeIso(s, special)
+	default:
+		panic(fmt.Sprintf("unsupported encoding %v", enc))
+	}
+}
+
+func encodeUtf8(s string, special string) string {
+	v := ""
+	for pos := 0; pos < len(s); {
+		r, w := utf8.DecodeRuneInString(s[pos:])
+		pos += w
+		v += escape(r, special)
+	}
+	return v
+}
+
+func encodeIso(s string, special string) string {
+	var r rune
+	var w int
+	var v string
+	for pos := 0; pos < len(s); {
+		switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+		case r < 1<<8: // single byte rune -> escape special chars only
+			v += escape(r, special)
+		case r < 1<<16: // two byte rune -> unicode literal
+			v += fmt.Sprintf("\\u%04x", r)
+		default: // more than two bytes per rune -> can't encode
+			v += "?"
+		}
+		pos += w
+	}
+	return v
+}
+
+func escape(r rune, special string) string {
+	switch r {
+	case '\f':
+		return "\\f"
+	case '\n':
+		return "\\n"
+	case '\r':
+		return "\\r"
+	case '\t':
+		return "\\t"
+	default:
+		if strings.ContainsRune(special, r) {
+			return "\\" + string(r)
+		}
+		return string(r)
+	}
+}
+
+func invalidKeyError(key string) error {
+	return fmt.Errorf("unknown property: %s", key)
+}

+ 31 - 0
vendor/github.com/magiconair/properties/rangecheck.go

@@ -0,0 +1,31 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"math"
+)
+
+// make this a var to overwrite it in a test
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+	if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+	if is32Bit && v > math.MaxUint32 {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return uint(v)
+}

+ 8 - 0
vendor/github.com/mitchellh/mapstructure/.travis.yml

@@ -0,0 +1,8 @@
+language: go
+
+go:
+  - "1.11.x"
+  - tip
+
+script:
+  - go test

+ 21 - 0
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md

@@ -0,0 +1,21 @@
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+  type. [GH-140]
+
+## 1.1.1
+
+* Fix panic that can happen in `decodePtr`
+
+## 1.1.0
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+  allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.

+ 21 - 0
vendor/github.com/mitchellh/mapstructure/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 46 - 0
vendor/github.com/mitchellh/mapstructure/README.md

@@ -0,0 +1,46 @@
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but it becomes a problem
+when you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
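+
+As a rough sketch of that two-pass flow (the `Person` type and JSON payload
+here are illustrative, not part of the library):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+type Person struct {
+	Name string
+}
+
+func main() {
+	data := []byte(`{"type": "person", "name": "Mitchell"}`)
+
+	// First pass: decode generically so we can inspect the "type" key.
+	var raw map[string]interface{}
+	if err := json.Unmarshal(data, &raw); err != nil {
+		panic(err)
+	}
+
+	// Second pass: decode the same map into the concrete struct.
+	if raw["type"] == "person" {
+		var p Person
+		if err := mapstructure.Decode(raw, &p); err != nil {
+			panic(err)
+		}
+		fmt.Println(p.Name) // Mitchell
+	}
+}
+```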

+ 217 - 0
vendor/github.com/mitchellh/mapstructure/decode_hooks.go

@@ -0,0 +1,217 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+	// Create variables here so we can reference them with the reflect pkg
+	var f1 DecodeHookFuncType
+	var f2 DecodeHookFuncKind
+
+	// Fill in the variables into this interface and the rest is done
+	// automatically using the reflect package.
+	potential := []interface{}{f1, f2}
+
+	v := reflect.ValueOf(h)
+	vt := v.Type()
+	for _, raw := range potential {
+		pt := reflect.ValueOf(raw).Type()
+		if vt.ConvertibleTo(pt) {
+			return v.Convert(pt).Interface()
+		}
+	}
+
+	return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from type to match the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IP{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		ip := net.ParseIP(data.(string))
+		if ip == nil {
+			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+		}
+
+		return ip, nil
+	}
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IPNet{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		_, net, err := net.ParseCIDR(data.(string))
+		return net, err
+	}
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Time{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.Parse(layout, data.(string))
+	}
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}

+ 50 - 0
vendor/github.com/mitchellh/mapstructure/error.go

@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}

+ 1 - 0
vendor/github.com/mitchellh/mapstructure/go.mod

@@ -0,0 +1 @@
+module github.com/mitchellh/mapstructure

+ 1149 - 0
vendor/github.com/mitchellh/mapstructure/mapstructure.go

@@ -0,0 +1,1149 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
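+
+// For instance, a Kind-based hook that trims whitespace from string inputs
+// (an illustrative sketch, not part of this package) could look like:
+//
+//	func trimHook(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) {
+//		if f == reflect.String && t == reflect.String {
+//			return strings.TrimSpace(data.(string)), nil
+//		}
+//		return data, nil
+//	}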
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//   - single values are converted to slices if required. Each
+	//     element is weakly decoded. For example: "4" can become []int{4}
+	//     if the target type is an int slice.
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. This
+	// defaults to "mapstructure"
+	TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+	config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+	// Keys are the keys of the structure which were successfully decoded
+	Keys []string
+
+	// Unused is a slice of keys that were found in the raw value but
+	// weren't decoded since there was no matching field in the result interface
+	Unused []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata: nil,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata: metadata,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata:         metadata,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+	var inputVal reflect.Value
+	if input != nil {
+		inputVal = reflect.ValueOf(input)
+
+		// We need to check here if input is a typed nil. Typed nils won't
+		// match the "input == nil" below so we check that here.
+		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
+			input = nil
+		}
+	}
+
+	if input == nil {
+		// If the data is nil, then we don't set anything, unless ZeroFields is set
+		// to true.
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		return nil
+	}
+
+	if !inputVal.IsValid() {
+		// If the input value is invalid, then we just set the value
+		// to be the zero value.
+		outVal.Set(reflect.Zero(outVal.Type()))
+		if d.config.Metadata != nil && name != "" {
+			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+		}
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = DecodeHookExec(
+			d.config.DecodeHook,
+			inputVal.Type(), outVal.Type(), input)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	outputKind := getKind(outVal)
+	switch outputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		return d.decode(name, data, val.Elem())
+	}
+
+	dataVal := reflect.ValueOf(data)
+
+	// If the input data is a pointer, and the assigned type is the dereference
+	// of that exact pointer, then indirect it so that we can assign it.
+	// Example: *string to string
+	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+		dataVal = reflect.Indirect(dataVal)
+	}
+
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Int:
+		i := dataVal.Int()
+		if i < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %d overflows uint",
+				name, i)
+		}
+		val.SetUint(uint64(i))
+	case dataKind == reflect.Uint:
+		val.SetUint(dataVal.Uint())
+	case dataKind == reflect.Float32:
+		f := dataVal.Float()
+		if f < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %f overflows uint",
+				name, f)
+		}
+		val.SetUint(uint64(f))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetUint(1)
+		} else {
+			val.SetUint(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetUint(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Bool:
+		val.SetBool(dataVal.Bool())
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Int() != 0)
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Uint() != 0)
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Float() != 0)
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		b, err := strconv.ParseBool(dataVal.String())
+		if err == nil {
+			val.SetBool(b)
+		} else if dataVal.String() == "" {
+			val.SetBool(false)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetFloat(float64(dataVal.Int()))
+	case dataKind == reflect.Uint:
+		val.SetFloat(float64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetFloat(dataVal.Float())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetFloat(1)
+		} else {
+			val.SetFloat(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+		if err == nil {
+			val.SetFloat(f)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Float64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetFloat(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// By default we overwrite keys in the current map
+	valMap := val
+
+	// If the map is nil or we're purposely zeroing fields, make a new map
+	if valMap.IsNil() || d.config.ZeroFields {
+		// Make a new map to hold our result
+		mapType := reflect.MapOf(valKeyType, valElemType)
+		valMap = reflect.MakeMap(mapType)
+	}
+
+	// Check input type and based on the input type jump to the proper func
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	switch dataVal.Kind() {
+	case reflect.Map:
+		return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+	case reflect.Struct:
+		return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+	case reflect.Array, reflect.Slice:
+		if d.config.WeaklyTypedInput {
+			return d.decodeMapFromSlice(name, dataVal, val, valMap)
+		}
+
+		fallthrough
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	// Special case for BC reasons (covered by tests)
+	if dataVal.Len() == 0 {
+		val.Set(valMap)
+		return nil
+	}
+
+	for i := 0; i < dataVal.Len(); i++ {
+		err := d.decode(
+			fmt.Sprintf("%s[%d]", name, i),
+			dataVal.Index(i).Interface(), val)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// Accumulate errors
+	errors := make([]string, 0)
+
+	// If the input data is empty, then we just match what the input data is.
+	if dataVal.Len() == 0 {
+		if dataVal.IsNil() {
+			if !val.IsNil() {
+				val.Set(dataVal)
+			}
+		} else {
+			// Set to empty allocated value
+			val.Set(valMap)
+		}
+
+		return nil
+	}
+
+	for _, k := range dataVal.MapKeys() {
+		fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+		// First decode the key into the proper type
+		currentKey := reflect.Indirect(reflect.New(valKeyType))
+		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		// Next decode the data into the proper type
+		v := dataVal.MapIndex(k).Interface()
+		currentVal := reflect.Indirect(reflect.New(valElemType))
+		if err := d.decode(fieldName, v, currentVal); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		valMap.SetMapIndex(currentKey, currentVal)
+	}
+
+	// Set the built up map to the value
+	val.Set(valMap)
+
+	// If we had errors, return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	typ := dataVal.Type()
+	for i := 0; i < typ.NumField(); i++ {
+		// Get the StructField first since this is a cheap operation. If the
+		// field is unexported, then ignore it.
+		f := typ.Field(i)
+		if f.PkgPath != "" {
+			continue
+		}
+
+		// Next get the actual value of this field and verify it is assignable
+		// to the map value.
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		tagParts := strings.Split(tagValue, ",")
+
+		// Determine the name of the key in the map
+		keyName := f.Name
+		if tagParts[0] != "" {
+			if tagParts[0] == "-" {
+				continue
+			}
+			keyName = tagParts[0]
+		}
+
+		// If "squash" is specified in the tag, we squash the field down.
+		squash := false
+		for _, tag := range tagParts[1:] {
+			if tag == "squash" {
+				squash = true
+				break
+			}
+		}
+		if squash && v.Kind() != reflect.Struct {
+			return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			err := d.decode(keyName, x.Interface(), vMap)
+			if err != nil {
+				return err
+			}
+
+			if squash {
+				for _, k := range vMap.MapKeys() {
+					valMap.SetMapIndex(k, vMap.MapIndex(k))
+				}
+			} else {
+				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+			}
+
+		default:
+			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+		}
+	}
+
+	if val.CanAddr() {
+		val.Set(valMap)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// If the input data is nil, then we want to just set the output
+	// pointer to be nil as well.
+	isNil := data == nil
+	if !isNil {
+		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+		case reflect.Chan,
+			reflect.Func,
+			reflect.Interface,
+			reflect.Map,
+			reflect.Ptr,
+			reflect.Slice:
+			isNil = v.IsNil()
+		}
+	}
+	if isNil {
+		if !val.IsNil() && val.CanSet() {
+			nilValue := reflect.New(val.Type()).Elem()
+			val.Set(nilValue)
+		}
+
+		return nil
+	}
+
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+	if val.CanSet() {
+		realVal := val
+		if realVal.IsNil() || d.config.ZeroFields {
+			realVal = reflect.New(valElemType)
+		}
+
+		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+			return err
+		}
+
+		val.Set(realVal)
+	} else {
+		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		if d.config.WeaklyTypedInput {
+			switch {
+			// Slice and array we use the normal logic
+			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+				break
+
+			// Empty maps turn into empty slices
+			case dataValKind == reflect.Map:
+				if dataVal.Len() == 0 {
+					val.Set(reflect.MakeSlice(sliceType, 0, 0))
+					return nil
+				}
+				// Create slice of maps of other sizes
+				return d.decodeSlice(name, []interface{}{data}, val)
+
+			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+				return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+			// All other types we try to convert to the slice type
+			// and "lift" it into it. i.e. a string becomes a string slice.
+			default:
+				// Just re-try this function with data as a slice.
+				return d.decodeSlice(name, []interface{}{data}, val)
+			}
+		}
+
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+
+		// If the input value is empty, then don't allocate since non-nil != nil
+		if dataVal.Len() == 0 {
+			return nil
+		}
+
+		// Make a new slice to hold our result, same size as the original data.
+		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		for valSlice.Len() <= i {
+			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+		}
+		currentField := valSlice.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the slice we built up
+	val.Set(valSlice)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+	valArray := val
+
+	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty arrays
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.Zero(arrayType))
+						return nil
+					}
+
+				// All other types we try to convert to the array type
+				// and "lift" it into it. i.e. a string becomes a string array.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeArray(name, []interface{}{data}, val)
+				}
+			}
+
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+		if dataVal.Len() > arrayType.Len() {
+			return fmt.Errorf(
+				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+
+		}
+
+		// Make a new array to hold our result, same size as the original data.
+		valArray = reflect.New(arrayType).Elem()
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		currentField := valArray.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the array we built up
+	val.Set(valArray)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+	// If the type of the value to write to and the data match directly,
+	// then we just set it directly instead of recursing into the structure.
+	if dataVal.Type() == val.Type() {
+		val.Set(dataVal)
+		return nil
+	}
+
+	dataValKind := dataVal.Kind()
+	switch dataValKind {
+	case reflect.Map:
+		return d.decodeStructFromMap(name, dataVal, val)
+
+	case reflect.Struct:
+		// Not the most efficient way to do this but we can optimize later if
+		// we want to. To convert from struct to struct we go to map first
+		// as an intermediary.
+		m := make(map[string]interface{})
+		mval := reflect.Indirect(reflect.ValueOf(&m))
+		if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
+			return err
+		}
+
+		result := d.decodeStructFromMap(name, mval, val)
+		return result
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldKind := fieldType.Type.Kind()
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := false
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldKind != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+				} else {
+					structs = append(structs, structVal.FieldByName(fieldType.Name))
+				}
+				continue
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if strings.EqualFold(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Just ignore.
+				continue
+			}
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+		keys := make([]string, 0, len(dataValKeysUnused))
+		for rawKey := range dataValKeysUnused {
+			keys = append(keys, rawKey.(string))
+		}
+		sort.Strings(keys)
+
+		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+		errors = appendErrors(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	// Add the unused keys to the list of unused keys if we're tracking metadata
+	if d.config.Metadata != nil {
+		for rawKey := range dataValKeysUnused {
+			key := rawKey.(string)
+			if name != "" {
+				key = fmt.Sprintf("%s.%s", name, key)
+			}
+
+			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+		}
+	}
+
+	return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+	kind := val.Kind()
+
+	switch {
+	case kind >= reflect.Int && kind <= reflect.Int64:
+		return reflect.Int
+	case kind >= reflect.Uint && kind <= reflect.Uint64:
+		return reflect.Uint
+	case kind >= reflect.Float32 && kind <= reflect.Float64:
+		return reflect.Float32
+	default:
+		return kind
+	}
+}

+ 2 - 0
vendor/github.com/pelletier/go-toml/.gitignore

@@ -0,0 +1,2 @@
+test_program/test_program_bin
+fuzz/

+ 23 - 0
vendor/github.com/pelletier/go-toml/.travis.yml

@@ -0,0 +1,23 @@
+sudo: false
+language: go
+go:
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - tip
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+script:
+  - if [ -n "$(go fmt ./...)" ]; then exit 1; fi
+  - ./test.sh
+  - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
+before_install:
+  - go get github.com/axw/gocov/gocov
+  - go get github.com/mattn/goveralls
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+branches:
+  only: [master]
+after_success:
+  - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN

+ 21 - 0
vendor/github.com/pelletier/go-toml/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 131 - 0
vendor/github.com/pelletier/go-toml/README.md

@@ -0,0 +1,131 @@
+# go-toml
+
+Go library for the [TOML](https://github.com/mojombo/toml) format.
+
+This library supports TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+
+[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
+[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+    User     string
+    Password string
+}
+type Config struct {
+    Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+    fmt.Printf("Query result %d: %v\n", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[godoc.org](http://godoc.org/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides two handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomll
+    tomll --help
+    ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomljson
+    tomljson --help
+    ```
+
+## Contribute
+
+Feel free to report bugs and submit patches using GitHub's pull request system on
+[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
+much appreciated!
+
+### Run tests
+
+You have to make sure two kinds of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).

+ 164 - 0
vendor/github.com/pelletier/go-toml/benchmark.json

@@ -0,0 +1,164 @@
+{
+    "array": {
+        "key1": [
+            1,
+            2,
+            3
+        ],
+        "key2": [
+            "red",
+            "yellow",
+            "green"
+        ],
+        "key3": [
+            [
+                1,
+                2
+            ],
+            [
+                3,
+                4,
+                5
+            ]
+        ],
+        "key4": [
+            [
+                1,
+                2
+            ],
+            [
+                "a",
+                "b",
+                "c"
+            ]
+        ],
+        "key5": [
+            1,
+            2,
+            3
+        ],
+        "key6": [
+            1,
+            2
+        ]
+    },
+    "boolean": {
+        "False": false,
+        "True": true
+    },
+    "datetime": {
+        "key1": "1979-05-27T07:32:00Z",
+        "key2": "1979-05-27T00:32:00-07:00",
+        "key3": "1979-05-27T00:32:00.999999-07:00"
+    },
+    "float": {
+        "both": {
+            "key": 6.626e-34
+        },
+        "exponent": {
+            "key1": 5e+22,
+            "key2": 1000000,
+            "key3": -0.02
+        },
+        "fractional": {
+            "key1": 1,
+            "key2": 3.1415,
+            "key3": -0.01
+        },
+        "underscores": {
+            "key1": 9224617.445991227,
+            "key2": 1e+100
+        }
+    },
+    "fruit": [{
+            "name": "apple",
+            "physical": {
+                "color": "red",
+                "shape": "round"
+            },
+            "variety": [{
+                    "name": "red delicious"
+                },
+                {
+                    "name": "granny smith"
+                }
+            ]
+        },
+        {
+            "name": "banana",
+            "variety": [{
+                "name": "plantain"
+            }]
+        }
+    ],
+    "integer": {
+        "key1": 99,
+        "key2": 42,
+        "key3": 0,
+        "key4": -17,
+        "underscores": {
+            "key1": 1000,
+            "key2": 5349221,
+            "key3": 12345
+        }
+    },
+    "products": [{
+            "name": "Hammer",
+            "sku": 738594937
+        },
+        {},
+        {
+            "color": "gray",
+            "name": "Nail",
+            "sku": 284758393
+        }
+    ],
+    "string": {
+        "basic": {
+            "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+        },
+        "literal": {
+            "multiline": {
+                "lines": "The first newline is\ntrimmed in raw strings.\n   All other whitespace\n   is preserved.\n",
+                "regex2": "I [dw]on't need \\d{2} apples"
+            },
+            "quoted": "Tom \"Dubs\" Preston-Werner",
+            "regex": "\u003c\\i\\c*\\s*\u003e",
+            "winpath": "C:\\Users\\nodejs\\templates",
+            "winpath2": "\\\\ServerX\\admin$\\system32\\"
+        },
+        "multiline": {
+            "continued": {
+                "key1": "The quick brown fox jumps over the lazy dog.",
+                "key2": "The quick brown fox jumps over the lazy dog.",
+                "key3": "The quick brown fox jumps over the lazy dog."
+            },
+            "key1": "One\nTwo",
+            "key2": "One\nTwo",
+            "key3": "One\nTwo"
+        }
+    },
+    "table": {
+        "inline": {
+            "name": {
+                "first": "Tom",
+                "last": "Preston-Werner"
+            },
+            "point": {
+                "x": 1,
+                "y": 2
+            }
+        },
+        "key": "value",
+        "subtable": {
+            "key": "another value"
+        }
+    },
+    "x": {
+        "y": {
+            "z": {
+                "w": {}
+            }
+        }
+    }
+}

+ 32 - 0
vendor/github.com/pelletier/go-toml/benchmark.sh

@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! hash benchstat 2>/dev/null; then
+    echo "Installing benchstat"
+    go get golang.org/x/perf/cmd/benchstat
+    go install golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem  | tee ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}

+ 244 - 0
vendor/github.com/pelletier/go-toml/benchmark.toml

@@ -0,0 +1,244 @@
+################################################################################
+## Comment
+
+# Speak your mind with the hash symbol. They go from the symbol to the end of
+# the line.
+
+
+################################################################################
+## Table
+
+# Tables (also known as hash tables or dictionaries) are collections of
+# key/value pairs. They appear in square brackets on a line by themselves.
+
+[table]
+
+key = "value" # Yeah, you can do this.
+
+# Nested tables are denoted by table names with dots in them. Name your tables
+# whatever crap you please, just don't use #, ., [ or ].
+
+[table.subtable]
+
+key = "another value"
+
+# You don't need to specify all the super-tables if you don't want to. TOML
+# knows how to do it for you.
+
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
+
+
+################################################################################
+## Inline Table
+
+# Inline tables provide a more compact syntax for expressing tables. They are
+# especially useful for grouped data that can otherwise quickly become verbose.
+# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
+# allowed between the curly braces unless they are valid within a value.
+
+[table.inline]
+
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+
+
+################################################################################
+## String
+
+# There are four ways to express strings: basic, multi-line basic, literal, and
+# multi-line literal. All strings must contain only valid UTF-8 characters.
+
+[string.basic]
+
+basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+[string.multiline]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "One\nTwo"
+key2 = """One\nTwo"""
+key3 = """
+One
+Two"""
+
+[string.multiline.continued]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "The quick brown fox jumps over the lazy dog."
+
+key2 = """
+The quick brown \
+
+
+  fox jumps over \
+    the lazy dog."""
+
+key3 = """\
+       The quick brown \
+       fox jumps over \
+       the lazy dog.\
+       """
+
+[string.literal]
+
+# What you see is what you get.
+winpath  = 'C:\Users\nodejs\templates'
+winpath2 = '\\ServerX\admin$\system32\'
+quoted   = 'Tom "Dubs" Preston-Werner'
+regex    = '<\i\c*\s*>'
+
+
+[string.literal.multiline]
+
+regex2 = '''I [dw]on't need \d{2} apples'''
+lines  = '''
+The first newline is
+trimmed in raw strings.
+   All other whitespace
+   is preserved.
+'''
+
+
+################################################################################
+## Integer
+
+# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
+# Negative numbers are prefixed with a minus sign.
+
+[integer]
+
+key1 = +99
+key2 = 42
+key3 = 0
+key4 = -17
+
+[integer.underscores]
+
+# For large numbers, you may use underscores to enhance readability. Each
+# underscore must be surrounded by at least one digit.
+key1 = 1_000
+key2 = 5_349_221
+key3 = 1_2_3_4_5     # valid but inadvisable
+
+
+################################################################################
+## Float
+
+# A float consists of an integer part (which may be prefixed with a plus or
+# minus sign) followed by a fractional part and/or an exponent part.
+
+[float.fractional]
+
+key1 = +1.0
+key2 = 3.1415
+key3 = -0.01
+
+[float.exponent]
+
+key1 = 5e+22
+key2 = 1e6
+key3 = -2E-2
+
+[float.both]
+
+key = 6.626e-34
+
+[float.underscores]
+
+key1 = 9_224_617.445_991_228_313
+key2 = 1e1_00
+
+
+################################################################################
+## Boolean
+
+# Booleans are just the tokens you're used to. Always lowercase.
+
+[boolean]
+
+True = true
+False = false
+
+
+################################################################################
+## Datetime
+
+# Datetimes are RFC 3339 dates.
+
+[datetime]
+
+key1 = 1979-05-27T07:32:00Z
+key2 = 1979-05-27T00:32:00-07:00
+key3 = 1979-05-27T00:32:00.999999-07:00
+
+
+################################################################################
+## Array
+
+# Arrays are square brackets with other primitives inside. Whitespace is
+# ignored. Elements are separated by commas. Data types may not be mixed.
+
+[array]
+
+key1 = [ 1, 2, 3 ]
+key2 = [ "red", "yellow", "green" ]
+key3 = [ [ 1, 2 ], [3, 4, 5] ]
+#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
+
+# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
+# also ignore newlines between the brackets.  Terminating commas are ok before
+# the closing bracket.
+
+key5 = [
+  1, 2, 3
+]
+key6 = [
+  1,
+  2, # this is ok
+]
+
+
+################################################################################
+## Array of Tables
+
+# These can be expressed by using a table name in double brackets. Each table
+# with the same double bracketed name will be an element in the array. The
+# tables are inserted in the order encountered.
+
+[[products]]
+
+name = "Hammer"
+sku = 738594937
+
+[[products]]
+
+[[products]]
+
+name = "Nail"
+sku = 284758393
+color = "gray"
+
+
+# You can create nested arrays of tables as well.
+
+[[fruit]]
+  name = "apple"
+
+  [fruit.physical]
+    color = "red"
+    shape = "round"
+
+  [[fruit.variety]]
+    name = "red delicious"
+
+  [[fruit.variety]]
+    name = "granny smith"
+
+[[fruit]]
+  name = "banana"
+
+  [[fruit.variety]]
+    name = "plantain"

+ 121 - 0
vendor/github.com/pelletier/go-toml/benchmark.yml

@@ -0,0 +1,121 @@
+---
+array:
+  key1:
+  - 1
+  - 2
+  - 3
+  key2:
+  - red
+  - yellow
+  - green
+  key3:
+  - - 1
+    - 2
+  - - 3
+    - 4
+    - 5
+  key4:
+  - - 1
+    - 2
+  - - a
+    - b
+    - c
+  key5:
+  - 1
+  - 2
+  - 3
+  key6:
+  - 1
+  - 2
+boolean:
+  'False': false
+  'True': true
+datetime:
+  key1: '1979-05-27T07:32:00Z'
+  key2: '1979-05-27T00:32:00-07:00'
+  key3: '1979-05-27T00:32:00.999999-07:00'
+float:
+  both:
+    key: 6.626e-34
+  exponent:
+    key1: 5.0e+22
+    key2: 1000000
+    key3: -0.02
+  fractional:
+    key1: 1
+    key2: 3.1415
+    key3: -0.01
+  underscores:
+    key1: 9224617.445991227
+    key2: 1.0e+100
+fruit:
+- name: apple
+  physical:
+    color: red
+    shape: round
+  variety:
+  - name: red delicious
+  - name: granny smith
+- name: banana
+  variety:
+  - name: plantain
+integer:
+  key1: 99
+  key2: 42
+  key3: 0
+  key4: -17
+  underscores:
+    key1: 1000
+    key2: 5349221
+    key3: 12345
+products:
+- name: Hammer
+  sku: 738594937
+- {}
+- color: gray
+  name: Nail
+  sku: 284758393
+string:
+  basic:
+    basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+  literal:
+    multiline:
+      lines: |
+        The first newline is
+        trimmed in raw strings.
+           All other whitespace
+           is preserved.
+      regex2: I [dw]on't need \d{2} apples
+    quoted: Tom "Dubs" Preston-Werner
+    regex: "<\\i\\c*\\s*>"
+    winpath: C:\Users\nodejs\templates
+    winpath2: "\\\\ServerX\\admin$\\system32\\"
+  multiline:
+    continued:
+      key1: The quick brown fox jumps over the lazy dog.
+      key2: The quick brown fox jumps over the lazy dog.
+      key3: The quick brown fox jumps over the lazy dog.
+    key1: |-
+      One
+      Two
+    key2: |-
+      One
+      Two
+    key3: |-
+      One
+      Two
+table:
+  inline:
+    name:
+      first: Tom
+      last: Preston-Werner
+    point:
+      x: 1
+      y: 2
+  key: value
+  subtable:
+    key: another value
+x:
+  y:
+    z:
+      w: {}
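The benchmark.json, benchmark.toml, and benchmark.yml fixtures encode the same document, so JSON, TOML, and YAML parsers can be timed on identical input. For context, a minimal sketch of the shape such a benchmark can take, assuming the go-toml v1 LoadBytes API; the function name and file layout are illustrative, not the package's actual test code:

package toml_test

import (
	"io/ioutil"
	"testing"

	toml "github.com/pelletier/go-toml"
)

func BenchmarkParseTomlFixture(b *testing.B) {
	data, err := ioutil.ReadFile("benchmark.toml")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Re-parse the same fixture into a Tree on every iteration.
		if _, err := toml.LoadBytes(data); err != nil {
			b.Fatal(err)
		}
	}
}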

+ 23 - 0
vendor/github.com/pelletier/go-toml/doc.go

@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
+package toml
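As a quick illustration of the tree workflow described above, here is a minimal sketch assuming the go-toml v1 Load and Get signatures; the document and keys are made up for the example:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Load parses a TOML document into a *toml.Tree.
	tree, err := toml.Load(`[server]
host = "localhost"
port = 8080`)
	if err != nil {
		panic(err)
	}
	// Dotted keys walk nested tables in the parsed tree.
	fmt.Println(tree.Get("server.host")) // localhost
	fmt.Println(tree.Get("server.port")) // 8080 (int64)
}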

+ 29 - 0
vendor/github.com/pelletier/go-toml/example-crlf.toml

@@ -0,0 +1,29 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

Some files were not shown because too many files changed in this diff