Browse Source

add vendor

xormplus committed 9 years ago
parent
commit
b896d33714
100 tập tin đã thay đổi với 16957 bổ sung0 xóa
  1. 22 0
      vendor/github.com/Chronokeeper/anyxml/LICENSE
  2. 341 0
      vendor/github.com/Chronokeeper/anyxml/anyxml.go
  3. 490 0
      vendor/github.com/Chronokeeper/anyxml/xml.go
  4. 191 0
      vendor/github.com/Unknwon/goconfig/LICENSE
  5. 72 0
      vendor/github.com/Unknwon/goconfig/README.md
  6. 64 0
      vendor/github.com/Unknwon/goconfig/README_ZH.md
  7. 556 0
      vendor/github.com/Unknwon/goconfig/conf.go
  8. 294 0
      vendor/github.com/Unknwon/goconfig/read.go
  9. 117 0
      vendor/github.com/Unknwon/goconfig/write.go
  10. 39 0
      vendor/github.com/agrison/go-tablib/HISTORY.md
  11. 21 0
      vendor/github.com/agrison/go-tablib/LICENSE
  12. 602 0
      vendor/github.com/agrison/go-tablib/README.md
  13. 81 0
      vendor/github.com/agrison/go-tablib/tablib_csv.go
  14. 54 0
      vendor/github.com/agrison/go-tablib/tablib_databook.go
  15. 745 0
      vendor/github.com/agrison/go-tablib/tablib_dataset.go
  16. 20 0
      vendor/github.com/agrison/go-tablib/tablib_errors.go
  17. 70 0
      vendor/github.com/agrison/go-tablib/tablib_exportable.go
  18. 41 0
      vendor/github.com/agrison/go-tablib/tablib_html.go
  19. 72 0
      vendor/github.com/agrison/go-tablib/tablib_json.go
  20. 48 0
      vendor/github.com/agrison/go-tablib/tablib_sort.go
  21. 149 0
      vendor/github.com/agrison/go-tablib/tablib_sql.go
  22. 72 0
      vendor/github.com/agrison/go-tablib/tablib_tabular.go
  23. 65 0
      vendor/github.com/agrison/go-tablib/tablib_util.go
  24. 50 0
      vendor/github.com/agrison/go-tablib/tablib_xlsx.go
  25. 64 0
      vendor/github.com/agrison/go-tablib/tablib_xml.go
  26. 69 0
      vendor/github.com/agrison/go-tablib/tablib_yaml.go
  27. 55 0
      vendor/github.com/agrison/mxj/LICENSE
  28. 177 0
      vendor/github.com/agrison/mxj/anyxml.go
  29. 54 0
      vendor/github.com/agrison/mxj/atomFeedString.xml
  30. 110 0
      vendor/github.com/agrison/mxj/doc.go
  31. 7 0
      vendor/github.com/agrison/mxj/exists.go
  32. 299 0
      vendor/github.com/agrison/mxj/files.go
  33. 2 0
      vendor/github.com/agrison/mxj/files_test.badjson
  34. 9 0
      vendor/github.com/agrison/mxj/files_test.badxml
  35. 2 0
      vendor/github.com/agrison/mxj/files_test.json
  36. 9 0
      vendor/github.com/agrison/mxj/files_test.xml
  37. 1 0
      vendor/github.com/agrison/mxj/files_test_dup.json
  38. 1 0
      vendor/github.com/agrison/mxj/files_test_dup.xml
  39. 12 0
      vendor/github.com/agrison/mxj/files_test_indent.json
  40. 8 0
      vendor/github.com/agrison/mxj/files_test_indent.xml
  41. 319 0
      vendor/github.com/agrison/mxj/json.go
  42. 658 0
      vendor/github.com/agrison/mxj/keyvalues.go
  43. 82 0
      vendor/github.com/agrison/mxj/leafnode.go
  44. 83 0
      vendor/github.com/agrison/mxj/misc.go
  45. 206 0
      vendor/github.com/agrison/mxj/mxj.go
  46. 183 0
      vendor/github.com/agrison/mxj/newmap.go
  47. 159 0
      vendor/github.com/agrison/mxj/readme.md
  48. 37 0
      vendor/github.com/agrison/mxj/remove.go
  49. 54 0
      vendor/github.com/agrison/mxj/rename.go
  50. 26 0
      vendor/github.com/agrison/mxj/set.go
  51. 29 0
      vendor/github.com/agrison/mxj/songtext.xml
  52. 41 0
      vendor/github.com/agrison/mxj/struct.go
  53. 249 0
      vendor/github.com/agrison/mxj/updatevalues.go
  54. 919 0
      vendor/github.com/agrison/mxj/xml.go
  55. 723 0
      vendor/github.com/agrison/mxj/xmlseq.go
  56. 1 0
      vendor/github.com/bndr/gotabulate/AUTHOR
  57. 0 0
      vendor/github.com/bndr/gotabulate/CHANGELOG
  58. 2 0
      vendor/github.com/bndr/gotabulate/CONTRIBUTORS
  59. 201 0
      vendor/github.com/bndr/gotabulate/LICENSE
  60. 222 0
      vendor/github.com/bndr/gotabulate/README.md
  61. 483 0
      vendor/github.com/bndr/gotabulate/tabulate.go
  62. 144 0
      vendor/github.com/bndr/gotabulate/utils.go
  63. 21 0
      vendor/github.com/fatih/structs/LICENSE
  64. 163 0
      vendor/github.com/fatih/structs/README.md
  65. 141 0
      vendor/github.com/fatih/structs/field.go
  66. 579 0
      vendor/github.com/fatih/structs/structs.go
  67. 32 0
      vendor/github.com/fatih/structs/tags.go
  68. 46 0
      vendor/github.com/fsnotify/fsnotify/AUTHORS
  69. 307 0
      vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
  70. 77 0
      vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
  71. 28 0
      vendor/github.com/fsnotify/fsnotify/LICENSE
  72. 50 0
      vendor/github.com/fsnotify/fsnotify/README.md
  73. 37 0
      vendor/github.com/fsnotify/fsnotify/fen.go
  74. 66 0
      vendor/github.com/fsnotify/fsnotify/fsnotify.go
  75. 334 0
      vendor/github.com/fsnotify/fsnotify/inotify.go
  76. 187 0
      vendor/github.com/fsnotify/fsnotify/inotify_poller.go
  77. 503 0
      vendor/github.com/fsnotify/fsnotify/kqueue.go
  78. 11 0
      vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
  79. 12 0
      vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
  80. 561 0
      vendor/github.com/fsnotify/fsnotify/windows.go
  81. 21 0
      vendor/github.com/mattn/go-runewidth/LICENSE
  82. 27 0
      vendor/github.com/mattn/go-runewidth/README.mkd
  83. 481 0
      vendor/github.com/mattn/go-runewidth/runewidth.go
  84. 8 0
      vendor/github.com/mattn/go-runewidth/runewidth_js.go
  85. 77 0
      vendor/github.com/mattn/go-runewidth/runewidth_posix.go
  86. 25 0
      vendor/github.com/mattn/go-runewidth/runewidth_windows.go
  87. 54 0
      vendor/github.com/tealeg/xlsx/AUTHORS.txt
  88. 131 0
      vendor/github.com/tealeg/xlsx/README.org
  89. 393 0
      vendor/github.com/tealeg/xlsx/cell.go
  90. 44 0
      vendor/github.com/tealeg/xlsx/col.go
  91. 105 0
      vendor/github.com/tealeg/xlsx/date.go
  92. 11 0
      vendor/github.com/tealeg/xlsx/doc.go
  93. 308 0
      vendor/github.com/tealeg/xlsx/file.go
  94. 145 0
      vendor/github.com/tealeg/xlsx/hsl.go
  95. 983 0
      vendor/github.com/tealeg/xlsx/lib.go
  96. 77 0
      vendor/github.com/tealeg/xlsx/reftable.go
  97. 22 0
      vendor/github.com/tealeg/xlsx/row.go
  98. 395 0
      vendor/github.com/tealeg/xlsx/sheet.go
  99. 180 0
      vendor/github.com/tealeg/xlsx/style.go
  100. 339 0
      vendor/github.com/tealeg/xlsx/templates.go

+ 22 - 0
vendor/github.com/Chronokeeper/anyxml/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chronokeeper
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 341 - 0
vendor/github.com/Chronokeeper/anyxml/anyxml.go

@@ -0,0 +1,341 @@
+// anyxml - marshal an XML document from almost any Go variable
+// Marshal XML from map[string]interface{}, arrays, slices, alpha/numeric, etc.  
+// 
+// Wraps xml.Marshal with functionality in github.com/clbanning/mxj to create
+// a more genericized XML marshaling capability. Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+//
+// See mxj package documentation for more information.  See anyxml_test.go for
+// examples or just try Xml() or XmlIndent().
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/anyxml"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := anyxml.XmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+*/
+package anyxml
+
+import (
+	"encoding/xml"
+	"reflect"
+	"time"
+)
+
// Xml encodes an arbitrary Go value as XML. Note: there are no guarantees.
//
// Dispatch, as implemented below:
//   - structs are handed directly to encoding/xml.Marshal;
//   - []interface{} is wrapped in a root tag (rootTag[0] if given, else
//     DefaultRootTag); a single-key map member uses its key as the element
//     tag, every other member is tagged "element";
//   - map[string]interface{} is delegated to anyxml;
//   - []map[string]interface{} concatenates the anyxml encoding of each map;
//   - anything else becomes a single element named after the root tag.
func Xml(v interface{}, rootTag ...string) ([]byte, error) {
	// Plain structs: defer entirely to the standard library.
	if reflect.TypeOf(v).Kind() == reflect.Struct {
		return xml.Marshal(v)
	}

	var err error
	s := new(string) // output accumulator shared with mapToXmlIndent
	p := new(pretty) // zero-value stub: compact (unindented) output

	// Pick the root tag: a single explicit argument wins, else the default.
	var rt string
	if len(rootTag) == 1 {
		rt = rootTag[0]
	} else {
		rt = DefaultRootTag
	}

	var ss string
	var b []byte
	switch v.(type) {
	case []interface{}:
		ss = "<" + rt + ">"
		for _, vv := range v.([]interface{}) {
			switch vv.(type) {
			case map[string]interface{}:
				m := vv.(map[string]interface{})
				if len(m) == 1 {
					// Single-entry map: its key becomes the element tag.
					for tag, val := range m {
						err = mapToXmlIndent(false, s, tag, val, p)
					}
				} else {
					err = mapToXmlIndent(false, s, "element", vv, p)
				}
			default:
				err = mapToXmlIndent(false, s, "element", vv, p)
			}
			if err != nil {
				break
			}
		}
		ss += *s + "</" + rt + ">"
		b = []byte(ss)
	case map[string]interface{}:
		b, err = anyxml(v.(map[string]interface{}), rootTag...)
	case []map[string]interface{}:
		// Encode each map independently and concatenate the fragments.
		// NOTE(review): the concatenation has no single enclosing root
		// element, so the result may not be a well-formed XML document.
		for _, vv := range v.([]map[string]interface{}) {
			b, err = anyxml(vv, rootTag...)
			ss += string(b)
			if err != nil {
				break
			}
		}
		b = []byte(ss)
	default:
		// Scalars and anything else: one element named after the root tag.
		err = mapToXmlIndent(false, s, rt, v, p)
		b = []byte(*s)
	}

	return b, err
}
+
// XmlWithDateFormat encodes an arbitrary value as XML, rendering time.Time
// values with dateFormat (a Go reference-time layout such as "2006-01-02").
// Dispatch mirrors Xml. Note: there are no guarantees.
func XmlWithDateFormat(dateFormat string, v interface{}, rootTag ...string) ([]byte, error) {
	// Plain structs: defer entirely to the standard library.
	if reflect.TypeOf(v).Kind() == reflect.Struct {
		return xml.Marshal(v)
	}

	var err error
	s := new(string) // output accumulator shared with the encoder
	p := new(pretty) // zero-value stub: compact (unindented) output

	// Pick the root tag: a single explicit argument wins, else the default.
	var rt string
	if len(rootTag) == 1 {
		rt = rootTag[0]
	} else {
		rt = DefaultRootTag
	}

	var ss string
	var b []byte
	switch v.(type) {
	case []interface{}:
		ss = "<" + rt + ">"
		for _, vv := range v.([]interface{}) {
			switch vv.(type) {
			case map[string]interface{}:
				m := vv.(map[string]interface{})
				if len(m) == 1 {
					// Single-entry map: its key becomes the element tag.
					for tag, val := range m {
						err = mapToXmlIndentWithDateFormat(dateFormat, false, s, tag, val, p)
					}
				} else {
					err = mapToXmlIndentWithDateFormat(dateFormat, false, s, "element", vv, p)
				}
			default:
				err = mapToXmlIndentWithDateFormat(dateFormat, false, s, "element", vv, p)
			}
			if err != nil {
				break
			}
		}
		ss += *s + "</" + rt + ">"
		b = []byte(ss)
	case map[string]interface{}:
		b, err = anyxmlWithDateFormat(dateFormat, v.(map[string]interface{}), rootTag...)
	case []map[string]interface{}:
		// NOTE(review): unlike Xml, this branch ignores rootTag (always
		// passing "element") and joins fragments with newlines — confirm
		// whether the divergence is intentional.
		for _, vv := range v.([]map[string]interface{}) {
			b, err = anyxmlWithDateFormat(dateFormat, vv, "element")
			ss += (string(b) + "\n")
			if err != nil {
				break
			}
		}
		b = []byte(ss)
	default:
		// Scalars and anything else: one element named after the root tag.
		err = mapToXmlIndentWithDateFormat(dateFormat, false, s, rt, v, p)
		b = []byte(*s)
	}

	return b, err
}
+
// XmlIndent encodes an arbitrary value as a pretty (indented) XML string.
// prefix is written before each line and indent is added per nesting level,
// matching the xml.MarshalIndent convention. Note: there are no guarantees.
func XmlIndent(v interface{}, prefix, indent string, rootTag ...string) ([]byte, error) {
	// Plain structs: defer entirely to the standard library.
	if reflect.TypeOf(v).Kind() == reflect.Struct {
		return xml.MarshalIndent(v, prefix, indent)
	}

	var err error
	s := new(string)
	p := new(pretty)
	p.indent = indent
	p.padding = prefix

	// Pick the root tag: a single explicit argument wins, else the default.
	var rt string
	if len(rootTag) == 1 {
		rt = rootTag[0]
	} else {
		rt = DefaultRootTag
	}

	var ss string
	var b []byte

	switch v.(type) {

	case []interface{}:
		ss = "<" + rt + ">\n"
		p.Indent()
		for _, vv := range v.([]interface{}) {
			switch vv.(type) {
			case map[string]interface{}:
				m := vv.(map[string]interface{})
				if len(m) == 1 {
					// Single-entry map: its key becomes the element tag.
					for tag, val := range m {

						err = mapToXmlIndent(true, s, tag, val, p)
					}
				} else {
					p.start = 1 // we're 1 tag in to the doc
					err = mapToXmlIndent(true, s, "element", vv, p)
					*s += "\n"
				}
			case []map[string]interface{}:
				// NOTE(review): the opening "<element>" carries padding on
				// both sides while the closing tag gets none — indentation of
				// this branch looks unbalanced; verify the rendered output.
				*s += p.padding + "<element>\n" + p.padding
				for _, vvv := range vv.([]map[string]interface{}) {
					err = mapToXmlIndent(true, s, "element", vvv, p)
					*s += "\n"
					if err != nil {
						break
					}
				}
				*s += "</element>\n"
			default:
				p.start = 0
				err = mapToXmlIndent(true, s, "element", vv, p)
			}
			if err != nil {
				break
			}
		}
		ss += *s + "</" + rt + ">"
		b = []byte(ss)
	case map[string]interface{}:
		b, err = anyxmlIndent(v.(map[string]interface{}), prefix, indent, rootTag...)
	case []map[string]interface{}:
		// Encode each map independently; fragments are newline-joined and
		// share no single enclosing root element.
		for _, vv := range v.([]map[string]interface{}) {
			b, err = anyxmlIndent(vv, prefix, indent, rootTag...)
			ss += (string(b) + "\n")
			if err != nil {
				break
			}
		}
		b = []byte(ss)
	default:
		// Scalars and anything else: one element named after the root tag.
		err = mapToXmlIndent(true, s, rt, v, p)
		b = []byte(*s)
	}

	return b, err
}
+
// XmlIndentWithDateFormat encodes an arbitrary value as a pretty (indented)
// XML string, rendering time.Time values with dateFormat (a Go
// reference-time layout). Dispatch mirrors XmlIndent. Note: there are no
// guarantees.
func XmlIndentWithDateFormat(dateFormat string, v interface{}, prefix, indent string, rootTag ...string) ([]byte, error) {

	// Plain structs: defer entirely to the standard library.
	if reflect.TypeOf(v).Kind() == reflect.Struct {
		return xml.MarshalIndent(v, prefix, indent)

	}

	var err error
	s := new(string)
	p := new(pretty)
	p.indent = indent
	p.padding = prefix

	// Pick the root tag: a single explicit argument wins, else the default.
	var rt string
	if len(rootTag) == 1 {
		rt = rootTag[0]
	} else {
		rt = DefaultRootTag
	}

	var ss string
	var b []byte
	switch v.(type) {
	case []interface{}:
		ss = "<" + rt + ">\n"
		p.Indent()
		for _, vv := range v.([]interface{}) {
			switch vv.(type) {
			case map[string]interface{}:
				m := vv.(map[string]interface{})
				if len(m) == 1 {
					// Single-entry map: its key becomes the element tag.
					for tag, val := range m {
						err = mapToXmlIndentWithDateFormat(dateFormat, true, s, tag, val, p)
					}
				} else {
					p.start = 1 // we're 1 tag in to the doc
					err = mapToXmlIndentWithDateFormat(dateFormat, true, s, "element", vv, p)
					*s += "\n"
				}
			default:
				p.start = 0
				err = mapToXmlIndentWithDateFormat(dateFormat, true, s, "element", vv, p)
			}
			if err != nil {
				break
			}
		}
		ss += *s + "</" + rt + ">"
		b = []byte(ss)
	case map[string]interface{}:
		b, err = anyxmlIndentWithDateFormat(dateFormat, v.(map[string]interface{}), prefix, indent, rootTag...)
	case []map[string]interface{}:
		// Encode each map independently; fragments are newline-joined and
		// share no single enclosing root element.
		for _, vv := range v.([]map[string]interface{}) {
			b, err = anyxmlIndentWithDateFormat(dateFormat, vv, prefix, indent, rootTag...)
			ss += (string(b) + "\n")
			if err != nil {
				break
			}
		}
		b = []byte(ss)
	default:
		// Scalars and anything else: one element named after the root tag.
		err = mapToXmlIndentWithDateFormat(dateFormat, true, s, rt, v, p)
		b = []byte(*s)
	}

	return b, err
}
+
+func Struct2MapWithDateFormat(dateFormat string, obj interface{}) map[string]interface{} {
+	t := reflect.TypeOf(obj)
+	v := reflect.ValueOf(obj)
+	var data = make(map[string]interface{})
+
+	for i := 0; i < t.NumField(); i++ {
+		if t.Field(i).Type == reflect.TypeOf(time.Now()) {
+			data[t.Field(i).Name] = (v.Field(i).Interface().(time.Time)).Format(dateFormat)
+		} else {
+			data[t.Field(i).Name] = v.Field(i).Interface()
+		}
+
+	}
+	return data
+}

+ 490 - 0
vendor/github.com/Chronokeeper/anyxml/xml.go

@@ -0,0 +1,490 @@
+// Copyright 2012-2016 xiaolipeng. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package anyxml
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"html/template"
+	"time"
+)
+
// --------------------------------- Xml, XmlIndent - from mxj -------------------------------

const (
	// DefaultRootTag is the element name wrapped around a document when the
	// caller supplies no explicit rootTag argument.
	DefaultRootTag          = "doc"
	// UseGoEmptyElementSyntax: if 'true', encode an empty element as
	// "<tag></tag>" instead of "<tag/>".
	UseGoEmptyElementSyntax = false
)
+
// anyxml encodes a Map as XML — the companion of NewMapXml().
// From: github.com/clbanning/mxj/xml.go with functions relabeled: Xml() --> anyxml().
// The following rules apply.
//    - The key label "#text" is treated as the value for a simple element with attributes.
//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
//    - Map value type encoding:
//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
//          > []bool, []uint8: by casting to string
//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
//            value is "UNKNOWN"
//    - Elements with only attribute values or are null are terminated using "/>".
//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
//    - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax().
func anyxml(m map[string]interface{}, rootTag ...string) ([]byte, error) {
	var err error
	s := new(string)
	p := new(pretty) // just a stub: compact output, no indentation state

	if len(m) == 1 && len(rootTag) == 0 {
		for key, value := range m {
			// If it is an array, see if all values are map[string]interface{};
			// we force a new root tag if we'd end up with no key:value in the list,
			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
			switch value.(type) {
			case []interface{}:
				for _, v := range value.([]interface{}) {
					switch v.(type) {
					case map[string]interface{}: // noop
					default: // anything else: bail out to the default root tag
						err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
						goto done
					}
				}
			}
			err = mapToXmlIndent(false, s, key, value, p)
		}
	} else if len(rootTag) == 1 {
		err = mapToXmlIndent(false, s, rootTag[0], m, p)
	} else {
		err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
	}
done:
	return []byte(*s), err
}
+
// anyxmlWithDateFormat encodes a Map as XML like anyxml, but renders
// time.Time values using dateFormat (a Go reference-time layout).
// See anyxml for the root-tag selection rules.
func anyxmlWithDateFormat(dateFormat string, m map[string]interface{}, rootTag ...string) ([]byte, error) {
	var err error
	s := new(string)
	p := new(pretty) // just a stub: compact output, no indentation state

	if len(m) == 1 && len(rootTag) == 0 {
		for key, value := range m {
			// If it is an array, see if all values are map[string]interface{};
			// we force a new root tag if we'd end up with no key:value in the list,
			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
			switch value.(type) {
			case []interface{}:
				for _, v := range value.([]interface{}) {
					switch v.(type) {
					case map[string]interface{}: // noop
					default: // anything else: bail out to the default root tag
						err = mapToXmlIndentWithDateFormat(dateFormat, false, s, DefaultRootTag, m, p)
						goto done
					}
				}
			}
			err = mapToXmlIndentWithDateFormat(dateFormat, false, s, key, value, p)
		}
	} else if len(rootTag) == 1 {
		err = mapToXmlIndentWithDateFormat(dateFormat, false, s, rootTag[0], m, p)
	} else {
		err = mapToXmlIndentWithDateFormat(dateFormat, false, s, DefaultRootTag, m, p)
	}
done:
	return []byte(*s), err
}
+
+func anyxmlIndentWithDateFormat(dateFormat string, m map[string]interface{}, prefix string, indent string, rootTag ...string) ([]byte, error) {
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></key></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlIndentWithDateFormat(dateFormat, true, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlIndentWithDateFormat(dateFormat, true, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlIndentWithDateFormat(dateFormat, true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlIndentWithDateFormat(dateFormat, true, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// Encode a map[string]interface{} as a pretty XML string.
+// See Xml for encoding rules.
+func anyxmlIndent(m map[string]interface{}, prefix string, indent string, rootTag ...string) ([]byte, error) {
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
// pretty carries the indentation state threaded through mapToXmlIndent and
// mapToXmlIndentWithDateFormat when pretty-printing is enabled.
type pretty struct {
	indent   string // string appended per indentation level (e.g. "  ")
	cnt      int    // current indentation depth (net Indent calls outstanding)
	padding  string // accumulated line prefix: initial prefix + indent repeated cnt times
	mapDepth int    // nesting depth of map values during encoding
	start    int    // depth at which encoding started; compared against cnt when deciding whether to emit a trailing newline
}
+
+func (p *pretty) Indent() {
+	p.padding += p.indent
+	p.cnt++
+}
+
+func (p *pretty) Outdent() {
+	if p.cnt > 0 {
+		p.padding = p.padding[:len(p.padding)-len(p.indent)]
+		p.cnt--
+	}
+}
+
// mapToXmlIndent is where the work actually happens: it appends the XML
// encoding of (key, value) to *s, recursing through maps and slices.
// doIndent selects pretty-printing; pp carries the indentation state and is
// copied locally so sibling recursions don't see each other's mutations.
// Returns an error if an attribute value is not atomic, or if a "#text" key
// coexists with other non-attribute keys.
func mapToXmlIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
	var endTag bool   // true once a closing tag (or "#text" close) is owed
	var isSimple bool // true for scalar values: no padding before the close tag
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
	key = template.HTMLEscapeString(key)

	// Open the start tag for every type that renders as an element here;
	// nil and []interface{} are handled inside the second switch.
	switch value.(type) {
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
		if doIndent {
			*s += p.padding
		}
		*s += `<` + key
	}
	switch value.(type) {
	case map[string]interface{}:
		vv := value.(map[string]interface{})
		lenvv := len(vv)
		// scan out attributes - keys have prepended hyphen, '-'
		// NOTE(review): k[:1] panics on an empty map key — confirm keys are never "".
		var cntAttr int
		for k, v := range vv {
			if k[:1] == "-" {
				switch v.(type) {
				case string:
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", template.HTMLEscapeString(v.(string))) + `"`
					cntAttr++
				case float64, bool, int, int32, int64, float32:
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", v) + `"`
					cntAttr++
				case []byte: // allow standard xml pkg []byte transform, as below
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", string(v.([]byte))) + `"`
					cntAttr++
				default:
					return fmt.Errorf("invalid attribute value for: %s", k)
				}
			}
		}
		// Only attributes? The element is empty and closed with "/>" below.
		if cntAttr == lenvv {
			break
		}
		// Simple element? Note: "#text" is an invalid XML tag, so its value
		// becomes the element's character data.
		if v, ok := vv["#text"]; ok {
			if cntAttr+1 < lenvv {
				return errors.New("#text key occurs with other non-attribute keys")
			}
			*s += ">" + fmt.Sprintf("%v", v)
			endTag = true
			break
		}
		// close tag with possible attributes
		*s += ">"
		if doIndent {
			*s += "\n"
		}
		// Something more complex: recurse into each non-attribute entry.
		p.mapDepth++
		var i int
		for k, v := range vv {
			if k[:1] == "-" {
				continue
			}
			// Non-list children get one extra indent level for the first
			// entry; list children manage their own indentation below.
			switch v.(type) {
			case []interface{}:
			default:
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			mapToXmlIndent(doIndent, s, k, v, p)
			switch v.(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
	case []interface{}:
		// A list repeats the same key for every member; each member opens
		// and closes its own element, so return without the shared epilogue.
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			mapToXmlIndent(doIndent, s, key, v, p)
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// Open the tag here (the first switch skipped nil); the empty-element
		// close ("/>" or "></key>") is appended in the epilogue below.
		*s += "<" + key
		break
	default: // handle anything - even goofy stuff
		switch value.(type) {
		case string:
			*s += ">" + fmt.Sprintf("%v", template.HTMLEscapeString(value.(string)))
		case float64, bool, int, int32, int64, float32:
			*s += ">" + fmt.Sprintf("%v", value)
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			*s += ">" + string(value.([]byte))
		default:
			// Unknown types go through encoding/xml; on failure the element
			// value is the literal "UNKNOWN".
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				*s += ">UNKNOWN"
			} else {
				*s += string(v)
			}
		}
		isSimple = true
		endTag = true
	}

	// Epilogue: close the element and emit pretty-printing newlines.
	if endTag {
		if doIndent {
			if !isSimple {
				//				if p.mapDepth == 0 {
				//					p.Outdent()
				//				}
				*s += p.padding
			}
		}
		switch value.(type) {
		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
			*s += `</` + key + ">"
		}
	} else if UseGoEmptyElementSyntax {
		*s += "></" + key + ">"
	} else {
		*s += "/>"
	}
	if doIndent {
		if p.cnt > p.start {
			*s += "\n"
		}
		p.Outdent()
	}

	return nil
}
+
// mapToXmlIndentWithDateFormat is where the work actually happens: like
// mapToXmlIndent it appends the XML encoding of (key, value) to *s, but
// additionally renders time.Time values using dateFormat (a Go
// reference-time layout). pp is copied locally so sibling recursions don't
// see each other's mutations. Returns an error if an attribute value is not
// atomic, or if a "#text" key coexists with other non-attribute keys.
func mapToXmlIndentWithDateFormat(dateFormat string, doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
	var endTag bool   // true once a closing tag (or "#text" close) is owed
	var isSimple bool // true for scalar values: no padding before the close tag
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
	key = template.HTMLEscapeString(key)

	// Open the start tag for every type that renders as an element here
	// (including time.Time); nil and []interface{} are handled below.
	switch value.(type) {
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, time.Time:
		if doIndent {
			*s += p.padding
		}
		*s += `<` + key
	}

	switch value.(type) {
	case map[string]interface{}:
		vv := value.(map[string]interface{})
		lenvv := len(vv)
		// Scan out attributes — keys have a prepended hyphen, '-'.
		// NOTE(review): k[:1] panics on an empty map key — confirm keys are never "".
		var cntAttr int
		for k, v := range vv {
			if k[:1] == "-" {
				switch v.(type) {
				case string:
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", template.HTMLEscapeString(v.(string))) + `"`
					cntAttr++
				case float64, bool, int, int32, int64, float32:
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", v) + `"`
					cntAttr++
				case []byte: // allow standard xml pkg []byte transform, as below
					*s += ` ` + k[1:] + `="` + fmt.Sprintf("%v", string(v.([]byte))) + `"`
					cntAttr++
				default:
					return fmt.Errorf("invalid attribute value for: %s", k)
				}
			}
		}
		// Only attributes? The element is empty and closed with "/>" below.
		if cntAttr == lenvv {
			break
		}
		// Simple element? Note: "#text" is an invalid XML tag, so its value
		// becomes the element's character data.
		if v, ok := vv["#text"]; ok {
			if cntAttr+1 < lenvv {
				return errors.New("#text key occurs with other non-attribute keys")
			}
			*s += ">" + fmt.Sprintf("%v", v)
			endTag = true
			break
		}
		// close tag with possible attributes
		*s += ">"
		if doIndent {
			*s += "\n"
		}
		// Something more complex: recurse into each non-attribute entry.
		p.mapDepth++
		var i int
		for k, v := range vv {
			if k[:1] == "-" {
				continue
			}
			// Non-list children get one extra indent level for the first
			// entry; list children manage their own indentation below.
			switch v.(type) {
			case []interface{}:
			default:
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			mapToXmlIndentWithDateFormat(dateFormat, doIndent, s, k, v, p)
			switch v.(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
	case []interface{}:
		// A list repeats the same key for every member; each member opens
		// and closes its own element, so return without the shared epilogue.
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			mapToXmlIndentWithDateFormat(dateFormat, doIndent, s, key, v, p)
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// Open the tag here (the first switch skipped nil); the empty-element
		// close ("/>" or "></key>") is appended in the epilogue below.
		*s += "<" + key
		break
	default: // handle anything - even goofy stuff
		switch value.(type) {
		case string:
			*s += ">" + fmt.Sprintf("%v", template.HTMLEscapeString(value.(string)))
		case float64, bool, int, int32, int64, float32:
			*s += ">" + fmt.Sprintf("%v", value)
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			*s += ">" + string(value.([]byte))
		case time.Time:
			// Dates are rendered with the caller-supplied layout.
			*s += ">" + (value.(time.Time)).Format(dateFormat)
		default:
			// Unknown types go through encoding/xml; on failure the element
			// value is the literal "UNKNOWN".
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				*s += ">UNKNOWN"
			} else {
				*s += string(v)
			}
		}
		isSimple = true
		endTag = true
	}

	// Epilogue: close the element and emit pretty-printing newlines.
	if endTag {
		if doIndent {
			if !isSimple {
				//				if p.mapDepth == 0 {
				//					p.Outdent()
				//				}
				*s += p.padding
			}
		}
		switch value.(type) {
		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, time.Time:
			*s += `</` + key + ">"
		}
	} else if UseGoEmptyElementSyntax {
		*s += "></" + key + ">"
	} else {
		*s += "/>"
	}
	if doIndent {
		if p.cnt > p.start {
			*s += "\n"
		}
		p.Outdent()
	}

	return nil
}

+ 191 - 0
vendor/github.com/Unknwon/goconfig/LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 72 - 0
vendor/github.com/Unknwon/goconfig/README.md

@@ -0,0 +1,72 @@
+goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig)
+========
+
+[中文文档](README_ZH.md)
+
+**IMPORTANT** 
+
+- This library is under bug fix only mode, which means no more features will be added.
+- I'm continuing working on better Go code with a different library: [ini](https://github.com/go-ini/ini).
+
+## About
+
+Package goconfig is an easy-to-use, comment-supporting configuration file parser for the Go Programming Language, which provides a structure similar to what you would find in Microsoft Windows INI files.
+
+The configuration file consists of sections, led by a `[section]` header and followed by `name:value` or `name=value` entries. Note that leading whitespace is removed from values. The optional values can contain format strings which refer to other values in the same section, or values in a special DEFAULT section. Comments are indicated by ";" or "#"; comments may begin anywhere on a single line.
+	
+## Features
+	
+- It simplifies the operation process and is easy to use and understand; therefore, there are fewer chances to make errors.
+- It uses exactly the same way to access a configuration file as you use Windows APIs, so you don't need to change your code style.
+- It supports read recursion sections.
+- It supports auto increment of key.
+- It supports **READ** and **WRITE** operations on configuration files with comments on each section or key, which most other parsers don't support.
+- It supports get value through type bool, float64, int, int64 and string, methods that start with "Must" means ignore errors and get zero-value if error occurs, or you can specify a default value.
+- It's able to load multiple files to overwrite key values. 
+
+## Installation
+	
+	go get github.com/Unknwon/goconfig
+
+Or
+	
+	gopm get github.com/Unknwon/goconfig
+
+## API Documentation
+
+[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
+
+## Example
+
+Please see [conf.ini](testdata/conf.ini) as an example.
+
+### Usage
+
+- Function `LoadConfigFile` load file(s) depends on your situation, and return a variable with type `ConfigFile`.
+- `GetValue` gives basic functionality of getting a value of given section and key.
+- Methods like `Bool`, `Int`, `Int64` return corresponding type of values.
+- Methods start with `Must` return corresponding type of values and returns zero-value of given type if something goes wrong.
+- `SetValue` sets value to given section and key, and inserts somewhere if it does not exist.
+- `DeleteKey` deletes by given section and key.
+- Finally, `SaveConfigFile` saves your configuration to local file system.
+- Use method `Reload` in case someone else modified your file(s).
+- Methods contains `Comment` help you manipulate comments.
+- `LoadFromReader` allows loading data without an intermediate file.
+- `SaveConfigData` added, which writes configuration to an arbitrary writer.
+- `ReloadData` allows to reload data from memory.
+
+Note that you cannot mix in-memory configuration with on-disk configuration.
+
+## More Information
+
+- All characters are CASE SENSITIVE, BE CAREFUL!
+
+## Credits
+
+- [goconf](http://code.google.com/p/goconf/)
+- [robfig/config](https://github.com/robfig/config)
+- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

+ 64 - 0
vendor/github.com/Unknwon/goconfig/README_ZH.md

@@ -0,0 +1,64 @@
+goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig) 
+========
+
+本库已被 [《Go名库讲解》](https://github.com/Unknwon/go-rock-libraries-showcases/tree/master/lectures/01-goconfig) 收录讲解,欢迎前往学习如何使用!
+
+编码规范:基于 [Go 编码规范](https://github.com/Unknwon/go-code-convention)
+
+## 关于
+
+包 goconfig 是一个易于使用,支持注释的 Go 语言配置文件解析器,该文件的书写格式和 Windows 下的 INI 文件一样。
+
+配置文件由形为 `[section]` 的节构成,内部使用 `name:value` 或 `name=value` 这样的键值对;每行开头和尾部的空白符号都将被忽略;如果未指定任何节,则会默认放入名为 `DEFAULT` 的节当中;可以使用 “;” 或 “#” 来作为注释的开头,并可以放置于任意的单独一行中。
+	
+## 特性
+	
+- 简化流程,易于理解,更少出错。
+- 提供与 Windows API 一模一样的操作方式。
+- 支持读取递归节。
+- 支持自增键名。
+- 支持对注释的 **读** 和 **写** 操作,其它所有解析器都不支持!!!!
+- 可以直接返回 bool, float64, int, int64 和 string 类型的值,如果使用 “Must” 开头的方法,则一定会返回这个类型的一个值而不返回错误,如果错误发生则会返回零值。
+- 支持加载多个文件来重写值。
+
+## 安装
+	
+	go get github.com/Unknwon/goconfig
+
+或
+
+	gopm get github.com/Unknwon/goconfig
+
+
+## API 文档
+
+[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
+
+## 示例
+
+请查看 [conf.ini](testdata/conf.ini) 文件作为使用示例。
+
+### 用例
+
+- 函数 `LoadConfigFile` 加载一个或多个文件,然后返回一个类型为 `ConfigFile` 的变量。
+- `GetValue` 可以简单的获取某个值。
+- 像 `Bool`、`Int`、`Int64` 这样的方法会直接返回指定类型的值。
+- 以 `Must` 开头的方法不会返回错误,但当错误发生时会返回零值。
+- `SetValue` 可以设置某个值。
+- `DeleteKey` 可以删除某个键。
+- 最后,`SaveConfigFile` 可以保持您的配置到本地文件系统。
+- 使用方法 `Reload` 可以重载您的配置文件。
+
+## 更多信息
+
+- 所有字符都是大小写敏感的!
+
+## 参考信息
+
+- [goconf](http://code.google.com/p/goconf/)
+- [robfig/config](https://github.com/robfig/config)
+- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
+
+## 授权许可
+
+本项目采用 Apache v2 开源授权许可证,完整的授权说明已放置在 [LICENSE](LICENSE) 文件中。

+ 556 - 0
vendor/github.com/Unknwon/goconfig/conf.go

@@ -0,0 +1,556 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package goconfig is a fully functional and comments-support configuration file(.ini) parser.
+package goconfig
+
+import (
+	"fmt"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const (
+	// DEFAULT_SECTION is the name of the default section; keys that appear
+	// before any [section] header belong to it.
+	DEFAULT_SECTION = "DEFAULT"
+	// _DEPTH_VALUES is the maximum allowed depth when recursively
+	// substituting variable references of the form %(name)s.
+	_DEPTH_VALUES = 200
+)
+
+// ParseError enumerates the reasons a get/parse operation can fail.
+type ParseError int
+
+const (
+	ERR_SECTION_NOT_FOUND ParseError = iota + 1 // requested section does not exist
+	ERR_KEY_NOT_FOUND                           // requested key does not exist
+	ERR_BLANK_SECTION_NAME                      // section name is blank
+	ERR_COULD_NOT_PARSE                         // input could not be parsed
+)
+
+// LineBreak is the line terminator used when serializing configuration;
+// init() switches it to "\r\n" on Windows.
+var LineBreak = "\n"
+
+// varPattern matches variable references of the form %(variable)s.
+var varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+// init selects the platform-appropriate line terminator:
+// CRLF on Windows, LF everywhere else.
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+// A ConfigFile represents an INI format configuration file.
+type ConfigFile struct {
+	lock      sync.RWMutex                 // Guards the maps below; Go maps are not safe for concurrent use.
+	fileNames []string                     // Supports loading multiple files.
+	data      map[string]map[string]string // Section -> key : value
+
+	// Lists keep sections and keys in the same order as in the file.
+	sectionList []string            // Section name list.
+	keyList     map[string][]string // Section -> Key name list
+
+	sectionComments map[string]string            // Section comments.
+	keyComments     map[string]map[string]string // Key comments.
+	BlockMode       bool                         // Indicates whether to use the lock or not.
+}
+
+// newConfigFile creates an empty configuration representation for the
+// given file names. BlockMode (locking) is enabled by default so that
+// concurrent access is safe.
+func newConfigFile(fileNames []string) *ConfigFile {
+	c := new(ConfigFile)
+	c.fileNames = fileNames
+	c.data = make(map[string]map[string]string)
+	c.keyList = make(map[string][]string)
+	c.sectionComments = make(map[string]string)
+	c.keyComments = make(map[string]map[string]string)
+	c.BlockMode = true
+	return c
+}
+
+// SetValue adds a new section-key-value to the configuration.
+// It returns true if the key and value were inserted,
+// or returns false if an existing value was overwritten.
+// If the section does not exist in advance, it will be created.
+// A blank section name is treated as DEFAULT_SECTION; a blank key is
+// rejected (returns false without modifying anything).
+func (c *ConfigFile) SetValue(section, key, value string) bool {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+	if len(key) == 0 {
+		return false
+	}
+
+	if c.BlockMode {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+	}
+
+	// Create the section on first use and record its order.
+	if _, ok := c.data[section]; !ok {
+		// Execute add operation.
+		c.data[section] = make(map[string]string)
+		// Append section to list.
+		c.sectionList = append(c.sectionList, section)
+	}
+
+	// Check if key exists; write the value either way.
+	_, ok := c.data[section][key]
+	c.data[section][key] = value
+	if !ok {
+		// If it did not exist, append to the ordered key list.
+		c.keyList[section] = append(c.keyList[section], key)
+	}
+	return !ok
+}
+
+// DeleteKey deletes the key in the given section.
+// It returns true if the key was deleted,
+// or returns false if the section or key didn't exist.
+// The key's comments are removed along with it.
+func (c *ConfigFile) DeleteKey(section, key string) bool {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if c.BlockMode {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+	}
+
+	// Check if section exists.
+	if _, ok := c.data[section]; !ok {
+		return false
+	}
+
+	// Check if key exists.
+	if _, ok := c.data[section][key]; ok {
+		delete(c.data[section], key)
+		// Remove comments of key.
+		c.SetKeyComments(section, key, "")
+		// Find the key's index so the ordered key list stays in sync.
+		i := 0
+		for _, keyName := range c.keyList[section] {
+			if keyName == key {
+				break
+			}
+			i++
+		}
+		// Remove from key list.
+		c.keyList[section] = append(c.keyList[section][:i], c.keyList[section][i+1:]...)
+		return true
+	}
+	return false
+}
+
+// GetValue returns the value of key available in the given section.
+// If the value contains variable references of the form %(name)s,
+// GetValue unfolds them automatically, up to _DEPTH_VALUES iterations.
+// Variables are resolved first from the DEFAULT section, then from the
+// current section. A blank section name is treated as DEFAULT_SECTION;
+// sub-sections (names containing '.') fall back to their parent section
+// when the key is missing.
+// It returns an error and empty string value if the section does not exist,
+// or the key does not exist in DEFAULT and current sections.
+func (c *ConfigFile) GetValue(section, key string) (string, error) {
+	if c.BlockMode {
+		c.lock.RLock()
+		defer c.lock.RUnlock()
+	}
+
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	// Check if section exists
+	if _, ok := c.data[section]; !ok {
+		// Section does not exist.
+		return "", getError{ERR_SECTION_NOT_FOUND, section}
+	}
+
+	// Section exists.
+	// Check if key exists or empty value.
+	value, ok := c.data[section][key]
+	if !ok {
+		// Check if it is a sub-section.
+		if i := strings.LastIndex(section, "."); i > -1 {
+			return c.GetValue(section[:i], key)
+		}
+
+		// Return empty value.
+		return "", getError{ERR_KEY_NOT_FOUND, key}
+	}
+
+	// Key exists; unfold variable references up to _DEPTH_VALUES deep.
+	var i int
+	for i = 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(value)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading "%(" and trailing ")s" by exact slicing.
+		// BUGFIX: the previous strings.TrimLeft/TrimRight calls treated
+		// "%(" and ")s" as character SETS, so a variable name such as
+		// "keys" (%(keys)s) was mangled to "key". The regexp guarantees
+		// the match starts with "%(" and ends with ")s", so slicing is exact.
+		noption := vr[2 : len(vr)-2]
+
+		// Search variable in default section.
+		nvalue, err := c.GetValue(DEFAULT_SECTION, noption)
+		if err != nil && section != DEFAULT_SECTION {
+			// Search in the same section.
+			if _, ok := c.data[section][noption]; ok {
+				nvalue = c.data[section][noption]
+			}
+		}
+
+		// Substitute the variable reference with its resolved value.
+		value = strings.Replace(value, vr, nvalue, -1)
+	}
+	return value, nil
+}
+
+// Bool returns the value of the given key parsed as a bool via
+// strconv.ParseBool. It returns an error if the key is missing or the
+// value cannot be parsed.
+func (c *ConfigFile) Bool(section, key string) (bool, error) {
+	value, err := c.GetValue(section, key)
+	if err != nil {
+		return false, err
+	}
+	return strconv.ParseBool(value)
+}
+
+// Float64 returns the value of the given key parsed as a float64 via
+// strconv.ParseFloat. It returns an error if the key is missing or the
+// value cannot be parsed.
+func (c *ConfigFile) Float64(section, key string) (float64, error) {
+	value, err := c.GetValue(section, key)
+	if err != nil {
+		return 0.0, err
+	}
+	return strconv.ParseFloat(value, 64)
+}
+
+// Int returns the value of the given key parsed as an int via
+// strconv.Atoi. It returns an error if the key is missing or the
+// value cannot be parsed.
+func (c *ConfigFile) Int(section, key string) (int, error) {
+	value, err := c.GetValue(section, key)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(value)
+}
+
+// Int64 returns the value of the given key parsed as a base-10 int64
+// via strconv.ParseInt. It returns an error if the key is missing or
+// the value cannot be parsed.
+func (c *ConfigFile) Int64(section, key string) (int64, error) {
+	value, err := c.GetValue(section, key)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(value, 10, 64)
+}
+
+// MustValue always returns a value without error.
+// If an error occurs or the value is empty, it returns the default
+// value when one is given, otherwise the (possibly empty) value itself.
+func (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string {
+	val, err := c.GetValue(section, key)
+	if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustValueSet always returns a value without error.
+// If an error occurs or the value is empty, it stores and returns the
+// default value when one is given. The second return value indicates
+// whether the default value was used (and written back).
+func (c *ConfigFile) MustValueSet(section, key string, defaultVal ...string) (string, bool) {
+	val, err := c.GetValue(section, key)
+	if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
+		c.SetValue(section, key, defaultVal[0])
+		return defaultVal[0], true
+	}
+	return val, false
+}
+
+// MustValueRange always returns a value without error.
+// It returns defaultVal if an error occurs, the value is empty, or the
+// value is not one of the given candidates.
+func (c *ConfigFile) MustValueRange(section, key, defaultVal string, candidates []string) string {
+	val, err := c.GetValue(section, key)
+	if err != nil || len(val) == 0 {
+		return defaultVal
+	}
+
+	// Accept the value only if it matches one of the candidates exactly.
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// MustValueArray always returns a value array without error.
+// It returns an empty array if an error occurs or the value is empty;
+// otherwise the value is split by delim and each element is
+// whitespace-trimmed.
+func (c *ConfigFile) MustValueArray(section, key, delim string) []string {
+	val, err := c.GetValue(section, key)
+	if err != nil || len(val) == 0 {
+		return []string{}
+	}
+
+	vals := strings.Split(val, delim)
+	for i := range vals {
+		vals[i] = strings.TrimSpace(vals[i])
+	}
+	return vals
+}
+
+// MustBool always returns a value without error.
+// It returns the default value if one is given and an error occurs,
+// otherwise false on error.
+func (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool {
+	val, err := c.Bool(section, key)
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns a value without error.
+// It returns the default value if one is given and an error occurs,
+// otherwise 0.0 on error.
+func (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 {
+	value, err := c.Float64(section, key)
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return value
+}
+
+// MustInt always returns a value without error.
+// It returns the default value if one is given and an error occurs,
+// otherwise 0 on error.
+func (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int {
+	value, err := c.Int(section, key)
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return value
+}
+
+// MustInt64 always returns a value without error.
+// It returns the default value if one is given and an error occurs,
+// otherwise 0 on error.
+func (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 {
+	value, err := c.Int64(section, key)
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return value
+}
+
+// GetSectionList returns a copy of the list of all sections in the same
+// order as in the file.
+// NOTE(review): unlike most accessors, this reads sectionList without
+// acquiring the lock even in BlockMode — confirm this is intentional.
+func (c *ConfigFile) GetSectionList() []string {
+	list := make([]string, len(c.sectionList))
+	copy(list, c.sectionList)
+	return list
+}
+
+// GetKeyList returns the list of all keys in the given section
+// in the same order as in the file.
+// It returns nil if the given section does not exist.
+func (c *ConfigFile) GetKeyList(section string) []string {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if c.BlockMode {
+		c.lock.RLock()
+		defer c.lock.RUnlock()
+	}
+
+	// Check if section exists.
+	if _, ok := c.data[section]; !ok {
+		return nil
+	}
+
+	// A non-default section has a blank placeholder key as section keeper;
+	// skip it when copying the list out.
+	offset := 1
+	if section == DEFAULT_SECTION {
+		offset = 0
+	}
+
+	list := make([]string, len(c.keyList[section])-offset)
+	copy(list, c.keyList[section][offset:])
+	return list
+}
+
+// DeleteSection deletes the entire section by the given name, including
+// its comments and ordered key list.
+// It returns true if the section was deleted, and false if the section
+// didn't exist.
+func (c *ConfigFile) DeleteSection(section string) bool {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if c.BlockMode {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+	}
+
+	// Check if section exists.
+	if _, ok := c.data[section]; !ok {
+		return false
+	}
+
+	delete(c.data, section)
+	// Remove comments of section.
+	c.SetSectionComments(section, "")
+	// Find the section's index so the ordered section list stays in sync.
+	i := 0
+	for _, secName := range c.sectionList {
+		if secName == section {
+			break
+		}
+		i++
+	}
+	// Remove from section and key list.
+	c.sectionList = append(c.sectionList[:i], c.sectionList[i+1:]...)
+	delete(c.keyList, section)
+	return true
+}
+
+// GetSection returns the key-value pairs in the given section.
+// If the section does not exist, it returns nil and an error.
+// NOTE(review): this returns the internal map (not a copy) and deletes
+// the pre-defined " " section-keeper key from it as a side effect —
+// callers mutating the result will mutate the configuration.
+func (c *ConfigFile) GetSection(section string) (map[string]string, error) {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if c.BlockMode {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+	}
+
+	// Check if section exists.
+	if _, ok := c.data[section]; !ok {
+		// Section does not exist.
+		return nil, getError{ERR_SECTION_NOT_FOUND, section}
+	}
+
+	// Remove pre-defined section-keeper key.
+	secMap := c.data[section]
+	delete(c.data[section], " ")
+
+	// Section exists.
+	return secMap, nil
+}
+
+// SetSectionComments adds new section comments to the configuration.
+// If comments are empty (0 length), it removes the section's comments.
+// It returns true if the comments were inserted or removed,
+// or returns false if existing comments were overwritten.
+// Comments not starting with '#' or ';' are prefixed with "; ".
+func (c *ConfigFile) SetSectionComments(section, comments string) bool {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if len(comments) == 0 {
+		if _, ok := c.sectionComments[section]; ok {
+			delete(c.sectionComments, section)
+		}
+
+		// "Did not exist" can be treated as a successful removal.
+		return true
+	}
+
+	// Check if comments exist.
+	_, ok := c.sectionComments[section]
+	if comments[0] != '#' && comments[0] != ';' {
+		comments = "; " + comments
+	}
+	c.sectionComments[section] = comments
+	return !ok
+}
+
+// SetKeyComments adds new section-key comments to the configuration.
+// If comments are empty (0 length), it removes the section-key comments.
+// It returns true if the comments were inserted or removed,
+// or returns false if existing comments were overwritten.
+// If the section does not exist in advance, it is created.
+// Comments not starting with '#' or ';' are prefixed with "; ".
+func (c *ConfigFile) SetKeyComments(section, key, comments string) bool {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	// Check if section exists.
+	if _, ok := c.keyComments[section]; ok {
+		if len(comments) == 0 {
+			if _, ok := c.keyComments[section][key]; ok {
+				delete(c.keyComments[section], key)
+			}
+
+			// "Did not exist" can be treated as a successful removal.
+			return true
+		}
+	} else {
+		if len(comments) == 0 {
+			// "Did not exist" can be treated as a successful removal.
+			return true
+		} else {
+			// Execute add operation.
+			c.keyComments[section] = make(map[string]string)
+		}
+	}
+
+	// Check if key exists.
+	_, ok := c.keyComments[section][key]
+	if comments[0] != '#' && comments[0] != ';' {
+		comments = "; " + comments
+	}
+	c.keyComments[section][key] = comments
+	return !ok
+}
+
+// GetSectionComments returns the comments of the given section.
+// It returns an empty string (0 length) if the comments do not exist.
+// NOTE(review): reads sectionComments without acquiring the lock even
+// in BlockMode — confirm this is intentional.
+func (c *ConfigFile) GetSectionComments(section string) (comments string) {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+	return c.sectionComments[section]
+}
+
+// GetKeyComments returns the comments of the key in the given section.
+// It returns an empty string (0 length) if the comments do not exist.
+// NOTE(review): reads keyComments without acquiring the lock even in
+// BlockMode — confirm this is intentional.
+func (c *ConfigFile) GetKeyComments(section, key string) (comments string) {
+	// Blank section name represents DEFAULT section.
+	if len(section) == 0 {
+		section = DEFAULT_SECTION
+	}
+
+	if _, ok := c.keyComments[section]; ok {
+		return c.keyComments[section][key]
+	}
+	return ""
+}
+
+// getError describes a failed lookup in the configuration: which
+// ParseError occurred and the offending section or key name.
+type getError struct {
+	Reason ParseError // Why the lookup failed.
+	Name   string     // The section or key name that triggered the error.
+}
+
+// Error implements the error interface, describing which section or key
+// was not found.
+// NOTE(review): ERR_BLANK_SECTION_NAME and ERR_COULD_NOT_PARSE fall
+// through to the generic "invalid get error" message.
+func (err getError) Error() string {
+	switch err.Reason {
+	case ERR_SECTION_NOT_FOUND:
+		return fmt.Sprintf("section '%s' not found", err.Name)
+	case ERR_KEY_NOT_FOUND:
+		return fmt.Sprintf("key '%s' not found", err.Name)
+	}
+	return "invalid get error"
+}

+ 294 - 0
vendor/github.com/Unknwon/goconfig/read.go

@@ -0,0 +1,294 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package goconfig
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+	"time"
+)
+
+// Read reads an io.Reader and returns a configuration representation.
+// This representation can be queried with GetValue.
+func (c *ConfigFile) read(reader io.Reader) (err error) {
+	buf := bufio.NewReader(reader)
+
+	// Handle BOM-UTF8.
+	// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+	mask, err := buf.Peek(3)
+	if err == nil && len(mask) >= 3 &&
+		mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+		// Consume the BOM bytes (0xEF 0xBB 0xBF) so they are not parsed
+		// as part of the first line.
+		buf.Read(mask)
+	}
+
+	count := 1 // Counter for auto increment.
+	// Current section name.
+	section := DEFAULT_SECTION
+	// Comment lines accumulated so far; they are attached to the next
+	// section or key parsed, then reset.
+	var comments string
+	// Parse line-by-line
+	for {
+		line, err := buf.ReadString('\n')
+		line = strings.TrimSpace(line)
+		lineLengh := len(line) //[SWH|+]
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+
+			// Reached end of file, if nothing to read then break,
+			// otherwise handle the last line.
+			if lineLengh == 0 {
+				break
+			}
+		}
+
+		// switch written for readability (not performance)
+		switch {
+		case lineLengh == 0: // Empty line
+			continue
+		case line[0] == '#' || line[0] == ';': // Comment
+			// Append comments
+			if len(comments) == 0 {
+				comments = line
+			} else {
+				comments += LineBreak + line
+			}
+			continue
+		case line[0] == '[' && line[lineLengh-1] == ']': // New section.
+			// Get section name.
+			section = strings.TrimSpace(line[1 : lineLengh-1])
+			// Set section comments and empty if it has comments.
+			if len(comments) > 0 {
+				c.SetSectionComments(section, comments)
+				comments = ""
+			}
+			// Make section exist even though it does not have any key.
+			c.SetValue(section, " ", " ")
+			// Reset counter.
+			count = 1
+			continue
+		case section == "": // No section defined so far
+			return readError{ERR_BLANK_SECTION_NAME, line}
+		default: // Other alternatives
+			var (
+				i        int
+				keyQuote string
+				key      string
+				valQuote string
+				value    string
+			)
+			//[SWH|+]: support key names wrapped in quotes (`...`, "..." or """...""")
+			if line[0] == '"' {
+				if lineLengh >= 6 && line[0:3] == `"""` {
+					keyQuote = `"""`
+				} else {
+					keyQuote = `"`
+				}
+			} else if line[0] == '`' {
+				keyQuote = "`"
+			}
+			if keyQuote != "" {
+				qLen := len(keyQuote)
+				pos := strings.Index(line[qLen:], keyQuote)
+				if pos == -1 {
+					return readError{ERR_COULD_NOT_PARSE, line}
+				}
+				pos = pos + qLen
+				i = strings.IndexAny(line[pos:], "=:")
+				if i <= 0 {
+					return readError{ERR_COULD_NOT_PARSE, line}
+				}
+				i = i + pos
+				key = line[qLen:pos] // keep leading/trailing spaces inside the quotes
+			} else {
+				i = strings.IndexAny(line, "=:")
+				if i <= 0 {
+					return readError{ERR_COULD_NOT_PARSE, line}
+				}
+				key = strings.TrimSpace(line[0:i])
+			}
+			//[SWH|+];
+
+			// Check if it needs auto increment.
+			if key == "-" {
+				key = "#" + fmt.Sprint(count)
+				count++
+			}
+
+			//[SWH|+]: support values wrapped in quotes (`...` or """...""")
+			lineRight := strings.TrimSpace(line[i+1:])
+			lineRightLength := len(lineRight)
+			firstChar := ""
+			if lineRightLength >= 2 {
+				firstChar = lineRight[0:1]
+			}
+			if firstChar == "`" {
+				valQuote = "`"
+			} else if lineRightLength >= 6 && lineRight[0:3] == `"""` {
+				valQuote = `"""`
+			}
+			if valQuote != "" {
+				qLen := len(valQuote)
+				// LastIndex: the value runs up to the final closing quote.
+				pos := strings.LastIndex(lineRight[qLen:], valQuote)
+				if pos == -1 {
+					return readError{ERR_COULD_NOT_PARSE, line}
+				}
+				pos = pos + qLen
+				value = lineRight[qLen:pos]
+			} else {
+				value = strings.TrimSpace(lineRight[0:])
+			}
+			//[SWH|+];
+
+			c.SetValue(section, key, value)
+			// Set key comments and empty if it has comments.
+			if len(comments) > 0 {
+				c.SetKeyComments(section, key, comments)
+				comments = ""
+			}
+		}
+
+		// Reached end of file.
+		if err == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// LoadFromData accepts raw data directly from memory
+// and returns a new configuration representation.
+// Note that the configuration is written to the system
+// temporary folder, so your file should not contain
+// sensitive information.
+func LoadFromData(data []byte) (c *ConfigFile, err error) {
+	// Save memory data to temporary file to support further operations
+	// such as Reload and AppendFiles. UnixNano is used for the name so
+	// two calls in different seconds can never collide (Nanosecond alone
+	// repeats every second).
+	tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().UnixNano()))
+	if err = os.MkdirAll(path.Dir(tmpName), os.ModePerm); err != nil {
+		return nil, err
+	}
+	// 0600: the data may hold configuration secrets, so restrict the temp
+	// file to the current user (the previous 0655 exposed it to group/others).
+	if err = ioutil.WriteFile(tmpName, data, 0600); err != nil {
+		return nil, err
+	}
+
+	c = newConfigFile([]string{tmpName})
+	err = c.read(bytes.NewBuffer(data))
+	return c, err
+}
+
+// LoadFromReader accepts raw data directly from a reader
+// and returns a new configuration representation.
+// You must use ReloadData to reload.
+// You cannot append files to a configuration read this way.
+func LoadFromReader(in io.Reader) (c *ConfigFile, err error) {
+	// A single empty file name marks the configuration as in-memory only.
+	c = newConfigFile([]string{""})
+	err = c.read(in)
+	return
+}
+
+func (c *ConfigFile) loadFile(fileName string) (err error) {
+	f, err := os.Open(fileName)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return c.read(f)
+}
+
+// LoadConfigFile reads a file and returns a new configuration representation.
+// This representation can be queried with GetValue.
+func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err error) {
+	// Collect the primary file and any additional ones, in order.
+	fileNames := append([]string{fileName}, moreFiles...)
+
+	c = newConfigFile(fileNames)
+
+	// Parse every file in sequence; stop at the first failure.
+	for _, name := range fileNames {
+		if err = c.loadFile(name); err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// Reload reloads configuration file in case it has changes.
+func (c *ConfigFile) Reload() (err error) {
+	var cfg *ConfigFile
+	if len(c.fileNames) == 1 {
+		if c.fileNames[0] == "" {
+			return fmt.Errorf("file opened from in-memory data, use ReloadData to reload")
+		}
+		cfg, err = LoadConfigFile(c.fileNames[0])
+	} else {
+		cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...)
+	}
+
+	if err == nil {
+		*c = *cfg
+	}
+	return err
+}
+
+// ReloadData reloads configuration file from memory
+func (c *ConfigFile) ReloadData(in io.Reader) (err error) {
+	var cfg *ConfigFile
+	if len(c.fileNames) != 1 {
+		return fmt.Errorf("Multiple files loaded, unable to mix in-memory and file data")
+	}
+
+	cfg, err = LoadFromReader(in)
+	if err == nil {
+		*c = *cfg
+	}
+	return err
+}
+
+// AppendFiles appends more files to ConfigFile and reload automatically.
+func (c *ConfigFile) AppendFiles(files ...string) error {
+	// A single empty file name marks an in-memory configuration, which
+	// cannot be mixed with file-backed data.
+	if len(c.fileNames) == 1 && c.fileNames[0] == "" {
+		return fmt.Errorf("Cannot append file data to in-memory data")
+	}
+	c.fileNames = append(c.fileNames, files...)
+	// Reload re-parses every file, including the ones just appended.
+	return c.Reload()
+}
+
+// readError occurs when reading a configuration file with a wrong format.
+type readError struct {
+	Reason  ParseError // why parsing failed (e.g. ERR_BLANK_SECTION_NAME, ERR_COULD_NOT_PARSE)
+	Content string     // Line content
+}
+
+// Error implements the error interface, describing why parsing failed.
+func (err readError) Error() string {
+	switch err.Reason {
+	case ERR_BLANK_SECTION_NAME:
+		return "empty section name not allowed"
+	case ERR_COULD_NOT_PARSE:
+		// err.Content is already a string; the previous string() conversion
+		// was redundant.
+		return fmt.Sprintf("could not parse line: %s", err.Content)
+	}
+	// Unknown reason codes fall through to a generic message.
+	return "invalid read error"
+}

+ 117 - 0
vendor/github.com/Unknwon/goconfig/write.go

@@ -0,0 +1,117 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package goconfig
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"strings"
+)
+
+// PrettyFormat controls whether spaces are written around "=" to look better.
+// It is consulted by SaveConfigData each time a configuration is serialized.
+var PrettyFormat = true
+
+// SaveConfigData writes the configuration to a writer in INI format.
+// The output is assembled in an in-memory buffer first and flushed to
+// `out` in a single WriteTo call at the end.
+func SaveConfigData(c *ConfigFile, out io.Writer) (err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	buf := bytes.NewBuffer(nil)
+	for _, section := range c.sectionList {
+		// Write section comments.
+		if len(c.GetSectionComments(section)) > 0 {
+			if _, err = buf.WriteString(c.GetSectionComments(section) + LineBreak); err != nil {
+				return err
+			}
+		}
+
+		// The DEFAULT section is implicit and gets no [header] line.
+		if section != DEFAULT_SECTION {
+			// Write section name.
+			if _, err = buf.WriteString("[" + section + "]" + LineBreak); err != nil {
+				return err
+			}
+		}
+
+		for _, key := range c.keyList[section] {
+			// The single-space key is a placeholder inserted by read() so
+			// empty sections survive a load/save round-trip; skip it here.
+			if key != " " {
+				// Write key comments.
+				if len(c.GetKeyComments(section, key)) > 0 {
+					if _, err = buf.WriteString(c.GetKeyComments(section, key) + LineBreak); err != nil {
+						return err
+					}
+				}
+
+				keyName := key
+				// Check if it's auto increment.
+				if keyName[0] == '#' {
+					keyName = "-"
+				}
+				//[SWH|+]: support key names containing '=' or ':' by quoting them
+				if strings.Contains(keyName, `=`) || strings.Contains(keyName, `:`) {
+					if strings.Contains(keyName, "`") {
+						if strings.Contains(keyName, `"`) {
+							keyName = `"""` + keyName + `"""`
+						} else {
+							keyName = `"` + keyName + `"`
+						}
+					} else {
+						keyName = "`" + keyName + "`"
+					}
+				}
+				value := c.data[section][key]
+				// In case key value contains "`" or "\"".
+				// NOTE(review): values containing '`' are wrapped in '"' (or
+				// `"""`), but read() only recognizes values quoted with '`'
+				// or `"""`, so a single-'"'-quoted value may not round-trip
+				// cleanly — confirm against read() before relying on it.
+				if strings.Contains(value, "`") {
+					if strings.Contains(value, `"`) {
+						value = `"""` + value + `"""`
+					} else {
+						value = `"` + value + `"`
+					}
+				}
+
+				// Write key and value.
+				if _, err = buf.WriteString(keyName + equalSign + value + LineBreak); err != nil {
+					return err
+				}
+			}
+		}
+
+		// Put a line between sections.
+		if _, err = buf.WriteString(LineBreak); err != nil {
+			return err
+		}
+	}
+
+	// Flush the assembled configuration to the destination writer.
+	if _, err := buf.WriteTo(out); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SaveConfigFile writes configuration file to local file system
+func SaveConfigFile(c *ConfigFile, filename string) (err error) {
+	// Write configuration file by filename.
+	var f *os.File
+	if f, err = os.Create(filename); err != nil {
+		return err
+	}
+
+	if err := SaveConfigData(c, f); err != nil {
+		// Close the handle before returning so the descriptor is not
+		// leaked when serialization fails (previously this path returned
+		// without closing); the write error takes precedence over any
+		// close error.
+		f.Close()
+		return err
+	}
+	// Closing flushes OS buffers; report its error so callers know the
+	// data may not have reached disk.
+	return f.Close()
+}

+ 39 - 0
vendor/github.com/agrison/go-tablib/HISTORY.md

@@ -0,0 +1,39 @@
+## History
+
+### 2016-02-26
+- Added support for Markdown tables export
+
+### 2016-02-25
+
+- Constrained columns
+  - `Dataset.ValidSubset()`
+  - `Dataset.InvalidSubset()`
+- Tagging a specific row after it was already created
+- Loading Databooks
+  - JSON
+  - YAML
+- Loading Datasets
+  - CSV
+  - TSV
+  - XML
+- Unit test coverage
+
+### 2016-02-24
+
+- Constrained columns
+- Support for `time.Time` in `Dataset.MySQL()` and `Dataset.Postgres()` export.
+- Source files refactoring
+- Added on travis-ci
+- Retrieving specific rows
+  - `Dataset.Row(int)`
+  - `Dataset.Rows(int...)`
+  - `Dataset.Slice(int, int)`
+
+### 2016-02-23
+
+- First release with support for:
+  - Loading YAML, JSON
+  - Exporting YAML, JSON, CSV, TSV, XLS, XML, ASCII
+  - Filtering + Tagging
+  - Sorting
+  - ...

+ 21 - 0
vendor/github.com/agrison/go-tablib/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Alexandre Grison
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 602 - 0
vendor/github.com/agrison/go-tablib/README.md

@@ -0,0 +1,602 @@
+# go-tablib: format-agnostic tabular dataset library
+
+[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)][license]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+[![Go Report Card](https://goreportcard.com/badge/github.com/agrison/go-tablib)][goreportcard]
+[![Build Status](https://travis-ci.org/agrison/go-tablib.svg?branch=master)](https://travis-ci.org/agrison/go-tablib)
+
+[license]: https://github.com/agrison/go-tablib/blob/master/LICENSE
+[godocs]: https://godoc.org/github.com/agrison/go-tablib
+[goreportcard]: https://goreportcard.com/report/github.com/agrison/go-tablib
+
+Go-Tablib is a format-agnostic tabular dataset library, written in Go.
+This is a port of the famous Python's [tablib](https://github.com/kennethreitz/tablib) by Kenneth Reitz with some new features.
+
+Export formats supported:
+
+* JSON (Sets + Books)
+* YAML (Sets + Books)
+* XLSX (Sets + Books)
+* XML (Sets + Books)
+* TSV (Sets)
+* CSV (Sets)
+* ASCII + Markdown (Sets)
+* MySQL (Sets)
+* Postgres (Sets)
+
+Loading formats supported:
+
+* JSON (Sets + Books)
+* YAML (Sets + Books)
+* XML (Sets)
+* CSV (Sets)
+* TSV (Sets)
+
+
+## Overview
+
+### tablib.Dataset
+A Dataset is a table of tabular data. It must have a header row. Datasets can be exported to JSON, YAML, CSV, TSV, and XML. They can be filtered, sorted and validated against constraint on columns.
+
+### tablib.Databook
+A Databook is a set of Datasets. The most common form of a Databook is an Excel file with multiple spreadsheets. Databooks can be exported to JSON, YAML and XML.
+
+### tablib.Exportable
+An exportable is a struct that holds a buffer representing the Databook or Dataset after it has been formatted to any of the supported export formats.
+At this point the Databook or Dataset cannot be modified anymore, but it can be returned as a `string`, a `[]byte` or written to a `io.Writer` or a file.
+
+## Usage
+
+Creates a dataset and populate it:
+
+```go
+ds := NewDataset([]string{"firstName", "lastName"})
+```
+
+Add new rows:
+```go
+ds.Append([]interface{}{"John", "Adams"})
+ds.AppendValues("George", "Washington")
+```
+
+Add new columns:
+```go
+ds.AppendColumn("age", []interface{}{90, 67})
+ds.AppendColumnValues("sex", "male", "male")
+```
+
+Add a dynamic column, by passing a function which has access to the current row, and must
+return a value:
+```go
+func lastNameLen(row []interface{}) interface{} {
+	return len(row[1].(string))
+}
+ds.AppendDynamicColumn("lastName length", lastNameLen)
+ds.CSV()
+// >>
+// firstName, lastName, age, sex, lastName length
+// John, Adams, 90, male, 5
+// George, Washington, 67, male, 10
+```
+
+Delete rows:
+```go
+ds.DeleteRow(1) // starts at 0
+```
+
+Delete columns:
+```go
+ds.DeleteColumn("sex")
+```
+
+Get a row or multiple rows:
+```go
+row, _ := ds.Row(0)
+fmt.Println(row["firstName"]) // George
+
+rows, _ := ds.Rows(0, 1)
+fmt.Println(rows[0]["firstName"]) // George
+fmt.Println(rows[1]["firstName"]) // Thomas
+```
+
+Slice a Dataset:
+```go
+newDs, _ := ds.Slice(1, 5) // returns a fresh Dataset with rows [1..5[
+```
+
+
+## Filtering
+
+You can add **tags** to rows by using a specific `Dataset` method. This allows you to filter your `Dataset` later. This can be useful to separate rows of data based on arbitrary criteria (e.g. origin) that you don’t want to include in your `Dataset`.
+```go
+ds := NewDataset([]string{"Maker", "Model"})
+ds.AppendTagged([]interface{}{"Porsche", "911"}, "fast", "luxury")
+ds.AppendTagged([]interface{}{"Skoda", "Octavia"}, "family")
+ds.AppendTagged([]interface{}{"Ferrari", "458"}, "fast", "luxury")
+ds.AppendValues("Citroen", "Picasso")
+ds.AppendValues("Bentley", "Continental")
+ds.Tag(4, "luxury") // Bentley
+ds.AppendValuesTagged("Aston Martin", "DB9", /* these are tags */ "fast", "luxury")
+```
+
+Filtering the `Dataset` is possible by calling `Filter(column)`:
+```go
+luxuryCars, err := ds.Filter("luxury").CSV()
+fmt.Println(luxuryCars)
+// >>>
+// Maker,Model
+// Porsche,911
+// Ferrari,458
+// Bentley,Continental
+// Aston Martin,DB9
+```
+
+```go
+fastCars, err := ds.Filter("fast").CSV()
+fmt.Println(fastCars)
+// >>>
+// Maker,Model
+// Porsche,911
+// Ferrari,458
+// Aston Martin,DB9
+```
+
+Tags at a specific row can be retrieved by calling `Dataset.Tags(index int)`
+
+## Sorting
+
+Datasets can be sorted by a specific column.
+```go
+ds := NewDataset([]string{"Maker", "Model", "Year"})
+ds.AppendValues("Porsche", "991", 2012)
+ds.AppendValues("Skoda", "Octavia", 2011)
+ds.AppendValues("Ferrari", "458", 2009)
+ds.AppendValues("Citroen", "Picasso II", 2013)
+ds.AppendValues("Bentley", "Continental GT", 2003)
+
+sorted, err := ds.Sort("Year").CSV()
+fmt.Println(sorted)
+// >>
+// Maker, Model, Year
+// Bentley, Continental GT, 2003
+// Ferrari, 458, 2009
+// Skoda, Octavia, 2011
+// Porsche, 991, 2012
+// Citroen, Picasso II, 2013
+```
+
+## Constraining
+
+Datasets can have columns constrained by functions and further checked if valid.
+```go
+ds := NewDataset([]string{"Maker", "Model", "Year"})
+ds.AppendValues("Porsche", "991", 2012)
+ds.AppendValues("Skoda", "Octavia", 2011)
+ds.AppendValues("Ferrari", "458", 2009)
+ds.AppendValues("Citroen", "Picasso II", 2013)
+ds.AppendValues("Bentley", "Continental GT", 2003)
+
+ds.ConstrainColumn("Year", func(val interface{}) bool { return val.(int) > 2008 })
+ds.ValidFailFast() // false
+if !ds.Valid() { // validate the whole dataset, errors are retrieved in Dataset.ValidationErrors
+	ds.ValidationErrors[0] // Row: 4, Column: 2
+}
+```
+
+A Dataset with constrained columns can be filtered to keep only the rows satisfying the constraints.
+```go
+valid := ds.ValidSubset().Tabular("simple") // Cars after 2008
+fmt.Println(valid)
+```
+
+Will output:
+```
+------------  ---------------  ---------
+      Maker            Model       Year
+------------  ---------------  ---------
+    Porsche              991       2012
+
+      Skoda          Octavia       2011
+
+    Ferrari              458       2009
+
+    Citroen       Picasso II       2013
+------------  ---------------  ---------
+```
+
+```go
+invalid := ds.InvalidSubset().Tabular("simple") // Cars before 2008
+fmt.Println(invalid)
+```
+
+Will output:
+```
+------------  -------------------  ---------
+      Maker                Model       Year
+------------  -------------------  ---------
+    Bentley       Continental GT       2003
+------------  -------------------  ---------
+```
+
+## Loading
+
+### JSON
+```go
+ds, _ := LoadJSON([]byte(`[
+  {"age":90,"firstName":"John","lastName":"Adams"},
+  {"age":67,"firstName":"George","lastName":"Washington"},
+  {"age":83,"firstName":"Henry","lastName":"Ford"}
+]`))
+```
+
+### YAML
+```go
+ds, _ := LoadYAML([]byte(`- age: 90
+  firstName: John
+  lastName: Adams
+- age: 67
+  firstName: George
+  lastName: Washington
+- age: 83
+  firstName: Henry
+  lastName: Ford`))
+```
+
+## Exports
+
+### Exportable
+
+Any of the following export format returns an `*Exportable` which means you can use:
+- `Bytes()` to get the content as a byte array
+- `String()` to get the content as a string
+- `WriteTo(io.Writer)` to write the content to an `io.Writer`
+- `WriteFile(filename string, perm os.FileMode)` to write to a file
+
+It avoids unnecessary conversion between `string` and `[]byte` to output/write/whatever.
+Thanks to [@figlief](https://github.com/figlief) for the suggestion.
+
+### JSON
+```go
+json, _ := ds.JSON()
+fmt.Println(json)
+```
+
+Will output:
+```json
+[{"age":90,"firstName":"John","lastName":"Adams"},{"age":67,"firstName":"George","lastName":"Washington"},{"age":83,"firstName":"Henry","lastName":"Ford"}]
+```
+
+### XML
+```go
+xml, _ := ds.XML()
+fmt.Println(xml)
+```
+
+Will output:
+```xml
+<dataset>
+ <row>
+   <age>90</age>
+   <firstName>John</firstName>
+   <lastName>Adams</lastName>
+ </row>  <row>
+   <age>67</age>
+   <firstName>George</firstName>
+   <lastName>Washington</lastName>
+ </row>  <row>
+   <age>83</age>
+   <firstName>Henry</firstName>
+   <lastName>Ford</lastName>
+ </row>
+</dataset>
+```
+
+### CSV
+```go
+csv, _ := ds.CSV()
+fmt.Println(csv)
+```
+
+Will output:
+```csv
+firstName,lastName,age
+John,Adams,90
+George,Washington,67
+Henry,Ford,83
+```
+
+### TSV
+```go
+tsv, _ := ds.TSV()
+fmt.Println(tsv)
+```
+
+Will output:
+```tsv
+firstName lastName  age
+John  Adams  90
+George  Washington  67
+Henry Ford 83
+```
+
+### YAML
+```go
+yaml, _ := ds.YAML()
+fmt.Println(yaml)
+```
+
+Will output:
+```yaml
+- age: 90
+  firstName: John
+  lastName: Adams
+- age: 67
+  firstName: George
+  lastName: Washington
+- age: 83
+  firstName: Henry
+  lastName: Ford
+```
+
+### HTML
+```go
+html := ds.HTML()
+fmt.Println(html)
+```
+
+Will output:
+```html
+<table class="table table-striped">
+	<thead>
+		<tr>
+			<th>firstName</th>
+			<th>lastName</th>
+			<th>age</th>
+		</tr>
+	</thead>
+	<tbody>
+		<tr>
+			<td>George</td>
+			<td>Washington</td>
+			<td>90</td>
+		</tr>
+		<tr>
+			<td>Henry</td>
+			<td>Ford</td>
+			<td>67</td>
+		</tr>
+		<tr>
+			<td>Foo</td>
+			<td>Bar</td>
+			<td>83</td>
+		</tr>
+	</tbody>
+</table>
+```
+
+### XLSX
+```go
+xlsx, _ := ds.XLSX()
+fmt.Println(xlsx)
+// >>>
+// binary content
+xlsx.WriteTo(...)
+```
+
+### ASCII
+
+#### Grid format
+```go
+ascii := ds.Tabular("grid" /* tablib.TabularGrid */)
+fmt.Println(ascii)
+```
+
+Will output:
+```
++--------------+---------------+--------+
+|    firstName |      lastName |    age |
++==============+===============+========+
+|       George |    Washington |     90 |
++--------------+---------------+--------+
+|        Henry |          Ford |     67 |
++--------------+---------------+--------+
+|          Foo |           Bar |     83 |
++--------------+---------------+--------+
+```
+
+#### Simple format
+```go
+ascii := ds.Tabular("simple" /* tablib.TabularSimple */)
+fmt.Println(ascii)
+```
+
+Will output:
+```
+--------------  ---------------  --------
+    firstName         lastName       age
+--------------  ---------------  --------
+       George       Washington        90
+
+        Henry             Ford        67
+
+          Foo              Bar        83
+--------------  ---------------  --------
+```
+
+#### Condensed format
+```go
+ascii := ds.Tabular("condensed" /* tablib.TabularCondensed */)
+fmt.Println(ascii)
+```
+
+Similar to simple but with fewer line feeds:
+```
+--------------  ---------------  --------
+    firstName         lastName       age
+--------------  ---------------  --------
+       George       Washington        90
+        Henry             Ford        67
+          Foo              Bar        83
+--------------  ---------------  --------
+```
+
+### Markdown
+
+Markdown tables are similar to the Tabular condensed format, except that they have
+pipe characters separating columns.
+
+```go
+mkd := ds.Markdown() // or
+mkd := ds.Tabular("markdown" /* tablib.TabularMarkdown */)
+fmt.Println(mkd)
+```
+
+Will output:
+```
+|     firstName   |       lastName    |    gpa  |
+| --------------  | ---------------   | ------- |
+|          John   |          Adams    |     90  |
+|        George   |     Washington    |     67  |
+|        Thomas   |      Jefferson    |     50  |
+```
+
+Which equals to the following when rendered as HTML:
+
+|     firstName   |       lastName    |    gpa  |
+| --------------  | ---------------   | ------- |
+|          John   |          Adams    |     90  |
+|        George   |     Washington    |     67  |
+|        Thomas   |      Jefferson    |     50  |
+
+### MySQL
+```go
+sql := ds.MySQL()
+fmt.Println(sql)
+```
+
+Will output:
+```sql
+CREATE TABLE IF NOT EXISTS presidents
+(
+	id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
+	firstName VARCHAR(9),
+	lastName VARCHAR(8),
+	gpa DOUBLE
+);
+
+INSERT INTO presidents VALUES(1, 'Jacques', 'Chirac', 88);
+INSERT INTO presidents VALUES(2, 'Nicolas', 'Sarkozy', 98);
+INSERT INTO presidents VALUES(3, 'François', 'Hollande', 34);
+
+COMMIT;
+```
+
+Numeric (`uint`, `int`, `float`, ...) are stored as `DOUBLE`, `string`s as `VARCHAR` with width set to the length of the longest string in the column, and `time.Time`s are stored as `TIMESTAMP`.
+
+### Postgres
+```go
+sql := ds.Postgres()
+fmt.Println(sql)
+```
+
+Will output:
+```sql
+CREATE TABLE IF NOT EXISTS presidents
+(
+	id SERIAL PRIMARY KEY,
+	firstName TEXT,
+	lastName TEXT,
+	gpa NUMERIC
+);
+
+INSERT INTO presidents VALUES(1, 'Jacques', 'Chirac', 88);
+INSERT INTO presidents VALUES(2, 'Nicolas', 'Sarkozy', 98);
+INSERT INTO presidents VALUES(3, 'François', 'Hollande', 34);
+
+COMMIT;
+```
+
+Numerics (`uint`, `int`, `float`, ...) are stored as `NUMERIC`, `string`s as `TEXT` and `time.Time`s are stored as `TIMESTAMP`.
+
+## Databooks
+
+This is an example of how to use Databooks.
+
+```go
+db := NewDatabook()
+// or loading a JSON content
+db, err := LoadDatabookJSON([]byte(`...`))
+// or a YAML content
+db, err := LoadDatabookYAML([]byte(`...`))
+
+// a dataset of presidents
+presidents, _ := LoadJSON([]byte(`[
+  {"Age":90,"First name":"John","Last name":"Adams"},
+  {"Age":67,"First name":"George","Last name":"Washington"},
+  {"Age":83,"First name":"Henry","Last name":"Ford"}
+]`))
+
+// a dataset of cars
+cars := NewDataset([]string{"Maker", "Model", "Year"})
+cars.AppendValues("Porsche", "991", 2012)
+cars.AppendValues("Skoda", "Octavia", 2011)
+cars.AppendValues("Ferrari", "458", 2009)
+cars.AppendValues("Citroen", "Picasso II", 2013)
+cars.AppendValues("Bentley", "Continental GT", 2003)
+
+// add the sheets to the Databook
+db.AddSheet("Cars", cars.Sort("Year"))
+db.AddSheet("Presidents", presidents.SortReverse("Age"))
+
+fmt.Println(db.JSON())
+```
+
+Will output the following JSON representation of the Databook:
+```json
+[
+  {
+    "title": "Cars",
+    "data": [
+      {"Maker":"Bentley","Model":"Continental GT","Year":2003},
+      {"Maker":"Ferrari","Model":"458","Year":2009},
+      {"Maker":"Skoda","Model":"Octavia","Year":2011},
+      {"Maker":"Porsche","Model":"991","Year":2012},
+      {"Maker":"Citroen","Model":"Picasso II","Year":2013}
+    ]
+  },
+  {
+    "title": "Presidents",
+    "data": [
+      {"Age":90,"First name":"John","Last name":"Adams"},
+      {"Age":83,"First name":"Henry","Last name":"Ford"},
+      {"Age":67,"First name":"George","Last name":"Washington"}
+    ]
+  }
+]
+```
+
+## Installation
+
+```bash
+go get github.com/agrison/go-tablib
+```
+
+For those wanting the v1 version where export methods returned a `string` and not an `Exportable`:
+```bash
+go get gopkg.in/agrison/go-tablib.v1
+```
+
+## TODO
+
+* Loading in more formats
+* Support more formats: DBF, XLS, LATEX, ...
+
+## Contribute
+
+It is a work in progress, so there may be some bugs and edge cases not covered by the test suite.
+
+But we're on Github and this is Open Source, pull requests are more than welcome, come and have some fun :)
+
+## Acknowledgement
+
+Thanks to kennethreitz for the first implementation in Python, [`github.com/bndr/gotabulate`](https://github.com/bndr/gotabulate), [`github.com/clbanning/mxj`](https://github.com/clbanning/mxj), [`github.com/tealeg/xlsx`](https://github.com/tealeg/xlsx), [`gopkg.in/yaml.v2`](https://gopkg.in/yaml.v2)

+ 81 - 0
vendor/github.com/agrison/go-tablib/tablib_csv.go

@@ -0,0 +1,81 @@
+package tablib
+
+import (
+	"bytes"
+	"encoding/csv"
+)
+
+// CSV returns a CSV representation of the Dataset as an Exportable.
+func (d *Dataset) CSV() (*Exportable, error) {
+	b := newBuffer()
+	w := csv.NewWriter(b)
+
+	// WriteAll flushes internally; any error surfaces via w.Error().
+	w.WriteAll(d.Records())
+	if err := w.Error(); err != nil {
+		return nil, err
+	}
+
+	return newExportable(b), nil
+}
+
+// LoadCSV loads a Dataset by its CSV representation.
+// The first record becomes the header row; an input with no records at
+// all yields an empty Dataset rather than a panic.
+func LoadCSV(input []byte) (*Dataset, error) {
+	reader := csv.NewReader(bytes.NewReader(input))
+	records, err := reader.ReadAll()
+	if err != nil {
+		return nil, err
+	}
+	// Guard: records[0] previously panicked with index out of range when
+	// the input was empty.
+	if len(records) == 0 {
+		return NewDataset(nil), nil
+	}
+
+	ds := NewDataset(records[0])
+	for _, record := range records[1:] {
+		// []string must be widened element-by-element to []interface{}
+		// for Append.
+		row := make([]interface{}, len(record))
+		for k, v := range record {
+			row[k] = v
+		}
+		ds.Append(row)
+	}
+
+	return ds, nil
+}
+
+// TSV returns a TSV representation of the Dataset as an Exportable.
+func (d *Dataset) TSV() (*Exportable, error) {
+	b := newBuffer()
+	w := csv.NewWriter(b)
+	w.Comma = '\t' // tab-separated instead of the comma default
+
+	// WriteAll flushes internally; any error surfaces via w.Error().
+	w.WriteAll(d.Records())
+	if err := w.Error(); err != nil {
+		return nil, err
+	}
+
+	return newExportable(b), nil
+}
+
+// LoadTSV loads a Dataset by its TSV representation.
+// The first record becomes the header row; an input with no records at
+// all yields an empty Dataset rather than a panic.
+func LoadTSV(input []byte) (*Dataset, error) {
+	reader := csv.NewReader(bytes.NewReader(input))
+	reader.Comma = '\t'
+
+	records, err := reader.ReadAll()
+	if err != nil {
+		return nil, err
+	}
+	// Guard: records[0] previously panicked with index out of range when
+	// the input was empty.
+	if len(records) == 0 {
+		return NewDataset(nil), nil
+	}
+
+	ds := NewDataset(records[0])
+	for _, record := range records[1:] {
+		// []string must be widened element-by-element to []interface{}
+		// for Append.
+		row := make([]interface{}, len(record))
+		for k, v := range record {
+			row[k] = v
+		}
+		ds.Append(row)
+	}
+
+	return ds, nil
+}

+ 54 - 0
vendor/github.com/agrison/go-tablib/tablib_databook.go

@@ -0,0 +1,54 @@
+package tablib
+
+// Sheet represents a sheet in a Databook, holding a title (if any) and a dataset.
+type Sheet struct {
+	title   string   // display name of the sheet
+	dataset *Dataset // the tabular data this sheet holds
+}
+
+// Title returns the title of the sheet.
+func (s Sheet) Title() string {
+	return s.title
+}
+
+// Dataset returns the dataset of the sheet (the internal pointer, not a copy).
+func (s Sheet) Dataset() *Dataset {
+	return s.dataset
+}
+
+// Databook represents a Databook which is a collection of sheets.
+type Databook struct {
+	sheets map[string]Sheet // sheets indexed by their title
+}
+
+// NewDatabook constructs a new, empty Databook.
+func NewDatabook() *Databook {
+	return &Databook{sheets: make(map[string]Sheet)}
+}
+
+// Sheets returns the sheets in the Databook.
+// The returned map is the Databook's internal storage, not a copy.
+func (d *Databook) Sheets() map[string]Sheet {
+	return d.sheets
+}
+
+// Sheet returns the sheet with a specific title.
+// A zero-value Sheet is returned when no sheet has that title.
+func (d *Databook) Sheet(title string) Sheet {
+	return d.sheets[title]
+}
+
+// AddSheet adds a sheet to the Databook,
+// replacing any existing sheet with the same title.
+func (d *Databook) AddSheet(title string, dataset *Dataset) {
+	d.sheets[title] = Sheet{title, dataset}
+}
+
+// Size returns the number of sheets in the Databook.
+func (d *Databook) Size() int {
+	return len(d.sheets)
+}
+
+// Wipe removes all Dataset objects from the Databook.
+// Keys are deleted in place, so map references previously obtained via
+// Sheets() observe the wipe as well.
+func (d *Databook) Wipe() {
+	for k := range d.sheets {
+		delete(d.sheets, k)
+	}
+}

+ 745 - 0
vendor/github.com/agrison/go-tablib/tablib_dataset.go

@@ -0,0 +1,745 @@
+// Package tablib is a format-agnostic tabular Dataset library, written in Go.
+// It allows you to import, export, and manipulate tabular data sets.
+// Advanced features include, dynamic columns, tags & filtering, and seamless format import & export.
+package tablib
+
+import (
+	"fmt"
+	"sort"
+	"time"
+)
+
// Dataset represents a set of data: a list of rows plus a header for each
// column, with optional per-row tags and per-column validation constraints.
type Dataset struct {
	// EmptyValue represents the string value to be output if a field cannot be
	// formatted as a string during output of certain formats.
	EmptyValue       string
	headers          []string           // one entry per column
	data             [][]interface{}    // row-major cell values; cells may hold DynamicColumn funcs
	tags             [][]string         // per-row tags, parallel to data
	constraints      []ColumnConstraint // per-column validators, parallel to headers (nil = unconstrained)
	rows             int                // cached row count
	cols             int                // cached column count
	ValidationErrors []ValidationError  // populated by Valid()
}

// DynamicColumn represents a function that computes a cell value from its
// row on demand, evaluated when exporting or reading the Dataset.
type DynamicColumn func([]interface{}) interface{}

// ColumnConstraint represents a predicate bound to a column; it returns
// whether a given cell value is valid for that column.
type ColumnConstraint func(interface{}) bool

// ValidationError holds the position (row, column) of a value in the Dataset
// that failed to validate its column constraint.
type ValidationError struct {
	Row    int
	Column int
}
+
+// NewDataset creates a new Dataset.
+func NewDataset(headers []string) *Dataset {
+	return NewDatasetWithData(headers, nil)
+}
+
+// NewDatasetWithData creates a new Dataset.
+func NewDatasetWithData(headers []string, data [][]interface{}) *Dataset {
+	d := &Dataset{"", headers, data, make([][]string, 0), make([]ColumnConstraint,
+		len(headers)), len(data), len(headers), nil}
+	return d
+}
+
// Headers returns the headers of the Dataset.
func (d *Dataset) Headers() []string {
	return d.headers
}

// Width returns the number of columns in the Dataset.
func (d *Dataset) Width() int {
	return d.cols
}

// Height returns the number of rows in the Dataset.
func (d *Dataset) Height() int {
	return d.rows
}

// Append appends a row of values to the Dataset.
// Returns ErrInvalidDimensions when the row width differs from the Dataset width.
func (d *Dataset) Append(row []interface{}) error {
	if len(row) != d.cols {
		return ErrInvalidDimensions
	}
	d.data = append(d.data, row)
	// every row gets its own (initially empty) tag list
	d.tags = append(d.tags, make([]string, 0))
	d.rows++
	return nil
}

// AppendTagged appends a row of values to the Dataset with one or multiple tags
// for filtering purposes.
func (d *Dataset) AppendTagged(row []interface{}, tags ...string) error {
	if err := d.Append(row); err != nil {
		return err
	}
	// replace the empty tag list just created by Append
	d.tags[d.rows-1] = tags[:]
	return nil
}

// AppendValues appends a row of values (passed as variadic arguments) to the Dataset.
func (d *Dataset) AppendValues(row ...interface{}) error {
	return d.Append(row[:])
}

// AppendValuesTagged appends a row of values to the Dataset; every variadic
// argument beyond the Dataset width is interpreted as a string tag for the row.
// Returns ErrInvalidDimensions when fewer values than columns are given and
// ErrInvalidTag when an extra argument is not a string.
func (d *Dataset) AppendValuesTagged(row ...interface{}) error {
	if len(row) < d.cols {
		return ErrInvalidDimensions
	}
	var tags []string
	for _, tag := range row[d.cols:] {
		if tagStr, ok := tag.(string); ok {
			tags = append(tags, tagStr)
		} else {
			return ErrInvalidTag
		}
	}
	return d.AppendTagged(row[:d.cols], tags...)
}
+
// Insert inserts a row at a given index.
// Returns ErrInvalidRowIndex when index is out of range and
// ErrInvalidDimensions when the row width differs from the Dataset width.
func (d *Dataset) Insert(index int, row []interface{}) error {
	if index < 0 || index >= d.rows {
		return ErrInvalidRowIndex
	}

	if len(row) != d.cols {
		return ErrInvalidDimensions
	}

	// splice the row into the data slice
	ndata := make([][]interface{}, 0, d.rows+1)
	ndata = append(ndata, d.data[:index]...)
	ndata = append(ndata, row)
	ndata = append(ndata, d.data[index:]...)
	d.data = ndata
	d.rows++

	// splice an empty tag list at the same position to keep tags aligned
	ntags := make([][]string, 0, d.rows+1)
	ntags = append(ntags, d.tags[:index]...)
	ntags = append(ntags, make([]string, 0))
	ntags = append(ntags, d.tags[index:]...)
	d.tags = ntags

	return nil
}

// InsertValues inserts a row of values (passed as variadic arguments) at a given index.
func (d *Dataset) InsertValues(index int, values ...interface{}) error {
	return d.Insert(index, values[:])
}
+
+// InsertTagged inserts a row at a given index with specific tags.
+func (d *Dataset) InsertTagged(index int, row []interface{}, tags ...string) error {
+	if err := d.Insert(index, row); err != nil {
+		return err
+	}
+	d.Insert(index, row)
+	d.tags[index] = tags[:]
+
+	return nil
+}
+
// Tag adds the given tags to the row at the given index, skipping tags the
// row already carries. Returns ErrInvalidRowIndex if the row does not exist.
func (d *Dataset) Tag(index int, tags ...string) error {
	if index < 0 || index >= d.rows {
		return ErrInvalidRowIndex
	}

	for _, tag := range tags {
		// avoid duplicate tags on the same row
		if !isTagged(tag, d.tags[index]) {
			d.tags[index] = append(d.tags[index], tag)
		}
	}

	return nil
}

// Tags returns the tags of the row at the given index.
// Returns ErrInvalidRowIndex if the row does not exist.
// NOTE(review): the internal tag slice is returned directly, so mutating it
// affects the Dataset — confirm callers treat it as read-only.
func (d *Dataset) Tags(index int) ([]string, error) {
	if index < 0 || index >= d.rows {
		return nil, ErrInvalidRowIndex
	}

	return d.tags[index], nil
}
+
// AppendColumn appends a new column with values to the Dataset.
// Returns ErrInvalidDimensions when len(cols) differs from the row count.
func (d *Dataset) AppendColumn(header string, cols []interface{}) error {
	if len(cols) != d.rows {
		return ErrInvalidDimensions
	}
	d.headers = append(d.headers, header)
	d.constraints = append(d.constraints, nil) // no constraint by default
	d.cols++
	for i, e := range d.data {
		d.data[i] = append(e, cols[i])
	}
	return nil
}

// AppendConstrainedColumn appends a new column whose values are bound to the
// given constraint (checked later by Valid/ValidFailFast).
func (d *Dataset) AppendConstrainedColumn(header string, constraint ColumnConstraint, cols []interface{}) error {
	err := d.AppendColumn(header, cols)
	if err != nil {
		return err
	}

	d.constraints[d.cols-1] = constraint
	return nil
}

// AppendColumnValues appends a new column with values (passed as variadic arguments).
func (d *Dataset) AppendColumnValues(header string, cols ...interface{}) error {
	return d.AppendColumn(header, cols[:])
}

// AppendDynamicColumn appends a dynamic column: every cell of the column
// holds fn, which is evaluated against the row whenever the value is read.
func (d *Dataset) AppendDynamicColumn(header string, fn DynamicColumn) {
	d.headers = append(d.headers, header)
	d.constraints = append(d.constraints, nil)
	d.cols++
	for i, e := range d.data {
		d.data[i] = append(e, fn)
	}
}

// ConstrainColumn attaches a constraint to an existing column; it is a
// silent no-op when the column does not exist.
func (d *Dataset) ConstrainColumn(header string, constraint ColumnConstraint) {
	i := indexOfColumn(header, d)
	if i != -1 {
		d.constraints[i] = constraint
	}
}
+
// InsertColumn inserts a new column with values at a given index.
// Returns ErrInvalidColumnIndex when index is out of range and
// ErrInvalidDimensions when len(cols) differs from the row count.
func (d *Dataset) InsertColumn(index int, header string, cols []interface{}) error {
	if index < 0 || index >= d.cols {
		return ErrInvalidColumnIndex
	}

	if len(cols) != d.rows {
		return ErrInvalidDimensions
	}

	// grows headers and constraints, and increments d.cols
	d.insertHeader(index, header)

	// for each row, insert the column
	for i, r := range d.data {
		row := make([]interface{}, 0, d.cols)
		row = append(row, r[:index]...)
		row = append(row, cols[i])
		row = append(row, r[index:]...)
		d.data[i] = row
	}

	return nil
}

// InsertDynamicColumn inserts a new dynamic column at a given index; every
// cell holds fn, evaluated against the row whenever the value is read.
func (d *Dataset) InsertDynamicColumn(index int, header string, fn DynamicColumn) error {
	if index < 0 || index >= d.cols {
		return ErrInvalidColumnIndex
	}

	d.insertHeader(index, header)

	// for each row, insert the column
	for i, r := range d.data {
		row := make([]interface{}, 0, d.cols)
		row = append(row, r[:index]...)
		row = append(row, fn)
		row = append(row, r[index:]...)
		d.data[i] = row
	}

	return nil
}

// InsertConstrainedColumn inserts a new column at a given index and attaches
// the given constraint to it.
func (d *Dataset) InsertConstrainedColumn(index int, header string,
	constraint ColumnConstraint, cols []interface{}) error {
	err := d.InsertColumn(index, header, cols)
	if err != nil {
		return err
	}

	d.constraints[index] = constraint
	return nil
}

// insertHeader splices a header at the given index, keeps the parallel
// constraints slice aligned with a nil entry, and increments the column count.
func (d *Dataset) insertHeader(index int, header string) {
	headers := make([]string, 0, d.cols+1)
	headers = append(headers, d.headers[:index]...)
	headers = append(headers, header)
	headers = append(headers, d.headers[index:]...)
	d.headers = headers

	constraints := make([]ColumnConstraint, 0, d.cols+1)
	constraints = append(constraints, d.constraints[:index]...)
	constraints = append(constraints, nil)
	constraints = append(constraints, d.constraints[index:]...)
	d.constraints = constraints

	d.cols++
}
+
+// ValidFailFast returns whether the Dataset is valid regarding constraints that have
+// been previously set on columns.
+func (d *Dataset) ValidFailFast() bool {
+	valid := true
+	for column, constraint := range d.constraints {
+		if constraint != nil {
+			for row, val := range d.Column(d.headers[column]) {
+				cellIsValid := true
+
+				switch val.(type) {
+				case DynamicColumn:
+					cellIsValid = constraint((val.(DynamicColumn))(d.data[row]))
+				default:
+					cellIsValid = constraint(val)
+				}
+
+				if !cellIsValid {
+					valid = false
+					break
+				}
+			}
+		}
+	}
+
+	if valid {
+		d.ValidationErrors = make([]ValidationError, 0)
+	}
+
+	return valid
+}
+
+// Valid returns whether the Dataset is valid regarding constraints that have
+// been previously set on columns.
+// Its behaviour is different of ValidFailFast in a sense that it will validate the whole
+// Dataset and all the validation errors will be available by using Dataset.ValidationErrors
+func (d *Dataset) Valid() bool {
+	d.ValidationErrors = make([]ValidationError, 0)
+
+	valid := true
+	for column, constraint := range d.constraints {
+		if constraint != nil {
+			for row, val := range d.Column(d.headers[column]) {
+				cellIsValid := true
+
+				switch val.(type) {
+				case DynamicColumn:
+					cellIsValid = constraint((val.(DynamicColumn))(d.data[row]))
+				default:
+					cellIsValid = constraint(val)
+				}
+
+				if !cellIsValid {
+					d.ValidationErrors = append(d.ValidationErrors,
+						ValidationError{Row: row, Column: column})
+					valid = false
+				}
+			}
+		}
+	}
+	return valid
+}
+
+// HasAnyConstraint returns whether the Dataset has any constraint set.
+func (d *Dataset) HasAnyConstraint() bool {
+	hasConstraint := false
+	for _, constraint := range d.constraints {
+		if constraint != nil {
+			hasConstraint = true
+			break
+		}
+	}
+	return hasConstraint
+}
+
// ValidSubset returns a new Dataset containing only the rows validating their
// constraints. This is similar to what Filter() does with tags, but with constraints.
// If no constraints are set, it returns the same instance.
// Note: The returned Dataset is free of any constraints, tags are conserved.
func (d *Dataset) ValidSubset() *Dataset {
	return d.internalValidSubset(true)
}

// InvalidSubset returns a new Dataset containing only the rows failing to
// validate their constraints.
// If no constraints are set, it returns the same instance.
// Note: The returned Dataset is free of any constraints, tags are conserved.
func (d *Dataset) InvalidSubset() *Dataset {
	return d.internalValidSubset(false)
}

// internalValidSubset returns a new Dataset containing only the rows that
// validate (valid == true) or fail (valid == false) their constraints.
// NOTE(review): when valid == false there is no early break, so `keep`
// reflects only the LAST constrained column of the row — a row failing an
// early constraint but passing a later one is dropped; confirm intended.
func (d *Dataset) internalValidSubset(valid bool) *Dataset {
	if !d.HasAnyConstraint() {
		return d
	}

	nd := NewDataset(d.headers)
	nd.data = make([][]interface{}, 0)
	ndRowIndex := 0
	nd.tags = make([][]string, 0)

	for i, row := range d.data {
		keep := true
		for j, val := range d.data[i] {
			if d.constraints[j] != nil {
				switch val.(type) {
				case DynamicColumn:
					// dynamic cells are evaluated against their row before checking
					if valid {
						keep = d.constraints[j]((val.(DynamicColumn))(row))
					} else {
						keep = !d.constraints[j]((val.(DynamicColumn))(row))
					}
				default:
					if valid {
						keep = d.constraints[j](val)
					} else {
						keep = !d.constraints[j](val)
					}
				}
			}
			// when selecting the valid subset, one failing column discards the row
			if valid && !keep {
				break
			}
		}
		if keep {
			// deep-copy the row and its tags into the new Dataset
			nd.data = append(nd.data, make([]interface{}, 0, nd.cols))
			nd.data[ndRowIndex] = append(nd.data[ndRowIndex], row...)

			nd.tags = append(nd.tags, make([]string, 0, nd.cols))
			nd.tags[ndRowIndex] = append(nd.tags[ndRowIndex], d.tags[i]...)
			ndRowIndex++
		}
	}
	nd.cols = d.cols
	nd.rows = ndRowIndex

	return nd
}
+
// Stack stacks two Datasets by joining them at the row level, returning a new
// combined Dataset holding the rows of d followed by the rows of other.
// Returns ErrInvalidDimensions when the widths differ.
// NOTE(review): row and tag slices are shared with the operands (shallow
// copy), so mutating a row afterwards is visible in both — confirm intended.
func (d *Dataset) Stack(other *Dataset) (*Dataset, error) {
	if d.Width() != other.Width() {
		return nil, ErrInvalidDimensions
	}

	nd := NewDataset(d.headers)
	nd.cols = d.cols
	nd.rows = d.rows + other.rows

	nd.tags = make([][]string, 0, nd.rows)
	nd.tags = append(nd.tags, d.tags...)
	nd.tags = append(nd.tags, other.tags...)

	nd.data = make([][]interface{}, 0, nd.rows)
	nd.data = append(nd.data, d.data...)
	nd.data = append(nd.data, other.data...)

	return nd, nil
}
+
+// StackColumn stacks two Dataset by joining them at the column level, and return new combined Dataset.
+func (d *Dataset) StackColumn(other *Dataset) (*Dataset, error) {
+	if d.Height() != other.Height() {
+		return nil, ErrInvalidDimensions
+	}
+
+	nheaders := d.headers
+	nheaders = append(nheaders, other.headers...)
+
+	nd := NewDataset(nheaders)
+	nd.cols = d.cols + nd.cols
+	nd.rows = d.rows
+	nd.data = make([][]interface{}, nd.rows, nd.rows)
+	nd.tags = make([][]string, nd.rows, nd.rows)
+
+	for i := range d.data {
+		nd.data[i] = make([]interface{}, 0, nd.cols)
+		nd.data[i] = append(nd.data[i], d.data[i]...)
+		nd.data[i] = append(nd.data[i], other.data[i]...)
+
+		nd.tags[i] = make([]string, 0, nd.cols)
+		nd.tags[i] = append(nd.tags[i], d.tags[i]...)
+		nd.tags[i] = append(nd.tags[i], other.tags[i]...)
+	}
+
+	return nd, nil
+}
+
+// Column returns all the values for a specific column
+// returns nil if column is not found.
+func (d *Dataset) Column(header string) []interface{} {
+	colIndex := indexOfColumn(header, d)
+	if colIndex == -1 {
+		return nil
+	}
+
+	values := make([]interface{}, d.rows)
+	for i, e := range d.data {
+		switch e[colIndex].(type) {
+		case DynamicColumn:
+			values[i] = e[colIndex].(DynamicColumn)(e)
+		default:
+			values[i] = e[colIndex]
+		}
+	}
+	return values
+}
+
// Row returns a map (header -> value) representing a specific row of the
// Dataset, evaluating dynamic columns against the row.
// Returns tablib.ErrInvalidRowIndex if the row cannot be found.
func (d *Dataset) Row(index int) (map[string]interface{}, error) {
	if index < 0 || index >= d.rows {
		return nil, ErrInvalidRowIndex
	}

	row := make(map[string]interface{})
	for i, e := range d.data[index] {
		switch e.(type) {
		case DynamicColumn:
			// dynamic cells are evaluated against their row before returning
			row[d.headers[i]] = e.(DynamicColumn)(d.data[index])
		default:
			row[d.headers[i]] = e
		}
	}
	return row, nil
}

// Rows returns an array of maps representing the requested rows of the
// Dataset. All indices are validated before any row is materialized.
// Returns tablib.ErrInvalidRowIndex if any row cannot be found.
func (d *Dataset) Rows(index ...int) ([]map[string]interface{}, error) {
	for _, i := range index {
		if i < 0 || i >= d.rows {
			return nil, ErrInvalidRowIndex
		}
	}

	rows := make([]map[string]interface{}, 0, len(index))
	for _, i := range index {
		// error already ruled out by the validation pass above
		row, _ := d.Row(i)
		rows = append(rows, row)
	}

	return rows, nil
}
+
+// Slice returns a new Dataset representing a slice of the orignal Dataset like a slice of an array.
+// returns tablib.ErrInvalidRowIndex if the lower or upper bound is out of range.
+func (d *Dataset) Slice(lower, upperNonInclusive int) (*Dataset, error) {
+	if lower > upperNonInclusive || lower < 0 || upperNonInclusive > d.rows {
+		return nil, ErrInvalidRowIndex
+	}
+
+	rowCount := upperNonInclusive - lower
+	cols := d.cols
+	nd := NewDataset(d.headers)
+	nd.data = make([][]interface{}, 0, rowCount)
+	nd.tags = make([][]string, 0, rowCount)
+	nd.rows = upperNonInclusive - lower
+	j := 0
+	for i := lower; i < upperNonInclusive; i++ {
+		nd.data = append(nd.data, make([]interface{}, 0, cols))
+		nd.data[j] = make([]interface{}, 0, cols)
+		nd.data[j] = append(nd.data[j], d.data[i]...)
+		nd.tags = append(nd.tags, make([]string, 0, cols))
+		nd.tags[j] = make([]string, 0, cols)
+		nd.tags[j] = append(nd.tags[j], d.tags[i]...)
+		j++
+	}
+
+	return nd, nil
+}
+
+// Filter filters a Dataset, returning a fresh Dataset including only the rows
+// previously tagged with one of the given tags. Returns a new Dataset.
+func (d *Dataset) Filter(tags ...string) *Dataset {
+	nd := NewDataset(d.headers)
+	for rowIndex, rowValue := range d.data {
+		for _, filterTag := range tags {
+			if isTagged(filterTag, d.tags[rowIndex]) {
+				nd.AppendTagged(rowValue, d.tags[rowIndex]...) // copy tags
+			}
+		}
+	}
+	return nd
+}
+
+// Sort sorts the Dataset by a specific column. Returns a new Dataset.
+func (d *Dataset) Sort(column string) *Dataset {
+	return d.internalSort(column, false)
+}
+
+// SortReverse sorts the Dataset by a specific column in reverse order. Returns a new Dataset.
+func (d *Dataset) SortReverse(column string) *Dataset {
+	return d.internalSort(column, true)
+}
+
+func (d *Dataset) internalSort(column string, reverse bool) *Dataset {
+	nd := NewDataset(d.headers)
+	pairs := make([]entryPair, 0, nd.rows)
+	for i, v := range d.Column(column) {
+		pairs = append(pairs, entryPair{i, v})
+	}
+
+	var how sort.Interface
+	// sort by column
+	switch pairs[0].value.(type) {
+	case string:
+		how = byStringValue(pairs)
+	case int:
+		how = byIntValue(pairs)
+	case int64:
+		how = byInt64Value(pairs)
+	case uint64:
+		how = byUint64Value(pairs)
+	case float64:
+		how = byFloatValue(pairs)
+	case time.Time:
+		how = byTimeValue(pairs)
+	default:
+		// nothing
+	}
+
+	if !reverse {
+		sort.Sort(how)
+	} else {
+		sort.Sort(sort.Reverse(how))
+	}
+
+	// now iterate on the pairs and add the data sorted to the new Dataset
+	for _, p := range pairs {
+		nd.AppendTagged(d.data[p.index], d.tags[p.index]...)
+	}
+
+	return nd
+}
+
// Transpose transposes a Dataset, turning rows into columns and vice versa,
// returning a new Dataset instance. The values of the first column become the
// new header row (prefixed by the original first header); each remaining
// original column becomes a row led by its header. Tags, constraints and
// dynamic columns are lost in the returned Dataset (dynamic cells are
// evaluated to plain values via Column).
// TODO
func (d *Dataset) Transpose() *Dataset {
	newHeaders := make([]string, 0, d.cols+1)
	newHeaders = append(newHeaders, d.headers[0])
	for _, c := range d.Column(d.headers[0]) {
		newHeaders = append(newHeaders, d.asString(c))
	}

	nd := NewDataset(newHeaders)
	nd.data = make([][]interface{}, 0, d.cols)
	// row i-1 of the result = header i followed by the values of column i
	for i := 1; i < d.cols; i++ {
		nd.data = append(nd.data, make([]interface{}, 0, d.rows))
		nd.data[i-1] = make([]interface{}, 0, d.rows)
		nd.data[i-1] = append(nd.data[i-1], d.headers[i])
		nd.data[i-1] = append(nd.data[i-1], d.Column(d.headers[i])...)
	}
	nd.rows = d.cols - 1

	return nd
}
+
+// DeleteRow deletes a row at a specific index
+func (d *Dataset) DeleteRow(row int) error {
+	if row < 0 || row >= d.rows {
+		return ErrInvalidRowIndex
+	}
+	d.data = append(d.data[:row], d.data[row+1:]...)
+	d.rows--
+	return nil
+}
+
+// DeleteColumn deletes a column from the Dataset.
+func (d *Dataset) DeleteColumn(header string) error {
+	colIndex := indexOfColumn(header, d)
+	if colIndex == -1 {
+		return ErrInvalidColumnIndex
+	}
+	d.cols--
+	d.headers = append(d.headers[:colIndex], d.headers[colIndex+1:]...)
+	// remove the column
+	for i := range d.data {
+		d.data[i] = append(d.data[i][:colIndex], d.data[i][colIndex+1:]...)
+	}
+	return nil
+}
+
+func indexOfColumn(header string, d *Dataset) int {
+	for i, e := range d.headers {
+		if e == header {
+			return i
+		}
+	}
+	return -1
+}
+
+// Dict returns the Dataset as an array of map where each key is a column.
+func (d *Dataset) Dict() []interface{} {
+	back := make([]interface{}, d.rows)
+	for i, e := range d.data {
+		m := make(map[string]interface{}, d.cols-1)
+		for j, c := range d.headers {
+			switch e[j].(type) {
+			case DynamicColumn:
+				m[c] = e[j].(DynamicColumn)(e)
+			default:
+				m[c] = e[j]
+			}
+		}
+		back[i] = m
+	}
+	return back
+}
+
+// Records returns the Dataset as an array of array where each entry is a string.
+// The first row of the returned 2d array represents the columns of the Dataset.
+func (d *Dataset) Records() [][]string {
+	records := make([][]string, d.rows+1 /* +1 for header */)
+	records[0] = make([]string, d.cols)
+	for j, e := range d.headers {
+		records[0][j] = e
+	}
+	for i, e := range d.data {
+		rowIndex := i + 1
+		j := 0
+		records[rowIndex] = make([]string, d.cols)
+		for _, v := range e {
+			vv := v
+			switch v.(type) {
+			case DynamicColumn:
+				vv = v.(DynamicColumn)(e)
+			default:
+				// nothing
+			}
+			records[rowIndex][j] = d.asString(vv)
+			j++
+		}
+	}
+
+	return records
+}
+
// justLetMeKeepFmt exists solely to reference the fmt package so its import
// is not flagged as unused; it does nothing useful at runtime.
func justLetMeKeepFmt() {
	fmt.Printf("")
}

+ 20 - 0
vendor/github.com/agrison/go-tablib/tablib_errors.go

@@ -0,0 +1,20 @@
+package tablib
+
+import "errors"
+
var (
	// ErrInvalidDimensions is returned when trying to append/insert too many
	// or not enough values to a row or column.
	ErrInvalidDimensions = errors.New("tablib: Invalid dimension")
	// ErrInvalidColumnIndex is returned when trying to insert a column at an
	// invalid index, or to delete a column that does not exist.
	ErrInvalidColumnIndex = errors.New("tablib: Invalid column index")
	// ErrInvalidRowIndex is returned when trying to access, insert or delete
	// a row at an invalid index.
	ErrInvalidRowIndex = errors.New("tablib: Invalid row index")
	// ErrInvalidDataset is returned when a Dataset fails to validate against
	// the constraints that have been set on its columns.
	ErrInvalidDataset = errors.New("tablib: Invalid dataset")
	// ErrInvalidTag is returned when trying to add a tag which is not a string.
	ErrInvalidTag = errors.New("tablib: A tag must be a string")
)

+ 70 - 0
vendor/github.com/agrison/go-tablib/tablib_exportable.go

@@ -0,0 +1,70 @@
+package tablib
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
// defaultBufferCap is the initial capacity, in bytes, of buffers created by newBuffer.
const defaultBufferCap = 16 * 1024

// newBuffer returns an empty bytes.Buffer whose backing array already has a
// capacity of defaultBufferCap bytes.
func newBuffer() *bytes.Buffer {
	return newBufferWithCap(defaultBufferCap)
}

// newBufferWithCap returns an empty bytes.Buffer backed by an array of the
// given initial capacity, avoiding early re-allocations during export.
func newBufferWithCap(initialCap int) *bytes.Buffer {
	return bytes.NewBuffer(make([]byte, 0, initialCap))
}
+
// Exportable represents an exported dataset; it cannot be manipulated further
// and can only be converted to a string or []byte, or written to an io.Writer.
// It is a thin wrapper around the bytes.Buffer that the tablib library writes
// export-format content into; the real work is delegated to bytes.Buffer.
type Exportable struct {
	buffer *bytes.Buffer
}

// newExportable creates a new instance of Exportable from a bytes.Buffer.
func newExportable(buffer *bytes.Buffer) *Exportable {
	return &Exportable{buffer}
}

// newExportableFromBytes creates a new instance of Exportable from a byte array.
func newExportableFromBytes(buf []byte) *Exportable {
	return &Exportable{bytes.NewBuffer(buf)}
}

// newExportableFromString creates a new instance of Exportable from a string.
func newExportableFromString(str string) *Exportable {
	buff := newBufferWithCap(len(str))
	buff.WriteString(str)
	return newExportable(buff)
}
+
// Bytes returns the contents of the exported dataset as a byte array.
func (e *Exportable) Bytes() []byte {
	return e.buffer.Bytes()
}

// String returns the contents of the exported dataset as a string.
func (e *Exportable) String() string {
	return e.buffer.String()
}

// WriteTo writes the exported dataset to w, returning the number of bytes written.
func (e *Exportable) WriteTo(w io.Writer) (int64, error) {
	return e.buffer.WriteTo(w)
}

// WriteFile writes the databook or dataset content to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (e *Exportable) WriteFile(filename string, perm os.FileMode) error {
	return ioutil.WriteFile(filename, e.Bytes(), perm)
}

+ 41 - 0
vendor/github.com/agrison/go-tablib/tablib_html.go

@@ -0,0 +1,41 @@
+package tablib
+
+// HTML returns the HTML representation of the Dataset as an Exportable.
+func (d *Dataset) HTML() *Exportable {
+	back := d.Records()
+	b := newBuffer()
+
+	b.WriteString("<table class=\"table table-striped\">\n\t<thead>")
+	for i, r := range back {
+		b.WriteString("\n\t\t<tr>")
+		for _, c := range r {
+			tag := "td"
+			if i == 0 {
+				tag = "th"
+			}
+			b.WriteString("\n\t\t\t<" + tag + ">")
+			b.WriteString(c)
+			b.WriteString("</" + tag + ">")
+		}
+		b.WriteString("\n\t\t</tr>")
+		if i == 0 {
+			b.WriteString("\n\t</thead>\n\t<tbody>")
+		}
+	}
+	b.WriteString("\n\t</tbody>\n</table>")
+
+	return newExportable(b)
+}
+
// HTML returns a HTML representation of the Databook as an Exportable:
// each sheet is rendered as an <h1> title followed by its dataset's table.
// NOTE(review): sheet titles are written without HTML escaping — confirm
// inputs are trusted.
func (d *Databook) HTML() *Exportable {
	b := newBuffer()

	for _, s := range d.sheets {
		b.WriteString("<h1>" + s.title + "</h1>\n")
		b.Write(s.dataset.HTML().Bytes())
		b.WriteString("\n\n")
	}

	return newExportable(b)
}

+ 72 - 0
vendor/github.com/agrison/go-tablib/tablib_json.go

@@ -0,0 +1,72 @@
+package tablib
+
+import (
+	"encoding/json"
+)
+
// LoadJSON loads a Dataset from a JSON source: an array of objects whose
// keys become the column headers. (The source format is JSON, not YAML.)
func LoadJSON(jsonContent []byte) (*Dataset, error) {
	var input []map[string]interface{}
	if err := json.Unmarshal(jsonContent, &input); err != nil {
		return nil, err
	}

	return internalLoadFromDict(input)
}
+
+// LoadDatabookJSON loads a Databook from a JSON source.
+func LoadDatabookJSON(jsonContent []byte) (*Databook, error) {
+	var input []map[string]interface{}
+	var internalInput []map[string]interface{}
+	if err := json.Unmarshal(jsonContent, &input); err != nil {
+		return nil, err
+	}
+
+	db := NewDatabook()
+	for _, d := range input {
+		b, err := json.Marshal(d["data"])
+		if err != nil {
+			return nil, err
+		}
+		if err := json.Unmarshal(b, &internalInput); err != nil {
+			return nil, err
+		}
+
+		if ds, err := internalLoadFromDict(internalInput); err == nil {
+			db.AddSheet(d["title"].(string), ds)
+		} else {
+			return nil, err
+		}
+	}
+
+	return db, nil
+}
+
// JSON returns a JSON representation of the Dataset as an Exportable:
// an array of objects keyed by column header (see Dict).
func (d *Dataset) JSON() (*Exportable, error) {
	back := d.Dict()

	b, err := json.Marshal(back)
	if err != nil {
		return nil, err
	}
	return newExportableFromBytes(b), nil
}
+
+// JSON returns a JSON representation of the Databook as an Exportable.
+func (d *Databook) JSON() (*Exportable, error) {
+	b := newBuffer()
+	b.WriteString("[")
+	for _, s := range d.sheets {
+		b.WriteString("{\"title\": \"" + s.title + "\", \"data\": ")
+		js, err := s.dataset.JSON()
+		if err != nil {
+			return nil, err
+		}
+		b.Write(js.Bytes())
+		b.WriteString("},")
+	}
+	by := b.Bytes()
+	by[len(by)-1] = ']'
+	return newExportableFromBytes(by), nil
+}

+ 48 - 0
vendor/github.com/agrison/go-tablib/tablib_sort.go

@@ -0,0 +1,48 @@
+package tablib
+
+import "time"
+
// entryPair represents a pair of a value and its row index in the dataset
// which is used while sorting the dataset using a column.
type entryPair struct {
	index int
	value interface{}
}

// The comparators below implement sort.Interface over []entryPair for each
// supported column type. Each Less performs an unchecked type assertion, so
// sorting a column with mixed types will panic.

type byIntValue []entryPair

func (p byIntValue) Len() int           { return len(p) }
func (p byIntValue) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byIntValue) Less(i, j int) bool { return p[i].value.(int) < p[j].value.(int) }

type byInt64Value []entryPair

func (p byInt64Value) Len() int           { return len(p) }
func (p byInt64Value) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byInt64Value) Less(i, j int) bool { return p[i].value.(int64) < p[j].value.(int64) }

type byUint64Value []entryPair

func (p byUint64Value) Len() int           { return len(p) }
func (p byUint64Value) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byUint64Value) Less(i, j int) bool { return p[i].value.(uint64) < p[j].value.(uint64) }

type byFloatValue []entryPair

func (p byFloatValue) Len() int           { return len(p) }
func (p byFloatValue) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byFloatValue) Less(i, j int) bool { return p[i].value.(float64) < p[j].value.(float64) }

type byTimeValue []entryPair

func (p byTimeValue) Len() int      { return len(p) }
func (p byTimeValue) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p byTimeValue) Less(i, j int) bool {
	// compare instants via UnixNano so the ordering is chronological
	return p[i].value.(time.Time).UnixNano() < p[j].value.(time.Time).UnixNano()
}

type byStringValue []entryPair

func (p byStringValue) Len() int           { return len(p) }
func (p byStringValue) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byStringValue) Less(i, j int) bool { return p[i].value.(string) < p[j].value.(string) }

+ 149 - 0
vendor/github.com/agrison/go-tablib/tablib_sql.go

@@ -0,0 +1,149 @@
+package tablib
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
var (
	typePostgres = "postgres"
	typeMySQL    = "mysql"
	// defaults maps a value category plus database type to the SQL column
	// type used when a more specific type cannot be inferred.
	defaults     = map[string]string{"various." + typePostgres: "TEXT",
		"various." + typeMySQL: "VARCHAR(100)", "numeric." + typePostgres: "NUMERIC",
		"numeric." + typeMySQL: "DOUBLE"}
)

// columnSQLType determines the type of a column
// if throughout the whole column values have the same type then this type is
// returned, otherwise the VARCHAR/TEXT type is returned.
// numeric types are coerced into DOUBLE/NUMERIC.
// It also returns the column's values so callers avoid a second Column scan.
// NOTE(review): an empty column leaves currentType == "" and maxString == 0,
// yielding VARCHAR(0) for MySQL — confirm intended.
func (d *Dataset) columnSQLType(header, dbType string) (string, []interface{}) {
	types := 0       // number of distinct categories seen so far
	currentType := ""
	maxString := 0   // longest string value, used to size VARCHAR(n)
	values := d.Column(header)
	for _, c := range values {
		switch c.(type) {
		case uint, uint8, uint16, uint32, uint64,
			int, int8, int16, int32, int64,
			float32, float64:
			if currentType != "numeric" {
				currentType = "numeric"
				types++
			}
		case time.Time:
			if currentType != "time" {
				currentType = "time"
				types++
			}
		case string:
			if currentType != "string" {
				currentType = "string"
				types++
			}
			if len(c.(string)) > maxString {
				maxString = len(c.(string))
			}
		}
	}

	// mixed categories fall back to the catch-all text type
	if types > 1 {
		return defaults["various."+dbType], values
	}
	switch currentType {
	case "numeric":
		return defaults["numeric."+dbType], values
	case "time":
		return "TIMESTAMP", values
	default:
		if dbType == typePostgres {
			return "TEXT", values
		}
		return "VARCHAR(" + strconv.Itoa(maxString) + ")", values
	}
}
+
// isStringColumn reports whether the given SQL column type is textual,
// i.e. VARCHAR(n) or TEXT.
func isStringColumn(c string) bool {
	for _, prefix := range []string{"VARCHAR", "TEXT"} {
		if strings.HasPrefix(c, prefix) {
			return true
		}
	}
	return false
}
+
// MySQL returns an Exportable holding a suite of MySQL commands
// (CREATE TABLE + INSERTs) recreating the Dataset into a table.
func (d *Dataset) MySQL(table string) *Exportable {
	return d.sql(table, typeMySQL)
}

// Postgres returns an Exportable holding a suite of Postgres commands
// (CREATE TABLE + INSERTs) recreating the Dataset into a table.
func (d *Dataset) Postgres(table string) *Exportable {
	return d.sql(table, typePostgres)
}
+
+// sql returns a string representing a suite of SQL commands
+// recreating the Dataset into a table.
+func (d *Dataset) sql(table, dbType string) *Exportable {
+	b := newBuffer()
+
+	tableSQL, columnTypes, columnValues := d.createTable(table, dbType)
+	b.WriteString(tableSQL)
+
+	reg, _ := regexp.Compile("[']")
+	// inserts
+	for i := range d.data {
+		b.WriteString("INSERT INTO " + table + " VALUES(" + strconv.Itoa(i+1) + ", ")
+		for j, col := range d.headers {
+			asStr := d.asString(columnValues[col][i])
+			if isStringColumn(columnTypes[col]) {
+				b.WriteString("'" + reg.ReplaceAllString(asStr, "''") + "'")
+			} else if strings.HasPrefix(columnTypes[col], "TIMESTAMP") {
+				if dbType == typeMySQL {
+					b.WriteString("CONVERT_TZ('" + asStr[:10] + " " + asStr[11:19] + "', '" + asStr[len(asStr)-6:] + "', 'SYSTEM')")
+				} else {
+					b.WriteString("'" + asStr + "'") // simpler with Postgres
+				}
+			} else {
+				b.WriteString(asStr)
+			}
+			if j < len(d.headers)-1 {
+				b.WriteString(", ")
+			}
+		}
+		b.WriteString(");\n")
+	}
+	b.WriteString("\nCOMMIT;\n")
+
+	return newExportable(b)
+}
+
// createTable renders a CREATE TABLE statement for the Dataset (adding an
// auto-incrementing id primary key) and returns it together with each
// column's inferred SQL type and its values, so sql() needs no second scan.
// NOTE(review): table and column names are written verbatim, without
// quoting or escaping — confirm identifiers are trusted.
func (d *Dataset) createTable(table, dbType string) (string, map[string]string, map[string][]interface{}) {
	var b bytes.Buffer
	columnValues := make(map[string][]interface{})
	columnTypes := make(map[string]string)

	// create table
	b.WriteString("CREATE TABLE IF NOT EXISTS " + table)
	if dbType == typePostgres {
		b.WriteString("\n(\n\tid SERIAL PRIMARY KEY,\n")
	} else {
		b.WriteString("\n(\n\tid INT NOT NULL AUTO_INCREMENT PRIMARY KEY,\n")
	}
	for i, h := range d.headers {
		b.WriteString("\t" + h)
		t, v := d.columnSQLType(h, dbType)
		columnValues[h] = v
		columnTypes[h] = t
		b.WriteString(" " + t)
		if i < len(d.headers)-1 {
			b.WriteString(",")
		}
		b.WriteString("\n")
	}

	b.WriteString(");\n\n")

	return b.String(), columnTypes, columnValues
}

+ 72 - 0
vendor/github.com/agrison/go-tablib/tablib_tabular.go

@@ -0,0 +1,72 @@
+package tablib
+
+import (
+	"github.com/bndr/gotabulate"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+var (
+	// TabularGrid is the value to be passed to gotabulate to render the table
+	// as ASCII table with grid format
+	TabularGrid = "grid"
+	// TabularSimple is the value to be passed to gotabulate to render the table
+	// as ASCII table with simple format
+	TabularSimple = "simple"
+	// TabularCondensed is the value to be passed to gotabulate to render the table
+	// as ASCII table with condensed format
+	TabularCondensed = "condensed"
+	// TabularMarkdown is the value to be passed to gotabulate to render the table
+	// as ASCII table with Markdown format
+	TabularMarkdown = "markdown"
+)
+
+// Markdown returns a Markdown table Exportable representation of the
+// Dataset. It is a convenience shorthand for Tabular(TabularMarkdown).
+func (d *Dataset) Markdown() *Exportable {
+	return d.Tabular(TabularMarkdown)
+}
+
+// Tabular returns a tabular Exportable representation of the Dataset.
+// format is either grid, simple, condensed or markdown (see the
+// Tabular* package variables).
+func (d *Dataset) Tabular(format string) *Exportable {
+	back := d.Records()
+	t := gotabulate.Create(back)
+
+	if format == TabularCondensed || format == TabularMarkdown {
+		// condensed: collapse the blank line gotabulate leaves after the header
+		rendered := regexp.MustCompile("\n\n\\s").ReplaceAllString(t.Render("simple"), "\n ")
+		if format == TabularMarkdown {
+			// turn the first separator line into a Markdown header separator
+			firstLine := regexp.MustCompile("-\\s+-").ReplaceAllString(strings.Split(rendered, "\n")[0], "- | -")
+			// now just locate the position of pipe characters, and set them
+			positions := make([]int, 0, d.cols-1)
+			x := 0
+			for _, c := range firstLine {
+				if c == '|' {
+					positions = append(positions, x)
+				}
+				// track byte offsets (not rune counts) so the slicing below is valid
+				x += utf8.RuneLen(c)
+			}
+
+			b := newBuffer()
+			lines := strings.Split(rendered, "\n")
+			// skip the separator (first line) and the two trailing lines
+			for _, line := range lines[1 : len(lines)-2] {
+				ipos := 0
+				b.WriteString("| ")
+				for _, pos := range positions {
+					if ipos < len(line) && pos < len(line) {
+						b.WriteString(line[ipos:pos])
+						b.WriteString(" | ")
+						ipos = pos + 1
+					}
+				}
+				if ipos < len(line) {
+					b.WriteString(line[ipos:])
+				}
+				b.WriteString(" | \n")
+			}
+			return newExportable(b)
+		}
+		return newExportableFromString(rendered)
+	}
+	return newExportableFromString(t.Render(format))
+}

+ 65 - 0
vendor/github.com/agrison/go-tablib/tablib_util.go

@@ -0,0 +1,65 @@
+package tablib
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// internalLoadFromDict creates a Dataset from an array of maps, each map
+// representing one row keyed by column name. Column headers are taken
+// from the keys of the first row (Go map iteration order, so header
+// order is not deterministic).
+func internalLoadFromDict(input []map[string]interface{}) (*Dataset, error) {
+	// robustness fix: an empty input used to panic on input[0];
+	// return an empty Dataset instead
+	if len(input) == 0 {
+		return NewDataset(make([]string, 0)), nil
+	}
+
+	// retrieve columns
+	headers := make([]string, 0, 10)
+	for h := range input[0] {
+		headers = append(headers, h)
+	}
+
+	ds := NewDataset(headers)
+	for _, e := range input {
+		// rows are projected onto the headers; missing keys yield nil cells
+		row := make([]interface{}, 0, len(headers))
+		for _, h := range headers {
+			row = append(row, e[h])
+		}
+		ds.AppendValues(row...)
+	}
+
+	return ds, nil
+}
+
+// isTagged reports whether tag is present in tags.
+func isTagged(tag string, tags []string) bool {
+	found := false
+	for i := 0; i < len(tags) && !found; i++ {
+		found = tags[i] == tag
+	}
+	return found
+}
+
+// asString converts an arbitrary cell value to its string representation.
+// Strings pass through unchanged; numeric and bool values are formatted
+// with strconv; time.Time is rendered as RFC3339. Any other type falls
+// back to d.EmptyValue when it is set, otherwise to fmt's %v formatting.
+func (d *Dataset) asString(vv interface{}) string {
+	var v string
+	switch vv.(type) {
+	case string:
+		v = vv.(string)
+	case int:
+		v = strconv.Itoa(vv.(int))
+	case int64:
+		v = strconv.FormatInt(vv.(int64), 10)
+	case uint64:
+		v = strconv.FormatUint(vv.(uint64), 10)
+	case bool:
+		v = strconv.FormatBool(vv.(bool))
+	case float64:
+		// NOTE(review): bitSize is 32 although the value is a float64;
+		// kept as-is to preserve the existing output rounding.
+		v = strconv.FormatFloat(vv.(float64), 'G', -1, 32)
+	case time.Time:
+		v = vv.(time.Time).Format(time.RFC3339)
+	default:
+		if d.EmptyValue != "" {
+			v = d.EmptyValue
+		} else {
+			// bug fix: previously formatted the zero-value local v (always "")
+			// with %s instead of formatting the actual input vv
+			v = fmt.Sprintf("%v", vv)
+		}
+	}
+	return v
+}

+ 50 - 0
vendor/github.com/agrison/go-tablib/tablib_xlsx.go

@@ -0,0 +1,50 @@
+package tablib
+
+import (
+	"github.com/tealeg/xlsx"
+)
+
+// XLSX exports the Dataset as a byte array representing the .xlsx format.
+// The dataset is written to a single sheet named "Sheet 1" with a bold
+// header row.
+func (d *Dataset) XLSX() (*Exportable, error) {
+	file := xlsx.NewFile()
+	if err := d.addXlsxSheetToFile(file, "Sheet 1"); err != nil {
+		return nil, err
+	}
+
+	b := newBuffer()
+	// bug fix: the error returned by file.Write was previously ignored
+	if err := file.Write(b); err != nil {
+		return nil, err
+	}
+	return newExportable(b), nil
+}
+
+// XLSX returns a XLSX representation of the Databook as an exportable.
+// Each sheet of the Databook becomes one worksheet in the workbook.
+func (d *Databook) XLSX() (*Exportable, error) {
+	file := xlsx.NewFile()
+
+	for _, s := range d.sheets {
+		// bug fix: sheet-creation errors were previously dropped
+		if err := s.dataset.addXlsxSheetToFile(file, s.title); err != nil {
+			return nil, err
+		}
+	}
+
+	b := newBuffer()
+	// bug fix: the error returned by file.Write was previously ignored
+	if err := file.Write(b); err != nil {
+		return nil, err
+	}
+	return newExportable(b), nil
+}
+
+// addXlsxSheetToFile appends the Dataset as a new sheet named sheetName
+// to the given xlsx file. The first record row (the headers) is rendered
+// in bold.
+func (d *Dataset) addXlsxSheetToFile(file *xlsx.File, sheetName string) error {
+	sheet, err := file.AddSheet(sheetName)
+	if err != nil {
+		// bug fix: previously returned nil here, silently swallowing the error
+		return err
+	}
+
+	back := d.Records()
+	for i, r := range back {
+		row := sheet.AddRow()
+		for _, c := range r {
+			cell := row.AddCell()
+			cell.Value = c
+			if i == 0 {
+				// the first record row holds the headers: make it bold
+				cell.GetStyle().Font.Bold = true
+			}
+		}
+	}
+	return nil
+}

+ 64 - 0
vendor/github.com/agrison/go-tablib/tablib_xml.go

@@ -0,0 +1,64 @@
+package tablib
+
+import (
+	"bytes"
+	"github.com/agrison/mxj"
+)
+
+// XML returns a XML representation of the Dataset as an Exportable,
+// using "row" as the per-record tag and two-space prefix and indent.
+func (d *Dataset) XML() (*Exportable, error) {
+	return d.XMLWithTagNamePrefixIndent("row", "  ", "  ")
+}
+
+// XML returns a XML representation of the Databook as an Exportable.
+// Each sheet is wrapped in a <sheet> element carrying its <title> and
+// the sheet's dataset serialized via XMLWithTagNamePrefixIndent.
+func (d *Databook) XML() (*Exportable, error) {
+	b := newBuffer()
+	b.WriteString("<databook>\n")
+	for _, s := range d.sheets {
+		// NOTE(review): s.title is written verbatim — a title containing
+		// XML metacharacters would produce malformed output; confirm inputs.
+		b.WriteString("  <sheet>\n    <title>" + s.title + "</title>\n    ")
+		row, err := s.dataset.XMLWithTagNamePrefixIndent("row", "      ", "  ")
+		if err != nil {
+			return nil, err
+		}
+		b.Write(row.Bytes())
+		b.WriteString("\n  </sheet>")
+	}
+	b.WriteString("\n</databook>")
+	return newExportable(b), nil
+}
+
+// XMLWithTagNamePrefixIndent returns a XML representation of the Dataset
+// with a custom per-record tag name, line prefix and indent string.
+func (d *Dataset) XMLWithTagNamePrefixIndent(tagName, prefix, indent string) (*Exportable, error) {
+	back := d.Dict()
+
+	exportable := newExportable(newBuffer())
+	exportable.buffer.WriteString("<dataset>\n")
+	for _, r := range back {
+		// each record is rendered as one <tagName> element
+		m := mxj.Map(r.(map[string]interface{}))
+		if err := m.XmlIndentWriter(exportable.buffer, prefix, indent, tagName); err != nil {
+			return nil, err
+		}
+	}
+	exportable.buffer.WriteString("\n" + prefix + "</dataset>")
+
+	return exportable, nil
+}
+
+// LoadXML loads a Dataset from an XML source shaped like the output of
+// Dataset.XML: a <dataset> root containing repeated <row> elements.
+func LoadXML(input []byte) (*Dataset, error) {
+	m, _, err := mxj.NewMapXmlReaderRaw(bytes.NewReader(input))
+	if err != nil {
+		return nil, err
+	}
+
+	// this seems quite a bit hacky
+	// NOTE(review): the assertions below panic if the document lacks a
+	// <dataset> root, or if it holds a single <row> (which mxj decodes as
+	// a map rather than []interface{}) — confirm inputs upstream.
+	datasetNode, _ := m.ValueForPath("dataset")
+	rowNode := datasetNode.(map[string]interface{})["row"].([]interface{})
+
+	back := make([]map[string]interface{}, 0, len(rowNode))
+	for _, r := range rowNode {
+		back = append(back, r.(map[string]interface{}))
+	}
+
+	return internalLoadFromDict(back)
+}

+ 69 - 0
vendor/github.com/agrison/go-tablib/tablib_yaml.go

@@ -0,0 +1,69 @@
+package tablib
+
+import "gopkg.in/yaml.v2"
+
+// LoadYAML loads a dataset from a YAML source.
+// The document must be a sequence of mappings, one mapping per row.
+func LoadYAML(yamlContent []byte) (*Dataset, error) {
+	var rows []map[string]interface{}
+	err := yaml.Unmarshal(yamlContent, &rows)
+	if err != nil {
+		return nil, err
+	}
+	return internalLoadFromDict(rows)
+}
+
+// LoadDatabookYAML loads a Databook from a YAML source: a sequence of
+// mappings, each holding a "title" entry and a "data" entry (the latter
+// itself a sequence of row mappings).
+func LoadDatabookYAML(yamlContent []byte) (*Databook, error) {
+	var input []map[string]interface{}
+	var internalInput []map[string]interface{}
+	if err := yaml.Unmarshal(yamlContent, &input); err != nil {
+		return nil, err
+	}
+
+	db := NewDatabook()
+	for _, d := range input {
+		// round-trip d["data"] through YAML to coerce it into
+		// []map[string]interface{}
+		b, err := yaml.Marshal(d["data"])
+		if err != nil {
+			return nil, err
+		}
+		if err := yaml.Unmarshal(b, &internalInput); err != nil {
+			return nil, err
+		}
+
+		if ds, err := internalLoadFromDict(internalInput); err == nil {
+			// NOTE(review): panics if "title" is missing or not a string —
+			// confirm the documents this is fed.
+			db.AddSheet(d["title"].(string), ds)
+		} else {
+			return nil, err
+		}
+	}
+
+	return db, nil
+}
+
+// YAML returns a YAML representation of the Dataset as an Exportable.
+func (d *Dataset) YAML() (*Exportable, error) {
+	marshaled, err := yaml.Marshal(d.Dict())
+	if err != nil {
+		return nil, err
+	}
+	return newExportableFromBytes(marshaled), nil
+}
+
+// YAML returns a YAML representation of the Databook as an Exportable.
+// Each sheet contributes one mapping with its "title" and "data".
+func (d *Databook) YAML() (*Exportable, error) {
+	sheets := make([]map[string]interface{}, 0, len(d.sheets))
+	for _, s := range d.sheets {
+		sheets = append(sheets, map[string]interface{}{
+			"title": s.title,
+			"data":  s.dataset.Dict(),
+		})
+	}
+	b, err := yaml.Marshal(sheets)
+	if err != nil {
+		return nil, err
+	}
+	return newExportableFromBytes(b), nil
+}

+ 55 - 0
vendor/github.com/agrison/mxj/LICENSE

@@ -0,0 +1,55 @@
+Copyright (c) 2012-2016 Charles Banning <clbanning@gmail.com>.  All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+===============================================================================
+
+Go Language Copyright & License - 
+
+Copyright 2009 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 177 - 0
vendor/github.com/agrison/mxj/anyxml.go

@@ -0,0 +1,177 @@
+package mxj
+
+import (
+	"encoding/xml"
+	"reflect"
+)
+
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, myRootTag, myElementTag).
+// AnyXml - encode an arbitrary value as XML (see the package example
+// above). tags[0], when present, overrides DefaultRootTag; tags[1]
+// overrides DefaultElementTag.
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	// structs are handled by the standard library marshaler
+	// NOTE(review): reflect.TypeOf(v) is nil for an untyped nil v, which
+	// would panic here — confirm callers never pass nil.
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+
+	// resolve root and element tag names from the optional tags arguments
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	var ss string
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		// a slice: encode every element under the root tag
+		ss = "<" + rt + ">"
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					// single-entry map: use its key as the element tag
+					for tag, val := range m {
+						err = mapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = mapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = mapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		ss += *s + "</" + rt + ">"
+		b = []byte(ss)
+	case map[string]interface{}:
+		// a map: delegate to Map.Xml with the chosen root tag
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		// scalar: a single element named after the root tag
+		err = mapToXmlIndent(false, s, rt, v, p)
+		b = []byte(*s)
+	}
+
+	return b, err
+}
+
+// AnyXmlIndent - encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	// structs are handled by the standard library marshaler
+	// NOTE(review): reflect.TypeOf(v) is nil for an untyped nil v, which
+	// would panic here — confirm callers never pass nil.
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	// resolve root and element tag names from the optional tags arguments
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	var ss string
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		// a slice: encode every element under the root tag, pretty-printed
+		ss = "<" + rt + ">\n"
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					// single-entry map: use its key as the element tag
+					for tag, val := range m {
+						err = mapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we are 1 tag in
+					err = mapToXmlIndent(true, s, et, vv, p)
+					*s += "\n"
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = mapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		ss += *s + "</" + rt + ">"
+		b = []byte(ss)
+	case map[string]interface{}:
+		// a map: delegate to Map.XmlIndent with the chosen root tag
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		// scalar: a single element named after the root tag
+		err = mapToXmlIndent(true, s, rt, v, p)
+		b = []byte(*s)
+	}
+
+	return b, err
+}

+ 54 - 0
vendor/github.com/agrison/mxj/atomFeedString.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+  An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+     feeds that will be pubsubhubbubbed.
+  2. every time one of those feeds changes, tell the hub
+     with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code.  I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+  This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops.  Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites.  I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> 	   `
+

+ 110 - 0
vendor/github.com/agrison/mxj/doc.go

@@ -0,0 +1,110 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2015 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+/*
+Marshal/Unmarshal XML to/from JSON and map[string]interface{} values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j or mxj/j2x packages.
+
+Note: this library was designed for processing ad hoc anonymous messages.  Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly.
+
+Note:
+	2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+
+SUMMARY
+
+   type Map map[string]interface{}
+
+   Create a Map value, 'm', from any map[string]interface{} value, 'v':
+      m := Map(v)
+
+   Unmarshal / marshal XML as a Map value, 'm':
+      m, err := NewMapXml(xmlValue) // unmarshal
+      xmlValue, err := m.Xml()      // marshal
+
+   Unmarshal XML from an io.Reader as a Map value, 'm':
+      m, err := NewMapReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+      m, raw, err := NewMapReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
+
+   Marshal Map value, 'm', to an XML Writer (io.Writer):
+      err := m.XmlWriter(xmlWriter)
+      raw, err := m.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+   Also, for prettified output:
+      xmlValue, err := m.XmlIndent(prefix, indent, ...)
+      err := m.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+      raw, err := m.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+   Bulk process XML with error handling (note: handlers must return a boolean value):
+      err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+      err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
+
+   Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader.
+
+   There are comparable functions and methods for JSON processing.
+
+   Arbitrary structure values can be decoded to / encoded from Map values:
+      m, err := NewMapStruct(structVal)
+      err := m.Struct(structPointer)
+
+   To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+   or structure to a Map value, 'm', or cast a map[string]interface{} value to a Map value, 'm', then:
+      paths := m.PathsForKey(key)
+      path := m.PathForKeyShortest(key)
+      values, err := m.ValuesForKey(key, subkeys)
+      values, err := m.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays.
+      count, err := m.UpdateValuesForPath(newVal, path, subkeys)
+
+   Get everything at once, irrespective of path depth:
+      leafnodes := m.LeafNodes()
+      leafvalues := m.LeafValues()
+
+   A new Map with whatever keys are desired can be created from the current Map and then encoded in XML
+   or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.)
+      newMap, err := m.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+      newXml, err := newMap.Xml()   // for example
+      newJson, err := newMap.Json() // ditto
+
+XML PARSING CONVENTIONS
+
+   Using NewXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+   Using NewXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+
+   Both
+
+   - By default, "Nan", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+XML ENCODING CONVENTIONS
+   
+   - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>".
+     NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values,
+           which, then, encode in JSON as '"tag":""' values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           m.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+*/
+package mxj

+ 7 - 0
vendor/github.com/agrison/mxj/exists.go

@@ -0,0 +1,7 @@
+package mxj
+
+// Exists reports whether the given path resolves to at least one value
+// in the Map.
+func (mv Map) Exists(path string) bool {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return false
+	}
+	return len(vals) > 0
+}

+ 299 - 0
vendor/github.com/agrison/mxj/files.go

@@ -0,0 +1,299 @@
+package mxj
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+type Maps []Map
+
+// NewMaps returns a new, empty (but non-nil) Maps slice.
+func NewMaps() Maps {
+	return Maps{}
+}
+
+// MapRaw pairs a decoded Map with the raw bytes it was decoded from.
+type MapRaw struct {
+	M Map    // the decoded map value
+	R []byte // the raw JSON/XML text that produced M
+}
+
+// NewMapsFromJsonFile - creates an array of Map values from a file of
+// JSON values. Reading stops at EOF; a decode error aborts and returns
+// the Maps collected so far together with the raw text that failed.
+func NewMapsFromJsonFile(name string) (Maps, error) {
+	// the file must exist and be a regular file
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of
+// JSON values, preserving the raw text alongside each decoded Map.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) {
+	// the file must exist and be a regular file
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapJsonReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+}
+
+// NewMapsFromXmlFile - creates an array of Map values from a file of XML values.
+func NewMapsFromXmlFile(name string) (Maps, error) {
+	// temporarily disable writer buffering, restored on exit
+	// NOTE(review): mutating the package-global XmlWriterBufSize is not
+	// concurrency-safe — confirm single-goroutine use.
+	x := XmlWriterBufSize
+	XmlWriterBufSize = 0
+	defer func() {
+		XmlWriterBufSize = x
+	}()
+
+	// the file must exist and be a regular file
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]Map, 0)
+	for {
+		m, raw, err := NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw))
+		}
+		if len(m) > 0 {
+			am = append(am, m)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values.
+// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw().
+// It is slow at parsing a file from disk and is intended for relatively small utility files.
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) {
+	// temporarily disable writer buffering, restored on exit
+	// NOTE(review): mutating the package-global XmlWriterBufSize is not
+	// concurrency-safe — confirm single-goroutine use.
+	x := XmlWriterBufSize
+	XmlWriterBufSize = 0
+	defer func() {
+		XmlWriterBufSize = x
+	}()
+
+	// the file must exist and be a regular file
+	fi, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("file %s is not a regular file", name)
+	}
+
+	fh, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	am := make([]MapRaw, 0)
+	for {
+		mr := new(MapRaw)
+		mr.M, mr.R, err = NewMapXmlReaderRaw(fh)
+		if err != nil && err != io.EOF {
+			return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R))
+		}
+		if len(mr.M) > 0 {
+			am = append(am, *mr)
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return am, nil
+}
+
+// ------------------------ Maps writing -------------------------
+// These are handy-dandy methods for dumping configuration data, etc.
+
+// JsonString - analogous to mv.Json()
+// If safeEncoding is true, the safe encoding of '<', '>' and '&' is
+// preserved in the concatenated output.
+func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) {
+	var s string
+	for _, v := range mvs {
+		// bug fix: safeEncoding was accepted but never forwarded to Json()
+		j, err := v.Json(safeEncoding...)
+		if err != nil {
+			return s, err
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// JsonStringIndent - analogous to mv.JsonIndent()
+// NOTE(review): the safeEncoding argument is accepted but never used —
+// confirm whether it should be forwarded to JsonIndent.
+func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) {
+	var s string
+	var haveFirst bool
+	for _, v := range mvs {
+		j, err := v.JsonIndent(prefix, indent)
+		if err != nil {
+			return s, err
+		}
+		// separate successive JSON documents with a newline
+		if haveFirst {
+			s += "\n"
+		} else {
+			haveFirst = true
+		}
+		s += string(j)
+	}
+	return s, nil
+}
+
+// XmlString - analogous to mv.Xml(): concatenates the XML encodings of
+// all Maps into one string.
+func (mvs Maps) XmlString() (string, error) {
+	var out string
+	for _, m := range mvs {
+		piece, err := m.Xml()
+		if err != nil {
+			return out, err
+		}
+		out += string(piece)
+	}
+	return out, nil
+}
+
+// XmlStringIndent - analogous to mv.XmlIndent(): concatenates the
+// pretty-printed XML encodings of all Maps into one string.
+func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) {
+	var out string
+	for _, m := range mvs {
+		piece, err := m.XmlIndent(prefix, indent)
+		if err != nil {
+			return out, err
+		}
+		out += string(piece)
+	}
+	return out, nil
+}
+
+// JsonFile - write Maps to named file as JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonWriter method.
+func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonString(encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	// bug fix: the error from WriteString was previously ignored
+	_, err = fh.WriteString(s)
+	return err
+}
+
+// JsonFileIndent - write Maps to named file as pretty JSON
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use JsonIndentWriter method.
+func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error {
+	var encoding bool
+	if len(safeEncoding) == 1 {
+		encoding = safeEncoding[0]
+	}
+	s, err := mvs.JsonStringIndent(prefix, indent, encoding)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	// bug fix: the error from WriteString was previously ignored
+	_, err = fh.WriteString(s)
+	return err
+}
+
+// XmlFile - write Maps to named file as XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlWriter method.
+func (mvs Maps) XmlFile(file string) error {
+	s, err := mvs.XmlString()
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	// bug fix: the error from WriteString was previously ignored
+	_, err = fh.WriteString(s)
+	return err
+}
+
+// XmlFileIndent - write Maps to named file as pretty XML
+// Note: the file will be created, if necessary; if it exists it will be truncated.
+// If you need to append to a file, open it and use XmlIndentWriter method.
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error {
+	s, err := mvs.XmlStringIndent(prefix, indent)
+	if err != nil {
+		return err
+	}
+	fh, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	// bug fix: the error from WriteString was previously ignored
+	_, err = fh.WriteString(s)
+	return err
+}
+ 2 - 0
vendor/github.com/agrison/mxj/files_test.badjson

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"some", "bad":JSON, "in":"it" }

+ 9 - 0
vendor/github.com/agrison/mxj/files_test.badxml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</other>
+	<for>test case</for>
+</msg>

+ 2 - 0
vendor/github.com/agrison/mxj/files_test.json

@@ -0,0 +1,2 @@
+{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" }
+{ "with":"just", "two":2, "JSON":"values", "true":true }

+ 9 - 0
vendor/github.com/agrison/mxj/files_test.xml

@@ -0,0 +1,9 @@
+<doc>
+	<some>test</some>
+	<data>for files.go</data>
+</doc>
+<msg>
+	<just>some</just>
+	<another>doc</another>
+	<for>test case</for>
+</msg>

+ 1 - 0
vendor/github.com/agrison/mxj/files_test_dup.json

@@ -0,0 +1 @@
+{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"}

+ 1 - 0
vendor/github.com/agrison/mxj/files_test_dup.xml

@@ -0,0 +1 @@
+<doc><data>for files.go</data><some>test</some></doc><msg><another>doc</another><for>test case</for><just>some</just></msg>

+ 12 - 0
vendor/github.com/agrison/mxj/files_test_indent.json

@@ -0,0 +1,12 @@
+{
+  "a": "test",
+  "file": "for",
+  "files_test.go": "case",
+  "this": "is"
+}
+{
+  "JSON": "values",
+  "true": true,
+  "two": 2,
+  "with": "just"
+}

+ 8 - 0
vendor/github.com/agrison/mxj/files_test_indent.xml

@@ -0,0 +1,8 @@
+<doc>
+  <data>for files.go</data>
+  <some>test</some>
+</doc><msg>
+  <another>doc</another>
+  <for>test case</for>
+  <just>some</just>
+</msg>

+ 319 - 0
vendor/github.com/agrison/mxj/json.go

@@ -0,0 +1,319 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+)
+
+// ------------------------------ write JSON -----------------------
+
+// Just a wrapper on json.Marshal.
+// If option safeEncoding is'true' then safe encoding of '<', '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) Json(safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.Marshal(mv)
+
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// Just a wrapper on json.MarshalIndent.
+// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&'
+// is preserved. (see encoding/json#Marshal, encoding/json#Encode)
+func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	var s bool
+	if len(safeEncoding) == 1 {
+		s = safeEncoding[0]
+	}
+
+	b, err := json.MarshalIndent(mv, prefix, indent)
+	if !s {
+		b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
+		b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
+		b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
+	}
+	return b, err
+}
+
+// The following implementation is provided for symmetry with NewMapJsonReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.Json(safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// Writes the Map as pretty JSON on the Writer.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// Parse numeric values as json.Number types - see encoding/json#Number
+var JsonUseNumber bool
+
+// Just a wrapper on json.Unmarshal
+//	Converting JSON to XML is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapJson(jsonVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		xmlVal, xerr := mapVal.Xml()
+//		if xerr != nil {
+//			// handle error
+//		}
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// Retrieve a Map value from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// Retrieve a Map value and raw JSON - []byte - from an io.Reader.
+//  NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+//        os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+//        value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal
+//        a JSON object and retrieve the raw JSON in a single call.
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values  -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for imput.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or other as a special case and discussing the underlying stdlib logic.
+
+// Bulk process JSON using handlers that process a Map value.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader  processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process JSON using handlers that process a Map value and the raw JSON.
+//	'rdr' is an io.Reader for the JSON (stream).
+//	'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}

+ 658 - 0
vendor/github.com/agrison/mxj/keyvalues.go

@@ -0,0 +1,658 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+//	keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// Adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// Return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'. NOTE: 'key' can be wildcard, "*".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3".
+//             - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion critera - e.g., "!author:William T. Gaddis".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+	// ret := make([]interface{}, 0)
+	// hasKey(m, key, &ret, subKeyMap)
+	// return ret, nil
+}
+
+// hasKey - if the map 'key' exists append it to array
+//          if it doesn't do nothing except scan array and map values
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+			// hasKey(v, key, ret, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+			// hasKey(v, key, ret, subkeys)
+		}
+	}
+}
+
+// -----------------------  get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays.  Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// Retrieve all values for a path from the Map.  If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+//   'path' is a dot-separated path of key values.
+//          - If a node in the path is '*', then everything beyond is walked.
+//          - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
+//            even "*[2].*[0].field".
+//   'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+//             - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them.
+//             - For attributes prefix the label with a hyphen, '-', e.g., "-seq:3".
+//             - If the 'path' refers to a list, then "tag:value" would return member of the list.
+//             - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//             - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
+//               exclusion critera - e.g., "!author:William T. Gaddis".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // Vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+		am := vals[keys[i].position:(keys[i].position + 1)]
+
+		// must be a map[string]interface{} value so we can keep walking the path
+		amm, ok := am[0].(map[string]interface{})
+		if !ok {
+			vals = nil
+			break
+		}
+
+		m = Map(amm)
+		haveFirst = false
+	}
+
+	return vals, nil
+}
+
+type key struct {
+	name     string
+	isArray  bool
+	position int
+}
+
+func parsePath(s string) ([]*key, error) {
+	keys := strings.Split(s, ".")
+
+	ret := make([]*key, 0)
+
+	for i := 0; i < len(keys); i++ {
+		if keys[i] == "" {
+			continue
+		}
+
+		newkey := new(key)
+		if strings.Index(keys[i], "[") < 0 {
+			newkey.name = keys[i]
+			ret = append(ret, newkey)
+			continue
+		}
+
+		p := strings.Split(keys[i], "[")
+		newkey.name = p[0]
+		p = strings.Split(p[1], "]")
+		if p[0] == "" { // no right bracket
+			return nil, fmt.Errorf("no right bracket on key index: %s", keys[i])
+		}
+		// convert p[0] to a int value
+		pos, nerr := strconv.ParseInt(p[0], 10, 32)
+		if nerr != nil {
+			return nil, fmt.Errorf("cannot convert index to int value: %s", p[0])
+		}
+		newkey.position = int(pos)
+		newkey.isArray = true
+		ret = append(ret, newkey)
+	}
+
+	return ret, nil
+}
+
+// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'.
+func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys := strings.Split(path, ".")
+	if keys[len(keys)-1] == "" {
+		keys = keys[:len(keys)-1]
+	}
+	// ivals := make([]interface{}, 0)
+	// valuesForKeyPath(&ivals, m, keys, subKeyMap)
+	// return ivals, nil
+	ivals := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap)
+	return ivals[:cnt], nil
+}
+
+func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) {
+	lenKeys := len(keys)
+
+	// load 'm' values into 'ret'
+	// expand any lists
+	if lenKeys == 0 {
+		switch m.(type) {
+		case map[string]interface{}:
+			if subkeys != nil {
+				if ok := hasSubKeys(m, subkeys); !ok {
+					return
+				}
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		case []interface{}:
+			for i, v := range m.([]interface{}) {
+				if subkeys != nil {
+					if ok := hasSubKeys(v, subkeys); !ok {
+						continue // only load list members with subkeys
+					}
+				}
+				*ret = append(*ret, (m.([]interface{}))[i])
+				*cnt++
+			}
+		default:
+			if subkeys != nil {
+				return // must be map[string]interface{} if there are subkeys
+			}
+			*ret = append(*ret, m)
+			*cnt++
+		}
+		return
+	}
+
+	// key of interest
+	key := keys[0]
+	switch key {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				default:
+					// valuesForKeyPath(ret, v, keys[1:], subkeys)
+					valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[key]; ok {
+				// valuesForKeyPath(ret, v, keys[1:], subkeys)
+				valuesForKeyPath(ret, cnt, v, keys[1:], subkeys)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[key]; ok {
+						// valuesForKeyPath(ret, vv, keys[1:], subkeys)
+						valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys)
+					}
+				}
+			}
+		}
+	}
+}
+
+// hasSubKeys() - interface{} equality works for string, float64, bool
+// 'v' must be a map[string]interface{} value to have subkeys
+// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard.
+func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool {
+	if len(subkeys) == 0 {
+		return true
+	}
+
+	switch v.(type) {
+	case map[string]interface{}:
+		// do all subKey name:value pairs match?
+		mv := v.(map[string]interface{})
+		for skey, sval := range subkeys {
+			isNotKey := false
+			if skey[:1] == "!" { // a NOT-key
+				skey = skey[1:]
+				isNotKey = true
+			}
+			vv, ok := mv[skey]
+			if !ok { // key doesn't exist
+				if isNotKey { // key not there, but that's what we want
+					if kv, ok := sval.(string); ok && kv == "*" {
+						continue
+					}
+				}
+				return false
+			}
+			// wildcard check
+			if kv, ok := sval.(string); ok && kv == "*" {
+				if isNotKey { // key is there, and we don't want it
+					return false
+				}
+				continue
+			}
+			switch sval.(type) {
+			case string:
+				if s, ok := vv.(string); ok && s == sval.(string) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case bool:
+				if b, ok := vv.(bool); ok && b == sval.(bool) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			case float64:
+				if f, ok := vv.(float64); ok && f == sval.(float64) {
+					if isNotKey {
+						return false
+					}
+					continue
+				}
+			}
+			// key there but didn't match subkey value
+			if isNotKey { // that's what we want
+				continue
+			}
+			return false
+		}
+		// all subkeys matched
+		return true
+	}
+
+	// not a map[string]interface{} value, can't have subkeys
+	return false
+}
+
+// Generate map of key:value entries as map[string]string.
+//	'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'.
+//	If len(kv) == 0, the return is (nil, nil).
+func getSubKeyMap(kv ...string) (map[string]interface{}, error) {
+	if len(kv) == 0 {
+		return nil, nil
+	}
+	m := make(map[string]interface{}, 0)
+	for _, v := range kv {
+		vv := strings.Split(v, ":")
+		switch len(vv) {
+		case 2:
+			m[vv[0]] = interface{}(vv[1])
+		case 3:
+			switch vv[3] {
+			case "string", "char", "text":
+				m[vv[0]] = interface{}(vv[1])
+			case "bool", "boolean":
+				// ParseBool treats "1"==true & "0"==false
+				b, err := strconv.ParseBool(vv[1])
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(b)
+			case "float", "float64", "num", "number", "numeric":
+				f, err := strconv.ParseFloat(vv[1], 64)
+				if err != nil {
+					return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1])
+				}
+				m[vv[0]] = interface{}(f)
+			default:
+				return nil, fmt.Errorf("unknown subkey conversion spec: %s", v)
+			}
+		default:
+			return nil, fmt.Errorf("unknown subkey spec: %s", v)
+		}
+	}
+	return m, nil
+}
+
+// -------------------------------  END of valuesFor ... ----------------------------
+
+// ----------------------- locate where a key value is in the tree -------------------
+
+//----------------------------- find all paths to a key --------------------------------
+
+// Get all paths through Map, 'mv', (in dot-notation) that terminate with the specified key.
+// Results can be used with ValuesForPath.
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// Extract the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'..
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
+// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			if crumbs == "" {
+				crumbs = key
+			} else {
+				crumbs += "." + key
+			}
+			// *basket = append(*basket, crumb)
+			basket[crumbs] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, intialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+// Returns the first found value for the path.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, errors.New("ValueForPath: path not found")
+	}
+	return vals[0], nil
+}
+
+// Returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	switch str := val.(type) {
+	case string:
+		return str, nil
+	default:
+		return "", fmt.Errorf("ValueForPath: unsupported type: %T", str)
+	}
+}
+
+// Returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
+func (mv Map) ValueOrEmptyForPathString(path string) string {
+	str, _ := mv.ValueForPathString(path)
+	return str
+}

+ 82 - 0
vendor/github.com/agrison/mxj/leafnode.go

@@ -0,0 +1,82 @@
+package mxj
+
+// leafnode.go - return leaf nodes with paths and values for the Map
+// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw
+
+import (
+	"strconv"
+)
+
+const (
+	NoAttributes = true // suppress LeafNode values that are attributes
+)
+
+// LeafNode - a terminal path value in a Map.
+// For XML Map values it represents an attribute or simple element value  - of type
+// string unless Map was created using Cast flag. For JSON Map values it represents
+// a string, numeric, boolean, or null value.
+type LeafNode struct {
+	Path  string      // a dot-notation representation of the path with array subscripting
+	Value interface{} // the value at the path termination
+}
+
+// LeafNodes - returns an array of all LeafNode values for the Map.
+// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-')
+// as well as the "#text" key for the associated simple element value.
+func (mv Map) LeafNodes(no_attr ...bool) []LeafNode {
+	var a bool
+	if len(no_attr) == 1 {
+		a = no_attr[0]
+	}
+
+	l := make([]LeafNode, 0)
+	getLeafNodes("", "", map[string]interface{}(mv), &l, a)
+	return l
+}
+
+func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) {
+	// if stripping attributes, then also strip "#text" key
+	if !noattr || node != "#text" {
+		if path != "" && node[:1] != "[" {
+			path += "."
+		}
+		path += node
+	}
+	switch mv.(type) {
+	case map[string]interface{}:
+		for k, v := range mv.(map[string]interface{}) {
+			if noattr && k[:1] == "-" {
+				continue
+			}
+			getLeafNodes(path, k, v, l, noattr)
+		}
+	case []interface{}:
+		for i, v := range mv.([]interface{}) {
+			getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr)
+		}
+	default:
+		// can't walk any further, so create leaf
+		n := LeafNode{path, mv}
+		*l = append(*l, n)
+	}
+}
+
+// LeafPaths - all paths that terminate in LeafNode values.
+func (mv Map) LeafPaths(no_attr ...bool) []string {
+	ln := mv.LeafNodes()
+	ss := make([]string, len(ln))
+	for i := 0; i < len(ln); i++ {
+		ss[i] = ln[i].Path
+	}
+	return ss
+}
+
+// LeafValues - all terminal values in the Map.
+func (mv Map) LeafValues(no_attr ...bool) []interface{} {
+	ln := mv.LeafNodes()
+	vv := make([]interface{}, len(ln))
+	for i := 0; i < len(ln); i++ {
+		vv[i] = ln[i].Value
+	}
+	return vv
+}

+ 83 - 0
vendor/github.com/agrison/mxj/misc.go

@@ -0,0 +1,83 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// misc.go - mimic functions (+others) called out in:
+//          https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ
+// Primarily these methods let you retrieve XML structure information.
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Return the root element of the Map. If there is not a single key in Map,
+// then an error is returned.
+func (m Map) Root() (string, error) {
+	mm := map[string]interface{}(m)
+	if len(mm) != 1 {
+		return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm))
+	}
+	for k, _ := range mm {
+		return k, nil
+	}
+	return "", nil
+}
+
+// If the path is an element with sub-elements, return a list of the sub-element
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are prefixed with
+// '-', a hyphen, are considered attributes; see m.Attributes(path).
+func (m Map) Elements(path string) ([]string, error) {
+	e, err := m.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch e.(type) {
+	case map[string]interface{}:
+		ee := e.(map[string]interface{})
+		elems := make([]string, len(ee))
+		var i int
+		for k, _ := range ee {
+			if k[:1] == "-" {
+				continue // skip attributes
+			}
+			elems[i] = k
+			i++
+		}
+		elems = elems[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(elems)
+		return elems, nil
+	}
+	return nil, fmt.Errorf("no elements for path: %s", path)
+}
+
+// If the path is an element with attributes, return a list of the attribute
+// keys.  (The list is alphabeticly sorted.)  NOTE: Map keys that are not prefixed with
+// '-', a hyphen, are not treated as attributes; see m.Elements(path).
+func (m Map) Attributes(path string) ([]string, error) {
+	a, err := m.ValueForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	switch a.(type) {
+	case map[string]interface{}:
+		aa := a.(map[string]interface{})
+		attrs := make([]string, len(aa))
+		var i int
+		for k, _ := range aa {
+			if k[:1] != "-" {
+				continue // skip non-attributes
+			}
+			attrs[i] = k[1:]
+			i++
+		}
+		attrs = attrs[:i]
+		// alphabetic sort keeps things tidy
+		sort.Strings(attrs)
+		return attrs, nil
+	}
+	return nil, fmt.Errorf("no attributes for path: %s", path)
+}

+ 206 - 0
vendor/github.com/agrison/mxj/mxj.go

@@ -0,0 +1,206 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+)
+
+const (
+	Cast         = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast)
+	SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding)
+)
+
+type Map map[string]interface{}
+
+// Allocate a Map.
+func New() Map {
+	return Map(make(map[string]interface{}))
+}
+
+// Cast a Map to map[string]interface{}
+// (No copy is made - the returned map shares storage with the Map.)
+func (mv Map) Old() map[string]interface{} {
+	return mv
+}
+
+// Return a copy of mv as a newly allocated Map.  If the Map only contains string,
+// numeric, map[string]interface{}, and []interface{} values, then it can be thought
+// of as a "deep copy."  Copying a structure (or structure reference) value is subject
+// to the noted restrictions.
+//	NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags
+//	      then only public fields of the structure are in the new Map - and with
+//	      keys that conform to any encoding tag instructions. The structure itself will
+//	      be represented as a map[string]interface{} value.
+func (mv Map) Copy() (Map, error) {
+	// Poor-man's deep copy: round-trip the Map through its JSON encoding.
+	// Not efficient, but it works.
+	j, err := mv.Json()
+	if err != nil {
+		// must handle the error - we don't know how mv was built
+		return nil, err
+	}
+	return NewMapJson(j)
+}
+
+// --------------- StringIndent ... from x2j.WriteMap -------------
+
+// Pretty print a Map.
+//	'offset' is an optional initial indentation count (see writeMap).
+func (mv Map) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(mv), true, offset...)
+}
+
+// Pretty print a Map without the value type information - just key:value entries.
+//	'offset' is an optional initial indentation count (see writeMapNoTypes).
+func (mv Map) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMapNoTypes(map[string]interface{}(mv), true, offset...)
+}
+
+// writeMap - dumps the map[string]interface{} for examination.
+//	'offset' is initial indentation count; typically: Write(m).
+//	Values are rendered with a "[type]" prefix; map keys are emitted in
+//	sorted order so output is deterministic.
+func writeMap(m interface{}, root bool, offset ...int) string {
+	var indent int
+	if len(offset) == 1 {
+		indent = offset[0]
+	}
+
+	var s string
+	switch v := m.(type) {
+	case nil:
+		return "[nil] nil"
+	case string:
+		return "[string] " + v
+	case int:
+		return "[int] " + strconv.Itoa(v)
+	case int32:
+		// BUG FIX: previously grouped as "case int, int32, int64" followed by
+		// m.(int), which panics when the dynamic type is int32 or int64.
+		return "[int] " + strconv.FormatInt(int64(v), 10)
+	case int64:
+		return "[int] " + strconv.FormatInt(v, 10)
+	case float64:
+		return "[float64] " + strconv.FormatFloat(v, 'e', 2, 64)
+	case float32:
+		// BUG FIX: previously asserted m.(float64), which panics for float32.
+		return "[float64] " + strconv.FormatFloat(float64(v), 'e', 2, 32)
+	case bool:
+		return "[bool] " + strconv.FormatBool(v)
+	case []interface{}:
+		s += "[[]interface{}]"
+		for idx, item := range v {
+			s += "\n"
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += "[item: " + strconv.FormatInt(int64(idx), 10) + "]"
+			// scalar members get a line break before their (indented) value
+			switch item.(type) {
+			case string, float64, bool:
+				s += "\n"
+			default:
+				// noop
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += writeMap(item, false, indent+1)
+		}
+	case map[string]interface{}:
+		// render entries in sorted key order
+		list := make([][2]string, len(v))
+		var n int
+		for k, val := range v {
+			list[n][0] = k
+			list[n][1] = writeMap(val, false, indent+1)
+			n++
+		}
+		sort.Sort(mapList(list))
+		for _, kv := range list {
+			if !root {
+				s += "\n"
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += kv[0] + " :" + kv[1]
+		}
+	default:
+		// shouldn't ever be here ...
+		s += fmt.Sprintf("[unknown] %#v", m)
+	}
+	return s
+}
+
+// writeMapNoTypes - dumps the map[string]interface{} for examination.
+//	Like writeMap() but without the "[type]" prefixes - just key:value entries.
+//	'offset' is initial indentation count; typically: Write(m).
+func writeMapNoTypes(m interface{}, root bool, offset ...int) string {
+	var indent int
+	if len(offset) == 1 {
+		indent = offset[0]
+	}
+
+	var s string
+	switch v := m.(type) {
+	case nil:
+		return "nil"
+	case string:
+		return v
+	case int:
+		// CONSISTENCY FIX: integer values previously fell through to the
+		// default case and printed as "[?] ...", although the companion
+		// writeMap() handles them; render them as plain numbers here.
+		return strconv.Itoa(v)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(v, 10)
+	case float64:
+		return strconv.FormatFloat(v, 'e', 2, 64)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'e', 2, 32)
+	case bool:
+		return strconv.FormatBool(v)
+	case []interface{}:
+		for idx, item := range v {
+			s += "\n"
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += "[" + strconv.FormatInt(int64(idx), 10) + "]"
+			// scalar members get a line break before their (indented) value
+			switch item.(type) {
+			case string, float64, bool:
+				s += "\n"
+			default:
+				// noop
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += writeMapNoTypes(item, false, indent+1)
+		}
+	case map[string]interface{}:
+		// render entries in sorted key order
+		list := make([][2]string, len(v))
+		var n int
+		for k, val := range v {
+			list[n][0] = k
+			list[n][1] = writeMapNoTypes(val, false, indent+1)
+			n++
+		}
+		sort.Sort(mapList(list))
+		for _, kv := range list {
+			if !root {
+				s += "\n"
+			}
+			for i := 0; i < indent; i++ {
+				s += "  "
+			}
+			s += kv[0] + " :" + kv[1]
+		}
+	default:
+		// shouldn't ever be here ...
+		s += fmt.Sprintf("[?] %#v", m)
+	}
+	return s
+}
+
+// ======================== utility ===============
+
+// mapList implements sort.Interface over [key, renderedValue] pairs so
+// map entries can be emitted in deterministic, alphabetic key order.
+type mapList [][2]string
+
+func (ml mapList) Len() int {
+	return len(ml)
+}
+
+func (ml mapList) Swap(i, j int) {
+	ml[i], ml[j] = ml[j], ml[i]
+}
+
+// Less reports whether entry i sorts strictly before entry j (by key).
+// BUG FIX: this previously returned true for equal keys (a non-strict
+// ordering), which violates the sort.Interface contract that Less be a
+// strict weak ordering.
+func (ml mapList) Less(i, j int) bool {
+	return ml[i][0] < ml[j][0]
+}

+ 183 - 0
vendor/github.com/agrison/mxj/newmap.go

@@ -0,0 +1,183 @@
+// mxj - A collection of map[string]interface{} and associated XML and JSON utilities.
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// remap.go - build a new Map from the current Map based on keyOld:keyNew mappings
+//            keys can use dot-notation, keyOld can use wildcard, '*'
+//
+// Computational strategy -
+// Using the key path - []string - traverse a new map[string]interface{} and
+// insert the oldVal as the newVal when we arrive at the end of the path.
+// If the type at the end is nil, then that is newVal
+// If the type at the end is a singleton (string, float64, bool) an array is created.
+// If the type at the end is an array, newVal is just appended.
+// If the type at the end is a map, it is inserted if possible or the map value
+//    is converted into an array if necessary.
+
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// (Map)NewMap - create a new Map from data in the current Map.
+//	'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey'
+//	should be the value for 'newKey' in the returned Map.
+//		- 'oldKey' supports dot-notation as described for (Map)ValuesForPath()
+//		- 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays
+//		- "oldKey" is shorthand for the keypair value "oldKey:oldKey"
+//		- "oldKey:" and ":newKey" are invalid keypair values
+//		- if 'oldKey' does not exist in the current Map, it is not written to the new Map.
+//		  "null" is not supported unless it is the current Map.
+//		- see newmap_test.go for several syntax examples
+//
+//	NOTE: mv.NewMap() == mxj.New().
+func (mv Map) NewMap(keypairs ...string) (Map, error) {
+	n := make(map[string]interface{}, 0)
+	if len(keypairs) == 0 {
+		return n, nil
+	}
+
+	// loop through the pairs
+	var oldKey, newKey string
+	var path []string
+	for _, v := range keypairs {
+		if len(v) == 0 {
+			continue // just skip over empty keypair arguments
+		}
+
+		// initialize oldKey, newKey and check
+		vv := strings.Split(v, ":")
+		if len(vv) > 2 {
+			return n, errors.New("oldKey:newKey keypair value not valid - " + v)
+		}
+		if len(vv) == 1 {
+			oldKey, newKey = vv[0], vv[0]
+		} else {
+			oldKey, newKey = vv[0], vv[1]
+		}
+		// BUG FIX: strings.TrimSpace returns the trimmed string; the return
+		// values were previously discarded, so surrounding whitespace on the
+		// keys was silently kept.
+		oldKey = strings.TrimSpace(oldKey)
+		newKey = strings.TrimSpace(newKey)
+		if strings.Contains(newKey, "*") {
+			return n, errors.New("newKey value cannot contain wildcard character - " + v)
+		}
+		if strings.Contains(newKey, "[") {
+			return n, errors.New("newKey value cannot contain indexed arrays - " + v)
+		}
+		if oldKey == "" || newKey == "" {
+			return n, errors.New("oldKey or newKey is not specified - " + v)
+		}
+
+		// get oldKey value
+		oldVal, err := mv.ValuesForPath(oldKey)
+		if err != nil {
+			return n, err
+		}
+		if len(oldVal) == 0 {
+			continue // oldKey has no value, may not exist in mv
+		}
+
+		// break down path
+		path = strings.Split(newKey, ".")
+		if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec
+			path = path[:len(path)-1]
+		}
+
+		addNewVal(&n, path, oldVal)
+	}
+
+	return n, nil
+}
+
+// navigate 'n' to end of path and add val
+// addNewVal walks (and lazily builds) the nested map rooted at *n along
+// 'path', then inserts/merges 'val' at the terminal key.  See the
+// "Computational strategy" note at the top of this file for the rules.
+func addNewVal(n *map[string]interface{}, path []string, val []interface{}) {
+	// newVal - either singleton or array
+	var newVal interface{}
+	if len(val) == 1 {
+		newVal = val[0] // is type interface{}
+	} else {
+		newVal = interface{}(val)
+	}
+
+	// walk to the position of interest, create it if necessary
+	m := (*n)           // initialize map walker
+	var k string        // key for m
+	lp := len(path) - 1 // when to stop looking
+	for i := 0; i < len(path); i++ {
+		k = path[i]
+		if i == lp {
+			break
+		}
+		var nm map[string]interface{} // holds position of next-map
+		switch m[k].(type) {
+		case nil: // need a map for next node in path, so go there
+			nm = make(map[string]interface{}, 0)
+			m[k] = interface{}(nm)
+			m = m[k].(map[string]interface{})
+		case map[string]interface{}:
+			// OK - got somewhere to walk to, go there
+			m = m[k].(map[string]interface{})
+		case []interface{}:
+			// add a map and nm points to new map unless there's already
+			// a map in the array, then nm points there
+			// The placement of the next value in the array is dependent
+			// on the sequence of members - could land on a map or a nil
+			// value first.  TODO: how to test this.
+			a := make([]interface{}, 0)
+			var foundmap bool
+			for _, vv := range m[k].([]interface{}) {
+				switch vv.(type) {
+				case nil: // doesn't appear that this occurs, need a test case
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = make(map[string]interface{}, 0)
+					a = append(a, interface{}(nm))
+					foundmap = true
+				case map[string]interface{}:
+					if foundmap { // use the first one in array
+						a = append(a, vv)
+						continue
+					}
+					nm = vv.(map[string]interface{})
+					a = append(a, vv)
+					foundmap = true
+				default:
+					a = append(a, vv)
+				}
+			}
+			// no map found in array
+			if !foundmap {
+				nm = make(map[string]interface{}, 0)
+				a = append(a, interface{}(nm))
+			}
+			m[k] = interface{}(a) // must insert in map
+			m = nm
+		default: // it's a string, float, bool, etc.
+			// existing scalar is converted into a two-member array
+			// [oldScalar, newMap] so the walk can continue into newMap
+			aa := make([]interface{}, 0)
+			nm = make(map[string]interface{}, 0)
+			aa = append(aa, m[k], nm)
+			m[k] = interface{}(aa)
+			m = nm
+		}
+	}
+
+	// value is nil, array or a singleton of some kind
+	// initially m.(type) == map[string]interface{}
+	v := m[k]
+	switch v.(type) {
+	case nil: // initialized
+		m[k] = newVal
+	case []interface{}:
+		a := m[k].([]interface{})
+		a = append(a, newVal)
+		m[k] = interface{}(a)
+	default: // v exists:string, float64, bool, map[string]interface, etc.
+		// wrap the existing value and the new value together in an array
+		a := make([]interface{}, 0)
+		a = append(a, v, newVal)
+		m[k] = interface{}(a)
+	}
+}

+ 159 - 0
vendor/github.com/agrison/mxj/readme.md

@@ -0,0 +1,159 @@
+<h2>mxj - to/from maps, XML and JSON</h2>
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use mxj/x2j and mxj/j2x packages.
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.  I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi.  Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value.  As shown by:
+
+	BenchmarkNewMapXml-4         	  100000	     18043 ns/op
+	BenchmarkNewStructXml-4      	  100000	     14892 ns/op
+	BenchmarkNewMapJson-4        	  300000	      4633 ns/op
+	BenchmarkNewStructJson-4     	  300000	      3427 ns/op
+	BenchmarkNewMapXmlBooks-4    	   20000	     82850 ns/op
+	BenchmarkNewStructXmlBooks-4 	   20000	     67822 ns/op
+	BenchmarkNewMapJsonBooks-4   	  100000	     17222 ns/op
+	BenchmarkNewStructJsonBooks-4	  100000	     15309 ns/op
+
+<h4>Notices</h4>
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New m.Root(), m.Elements(), m.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: EXPERIMENTAL XML decoding/encoding that preserves original structure of document. See
+	            NewMapXmlSeq() and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'm', from any `map[string]interface{}` value, 'v':
+<pre>m := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'm':
+<pre>m, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := m.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'm':
+<pre>m, err := NewMapReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+m, raw, err := NewMapReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'm', to an XML Writer (`io.Writer`):
+<pre>err := m.XmlWriter(xmlWriter)
+raw, err := m.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+   
+Also, for prettified output:
+<pre>xmlValue, err := m.XmlIndent(prefix, indent, ...)
+err := m.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := m.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>m, err := NewMapStruct(structVal)
+err := m.Struct(structPointer)</pre>
+
+<h4>Extract / modify Map values</h4>
+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON
+or structure to a `Map` value, 'm', or cast a `map[string]interface{}` value to a `Map` value, 'm', then:
+<pre>paths := m.PathsForKey(key)
+path := m.PathForKeyShortest(key)
+values, err := m.ValuesForKey(key, subkeys)
+values, err := m.ValuesForPath(path, subkeys)
+count, err := m.UpdateValuesForPath(newVal, path, subkeys)</pre>
+
+Get everything at once, irrespective of path depth:
+<pre>leafnodes := m.LeafNodes()
+leafvalues := m.LeafValues()</pre>
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+<pre>newMap, err := m.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto</pre>
+
+<h4>Usage</h4>
+
+The package is fairly well self-documented with examples. (http://godoc.org/github.com/clbanning/mxj)
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
+<h4>XML parsing conventions</h4>
+
+Using NewXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation.  (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}`values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the 
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+
+Both
+
+   - By default, "NaN", "Inf", and "-Inf" values are not cast to float64.  If you want them
+     to be cast, set a flag to cast them  using CastNanInf(true).
+
+<h4>XML encoding conventions</h4>
+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+           which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one.  (Go
+           randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+           Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and
+           m.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+           working with the Map representation.
+
+<h4>Running "go test"</h4>
+
+Because there are no guarantees on the sequence map elements are retrieved, the tests have been 
+written for visual verification in most cases.  One advantage is that you can easily use the 
+output from running "go test" as examples of calling the various functions and methods.
+
+<h4>Motivation</h4>
+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values.  This is easily done using `json.Unmarshal` from the
+standard Go libraries.  Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses it into a `map[string]interface{}` value and then reuse
+all the JSON-based code.  The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values.  As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value.  Thus, everything was refactored into the mxj package.
+

+ 37 - 0
vendor/github.com/agrison/mxj/remove.go

@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Removes the path.
+// The map containing the final key of 'path' has that key deleted;
+// an error is returned if the path cannot be resolved.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+// remove deletes the final key of 'path' from the map that directly
+// contains it; errors if the path cannot be resolved.
+func remove(m interface{}, path string) error {
+	parent, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+	delete(parent, lastKey(path))
+	return nil
+}
+
+// returns the last key of the path.
+// lastKey("a.b.c") returns "c"
+func lastKey(path string) string {
+	// LastIndex is -1 when there is no dot, so this also handles
+	// a single-key path: path[0:] == path.
+	return path[strings.LastIndex(path, ".")+1:]
+}
+
+// returns the path without the last key
+// parentPath("a.b.c") returns "a.b"
+func parentPath(path string) string {
+	if i := strings.LastIndex(path, "."); i >= 0 {
+		return path[:i]
+	}
+	// a single-key path has an empty parent
+	return ""
+}

+ 54 - 0
vendor/github.com/agrison/mxj/rename.go

@@ -0,0 +1,54 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps. It doesn't work for cases when it buried in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	switch {
+	case !mv.Exists(path):
+		return errors.New("RenameKey: the path not found: " + path)
+	case mv.Exists(parentPath(path) + "." + newName):
+		return errors.New("RenameKey: the key already exists: " + newName)
+	}
+
+	return renameKey(map[string]interface{}(mv), path, newName)
+}
+
+// renameKey renames the final key of 'path' to 'newName' inside the map
+// that directly contains it.
+func renameKey(m interface{}, path string, newName string) error {
+	parent, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	old := lastKey(path)
+	parent[newName] = parent[old]
+	delete(parent, old)
+
+	return nil
+}
+
+// returns a value which contains a last key in the path
+// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3}
+func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) {
+	keys := strings.Split(path, ".")
+
+	switch mValue := m.(type) {
+	case map[string]interface{}:
+		for key, value := range mValue {
+			if key == keys[0] {
+				if len(keys) == 1 {
+					return mValue, nil
+				} else {
+					// keep looking for the full path to the key
+					return prevValueByPath(value, strings.Join(keys[1:], "."))
+				}
+			}
+		}
+	}
+	return nil, errors.New("prevValueByPath: didn't find the path – " + path)
+}

+ 26 - 0
vendor/github.com/agrison/mxj/set.go

@@ -0,0 +1,26 @@
+package mxj
+
+import (
+	"strings"
+)
+
+// Sets the value for the path
+// NOTE(review): if the parent of 'path' resolves to nil the request is
+// silently ignored (nil error, no change), and if the parent resolves to
+// a non-map value the type assertion below panics — confirm callers only
+// pass paths whose parent is a map[string]interface{}.
+func (mv Map) SetValueForPath(value interface{}, path string) error {
+	pathAry := strings.Split(path, ".")
+	parentPathAry := pathAry[0 : len(pathAry)-1]
+	parentPath := strings.Join(parentPathAry, ".")
+
+	// resolve the map that should contain the final key
+	val, err := mv.ValueForPath(parentPath)
+	if err != nil {
+		return err
+	}
+	if val == nil {
+		return nil // we just ignore the request if there's no val
+	}
+
+	key := pathAry[len(pathAry)-1]
+	cVal := val.(map[string]interface{})
+	cVal[key] = value
+
+	return nil
+}

+ 29 - 0
vendor/github.com/agrison/mxj/songtext.xml

@@ -0,0 +1,29 @@
+<msg mtype="alert" mpriority="1">
+	<text>help me!</text>
+	<song title="A Long Time" author="Mayer Hawthorne">
+		<verses>
+			<verse name="verse 1" no="1">
+				<line no="1">Henry was a renegade</line>
+				<line no="2">Didn't like to play it safe</line>
+				<line no="3">One component at a time</line>
+				<line no="4">There's got to be a better way</line>
+				<line no="5">Oh, people came from miles around</line>
+				<line no="6">Searching for a steady job</line>
+				<line no="7">Welcome to the Motor Town</line>
+				<line no="8">Booming like an atom bomb</line>
+			</verse>
+			<verse name="verse 2" no="2">
+				<line no="1">Oh, Henry was the end of the story</line>
+				<line no="2">Then everything went wrong</line>
+				<line no="3">And we'll return it to its former glory</line>
+				<line no="4">But it just takes so long</line>
+			</verse>
+		</verses>
+		<chorus>
+			<line no="1">It's going to take a long time</line>
+			<line no="2">It's going to take it, but we'll make it one day</line>
+			<line no="3">It's going to take a long time</line>
+			<line no="4">It's going to take it, but we'll make it one day</line>
+		</chorus>
+	</song>
+</msg>

+ 41 - 0
vendor/github.com/agrison/mxj/struct.go

@@ -0,0 +1,41 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"github.com/fatih/structs"
+	"reflect"
+)
+
+// Create a new Map value from a structure.  Error returned if argument is not a structure
+// or if there is a json.Marshal or json.Unmarshal error.
+//	Only public structure fields are decoded in the Map value. Also, json.Marshal structure encoding rules
+//	are followed for decoding the structure fields.
+//	NOTE: conversion is delegated to the third-party github.com/fatih/structs package.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	if !structs.IsStruct(structVal) {
+		return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+	}
+	return structs.Map(structVal), nil
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	// round-trip through JSON to honor encoding tags on the target struct
+	j, err := json.Marshal(map[string]interface{}(mv))
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(j, structPtr)
+}

+ 249 - 0
vendor/github.com/agrison/mxj/updatevalues.go

@@ -0,0 +1,249 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	             or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//	            The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//	            If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an
+//	            exclusion critera - e.g., "!author:William T. Gaddis".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// extract key and value from newVal
+	var key string
+	var val interface{}
+	switch newVal.(type) {
+	case map[string]interface{}, Map:
+		switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec)
+		case Map:
+			newVal = newVal.(Map).Old()
+		}
+		if len(newVal.(map[string]interface{})) != 1 {
+			return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal)
+		}
+		for key, val = range newVal.(map[string]interface{}) {
+		}
+	case string: // split it as a key:value pair
+		ss := strings.Split(newVal.(string), ":")
+		n := len(ss)
+		if n < 2 || n > 3 {
+			return 0, fmt.Errorf("unknown newVal spec - %+v", newVal)
+		}
+		key = ss[0]
+		if n == 2 {
+			val = interface{}(ss[1])
+		} else if n == 3 {
+			switch ss[2] {
+			case "bool", "boolean":
+				nv, err := strconv.ParseBool(ss[1])
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal)
+				}
+				val = interface{}(nv)
+			case "num", "numeric", "float", "int":
+				nv, err := strconv.ParseFloat(ss[1], 64)
+				if err != nil {
+					return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal)
+				}
+				val = interface{}(nv)
+			default:
+				return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal)
+			}
+		}
+	default:
+		return 0, fmt.Errorf("invalid newVal type - %+v", newVal)
+	}
+
+	// parse path
+	keys := strings.Split(path, ".")
+
+	var count int
+	updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count)
+
+	return count, nil
+}
+
+// navigate the path
+func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) {
+	// ----- at end node: looking at possible node to get 'key' ----
+	if len(keys) == 1 {
+		updateValue(key, value, m, keys[0], subkeys, cnt)
+		return
+	}
+
+	// ----- here we are navigating the path thru the penultimate node --------
+	// key of interest is keys[0] - the next in the path
+	switch keys[0] {
+	case "*": // wildcard - scan all values
+		switch m.(type) {
+		case map[string]interface{}:
+			for _, v := range m.(map[string]interface{}) {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}:
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				// flatten out a list of maps - keys are processed
+				case map[string]interface{}:
+					for _, vv := range v.(map[string]interface{}) {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				default:
+					updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+				}
+			}
+		}
+	default: // key - must be map[string]interface{}
+		switch m.(type) {
+		case map[string]interface{}:
+			if v, ok := m.(map[string]interface{})[keys[0]]; ok {
+				updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt)
+			}
+		case []interface{}: // may be buried in list
+			for _, v := range m.([]interface{}) {
+				switch v.(type) {
+				case map[string]interface{}:
+					if vv, ok := v.(map[string]interface{})[keys[0]]; ok {
+						updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt)
+					}
+				}
+			}
+		}
+	}
+}
+
// change value if key and subkeys are present
// updateValue - 'm' is the node reached by the penultimate path key; 'keys0' is
// the final path key ("*" means: try every key at this level). A replacement is
// made only when the relevant node also satisfies all 'subkeys' (see hasSubKeys);
// 'cnt' tallies the number of replacements performed.
func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) {
	// there are two possible options for the value of 'keys0': map[string]interface, []interface{}
	// and 'key' is a key in the map or is a key in a map in a list.
	switch m.(type) {
	case map[string]interface{}: // gotta have the last key
		if keys0 == "*" {
			// wildcard terminal key - retry with each concrete member key
			for k := range m.(map[string]interface{}) {
				updateValue(key, value, m, k, subkeys, cnt)
			}
			return
		}
		// endVal is nil if keys0 isn't present; the switches below then no-op
		endVal, _ := m.(map[string]interface{})[keys0]

		// if newV key is the end of path, replace the value for path-end
		// may be []interface{} - means replace just an entry w/ subkeys
		// otherwise replace the keys0 value if subkeys are there
		// NOTE: this will replace the subkeys, also
		if key == keys0 {
			switch endVal.(type) {
			case map[string]interface{}:
				// subkeys are checked against the PARENT map here, not endVal
				if ok := hasSubKeys(m, subkeys); ok {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
				}
			case []interface{}:
				// without subkeys can't select list member to modify
				// so key:value spec is it ...
				if len(subkeys) == 0 {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
					break
				}
				// rebuild the list, swapping in 'value' for each member that matches subkeys
				nv := make([]interface{}, 0)
				var valmodified bool
				for _, v := range endVal.([]interface{}) {
					// check entry subkeys
					if ok := hasSubKeys(v, subkeys); ok {
						// replace v with value
						nv = append(nv, value)
						valmodified = true
						(*cnt)++
						continue
					}
					nv = append(nv, v)
				}
				if valmodified {
					(m.(map[string]interface{}))[keys0] = interface{}(nv)
				}
			default: // anything else is a strict replacement
				if len(subkeys) == 0 {
					(m.(map[string]interface{}))[keys0] = value
					(*cnt)++
				}
			}
			return
		}

		// so value is for an element of endVal
		// if endVal is a map then 'key' must be there w/ subkeys
		// if endVal is a list then 'key' must be in a list member w/ subkeys
		switch endVal.(type) {
		case map[string]interface{}:
			if ok := hasSubKeys(endVal, subkeys); !ok {
				return
			}
			if _, ok := (endVal.(map[string]interface{}))[key]; ok {
				(endVal.(map[string]interface{}))[key] = value
				(*cnt)++
			}
		case []interface{}: // keys0 points to a list, check subkeys
			for _, v := range endVal.([]interface{}) {
				// got to be a map so we can replace value for 'key'
				vv, vok := v.(map[string]interface{})
				if !vok {
					continue
				}
				if _, ok := vv[key]; !ok {
					continue
				}
				if !hasSubKeys(vv, subkeys) {
					continue
				}
				vv[key] = value
				(*cnt)++
			}
		}
	case []interface{}: // key may be in a list member
		// don't need to handle keys0 == "*"; we're looking at everything, anyway.
		for _, v := range m.([]interface{}) {
			// only map values - we're looking for 'key'
			mm, ok := v.(map[string]interface{})
			if !ok {
				continue
			}
			if _, ok := mm[key]; !ok {
				continue
			}
			if !hasSubKeys(mm, subkeys) {
				continue
			}
			mm[key] = value
			(*cnt)++
		}
	}

	// return
}

+ 919 - 0
vendor/github.com/agrison/mxj/xml.go

@@ -0,0 +1,919 @@
+// Copyright 2012-2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xml.go - basically the core of X2j for map[string]interface{} values.
+//          NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter
+// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ------------------- NewMapXml & NewMapXmlReader ... -------------------------
+
+// If XmlCharsetReader != nil, it will be used to decode the XML, if required.
+//   import (
+//	     charset "code.google.com/p/go-charset/charset"
+//	     github.com/clbanning/mxj
+//	 )
+//   ...
+//   mxj.XmlCharsetReader = charset.NewReader
+//   m, merr := mxj.NewMapXml(xmlValue)
+var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert a XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is a simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. If CoerceKeysToLower() has been called, then all key values will be lower case.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. If CoerceKeysToLower() has been called, then all key values will be lower case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// build the node tree
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// XmlWriterBufSize - set the size of io.Writer for the TeeReader used by NewMapXmlReaderRaw()
+// and HandleXmlReaderRaw().  This reduces repeated memory allocations and copy() calls in most cases.
+//	NOTE: the 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+var XmlWriterBufSize int = 256
+
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	   2. The 'raw' return value may be larger than the XML text value.
+//	   3. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   4. If CoerceKeysToLower() has been called, then all key values will be lower case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, XmlWriterBufSize)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	// build the node tree
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := make([]byte, wb.Len())
+	_, _ = wb.Read(b)
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	p.CharsetReader = XmlCharsetReader
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert a XML doc into map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	p.CharsetReader = XmlCharsetReader
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
// Allow people to drop hyphen when unmarshaling the XML doc.
var useHyphen = true

// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
// Default is 'true'.
//	Note:
//		If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
//		marshal'd as <attr_tag>attr</attr_tag> and may be part of a list.
func PrependAttrWithHyphen(v bool) {
	useHyphen = v
}
+
// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
var includeTagSeqNum bool

// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
// its position when parsed. This is of limited usefulness, since list values cannot
// be tagged with "_seq" without changing their depth in the Map.
// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
// you get.
/*
		<Obj c="la" x="dee" h="da">
			<IntObj id="3"/>
			<IntObj1 id="1"/>
			<IntObj id="2"/>
			<StrObj>hello</StrObj>
		</Obj>

	parses as:

		{
		Obj:{
			"-c":"la",
			"-h":"da",
			"-x":"dee",
			"intObj":[
				{
					"-id"="3",
					"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
				},
				{
					"-id"="2",
					"_seq":"2"
				}],
			"intObj1":{
				"-id":"1",
				"_seq":"1"
				},
			"StrObj":{
				"#text":"hello", // simple element value gets "#text" tag
				"_seq":"3"
				}
			}
		}
*/
func IncludeTagSeqNum(b bool) {
	includeTagSeqNum = b
}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case.  This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	      the associated HandleXmlReader and HandleXmlReaderRaw.
+func CoerceKeysToLower(b ...bool) {
+	if len(b) == 1 {
+		lowerCase = b[0]
+		return
+	}
+	if !lowerCase {
+		lowerCase = true
+	} else {
+		lowerCase = false
+	}
+}
+
// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly.
// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one.
// We've removed the intermediate *node tree with the allocation and subsequent rescanning.
//	'skey' is the tag of the element being parsed ("" on the initial call for the doc root);
//	'a' holds that element's attributes; 'p' is the shared decoder; 'r' requests cast()
//	of simple values to bool/float64. Returns a single-entry map {skey: value}.
func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
	if lowerCase {
		skey = strings.ToLower(skey)
	}

	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey'
	// Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value.
	var n, na map[string]interface{}
	var seq int // for includeTagSeqNum

	// Allocate maps and load attributes, if any.
	if skey != "" {
		n = make(map[string]interface{})  // old n
		na = make(map[string]interface{}) // old n.nodes
		if len(a) > 0 {
			for _, v := range a {
				var key string
				if useHyphen {
					key = `-` + v.Name.Local
				} else {
					key = v.Name.Local
				}
				if lowerCase {
					key = strings.ToLower(key)
				}
				na[key] = cast(v.Value, r)
			}
		}
	}
	// consume tokens until this element's EndElement (or EOF for the root call)
	for {
		t, err := p.Token()
		if err != nil {
			if err != io.EOF {
				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
			}
			return nil, err
		}
		switch t.(type) {
		case xml.StartElement:
			tt := t.(xml.StartElement)

			// First call to xmlToMapParser() doesn't pass xml.StartElement - the map key.
			// So when the loop is first entered, the first token is the root tag along
			// with any attributes, which we process here.
			//
			// Subsequent calls to xmlToMapParser() will pass in tag+attributes for
			// processing before getting the next token which is the element value,
			// which is done above.
			if skey == "" {
				return xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			}

			// If not initializing the map, parse the element.
			// len(nn) == 1, necessarily - it is just an 'n'.
			nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r)
			if err != nil {
				return nil, err
			}

			// The nn map[string]interface{} value is a na[nn_key] value.
			// We need to see if nn_key already exists - means we're parsing a list.
			// This may require converting na[nn_key] value into []interface{} type.
			// First, extract the key:val for the map - it's a singleton.
			// Note: if CoerceKeysToLower() called, then key will be lower case.
			var key string
			var val interface{}
			for key, val = range nn {
				break
			}

			// IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element.
			// In theory, we don't need this if len(na) == 1. But, we don't know what might
			// come next - we're only parsing forward.  So if you ask for 'includeTagSeqNum' you
			// get it on every element. (Personally, I never liked this, but I added it on request
			// and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!)
			if includeTagSeqNum {
				switch val.(type) {
				case []interface{}:
					// noop - There's no clean way to handle this w/o changing message structure.
				case map[string]interface{}:
					val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag
					seq++
				case interface{}: // a non-nil simple element: string, float64, bool
					v := map[string]interface{}{"#text": val}
					v["_seq"] = seq
					seq++
					val = v
				}
			}

			// 'na' holding sub-elements of n.
			// See if 'key' already exists.
			// If 'key' exists, then this is a list, if not just add key:val to na.
			if v, ok := na[key]; ok {
				var a []interface{}
				switch v.(type) {
				case []interface{}:
					a = v.([]interface{})
				default: // anything else - note: v.(type) != nil
					a = []interface{}{v}
				}
				a = append(a, val)
				na[key] = a
			} else {
				na[key] = val // save it as a singleton
			}
		case xml.EndElement:
			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
			if len(n) == 0 {
				// If len(na)==0 we have an empty element == "";
				// it has no xml.Attr nor xml.CharData.
				// Note: in original node-tree parser, val defaulted to "";
				// so we always had the default if len(node.nodes) == 0.
				if len(na) > 0 {
					n[skey] = na
				} else {
					n[skey] = "" // empty element
				}
			}
			return n, nil
		case xml.CharData:
			// clean up possible noise
			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
			if len(tt) > 0 {
				if len(na) > 0 {
					na["#text"] = cast(tt, r)
				} else if skey != "" {
					n[skey] = cast(tt, r)
				} else {
					// per Adrian (http://www.adrianlungu.com/) catch stray text
					// in decoder stream -
					// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
					// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
					// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
					continue
				}
			}
		default:
			// noop - e.g., xml.Comment, xml.ProcInst, xml.Directive are ignored
		}
	}
}
+
+var castNanInf bool
+
+// Cast "Nan", "Inf", "-Inf" XML values to 'float64'.
+// By default, these values will be decoded as 'string'.
+func CastNanInf(b bool) {
+	castNanInf = b
+}
+
+// cast - try to cast string values to bool or float64
+func cast(s string, r bool) interface{} {
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return interface{}(s)
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return interface{}(f)
+		}
+		// ParseBool treats "1"==true & "0"==false
+		// but be more strick - only allow TRUE, True, true, FALSE, False, false
+		if s != "t" && s != "T" && s != "f" && s != "F" {
+			if b, err := strconv.ParseBool(s); err == nil {
+				return interface{}(b)
+			}
+		}
+	}
+	return interface{}(s)
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
const (
	// DefaultRootTag is the element name used when no rootTag argument is supplied.
	DefaultRootTag = "doc"
)

var useGoXmlEmptyElemSyntax bool

// XmlGoEmptyElemSyntax() - <tag ...></tag> rather than <tag .../>.
//	Go's encoding/xml package marshals empty XML elements as <tag ...></tag>.  By default this package
//	encodes empty elements as <tag .../>.  If you're marshaling Map values that include structures
//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
func XmlGoEmptyElemSyntax() {
	useGoXmlEmptyElemSyntax = true
}

// XmlDefaultEmptyElemSyntax() - <tag .../> rather than <tag ...></tag>.
// Return XML encoding for empty elements to the default package setting.
// Reverses effect of XmlGoEmptyElemSyntax().
func XmlDefaultEmptyElemSyntax() {
	useGoXmlEmptyElemSyntax = false
}
+
// Encode a Map as XML.  The companion of NewMapXml().
// The following rules apply.
//    - The key label "#text" is treated as the value for a simple element with attributes.
//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
//    - Map value type encoding:
//          > string, bool, float64, int, int32, int64, float32: per "%v" formating
//          > []bool, []uint8: by casting to string
//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
//            value is "UNKNOWN"
//    - Elements with only attribute values or are null are terminated using "/>".
//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible.
//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
//    - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax().
// The attributes tag=value pairs are alphabetized by "tag".  Also, when encoding map[string]interface{} values -
// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
func (mv Map) Xml(rootTag ...string) ([]byte, error) {
	m := map[string]interface{}(mv)
	var err error
	s := new(string)  // accumulates the encoded XML
	p := new(pretty) // just a stub; no indentation for compact encoding

	if len(m) == 1 && len(rootTag) == 0 {
		for key, value := range m {
			// if it an array, see if all values are map[string]interface{}
			// we force a new root tag if we'll end up with no key:value in the list
			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
			switch value.(type) {
			case []interface{}:
				for _, v := range value.([]interface{}) {
					switch v.(type) {
					case map[string]interface{}: // noop
					default: // anything else
						// a non-map list member: fall back to the default root tag
						// and skip the per-key encode below via the label
						err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
						goto done
					}
				}
			}
			err = mapToXmlIndent(false, s, key, value, p)
		}
	} else if len(rootTag) == 1 {
		err = mapToXmlIndent(false, s, rootTag[0], m, p)
	} else {
		err = mapToXmlIndent(false, s, DefaultRootTag, m, p)
	}
done:
	return []byte(*s), err
}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as  XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// --------------  Handle XML stream by processing Map value --------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for imput.
+var xhandlerPollInterval = time.Millisecond
+
+// Bulk process XML using handlers that process a Map value.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapXmlReader(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// Bulk process XML using handlers that process a Map value and the raw XML.
+//	'rdr' is an io.Reader for XML (stream)
+//	'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing.
+//	'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error.
+//	Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+//	      This means that you can stop reading the file on error or after processing a particular message.
+//	      To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+//	See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader.
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapXmlReaderRaw(xmlReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			time.Sleep(xhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// ----------------- END: Handle XML stream by processing Map value --------------
+
+// --------  a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ----------
+
+// This is a clone of io.TeeReader with the additional method t.ReadByte().
+// Thus, this TeeReader is also an io.ByteReader.
+// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written
+// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte()..
+// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with
+// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic.
+
+type teeReader struct {
+	r io.Reader
+	w io.Writer
+	b []byte
+}
+
+func myTeeReader(r io.Reader, w io.Writer) io.Reader {
+	b := make([]byte, 1)
+	return &teeReader{r, w, b}
+}
+
+// need for io.Reader - but we don't use it ...
+func (t *teeReader) Read(p []byte) (n int, err error) {
+	return 0, nil
+}
+
+func (t *teeReader) ReadByte() (c byte, err error) {
+	n, err := t.r.Read(t.b)
+	if n > 0 {
+		if _, err := t.w.Write(t.b[:1]); err != nil {
+			return t.b[0], err
+		}
+	}
+	return t.b[0], err
+}
+
+// ----------------------- END: io.TeeReader hack -----------------------------------
+
+// ---------------------- XmlIndent - from j2x package ----------------------------
+
+// Encode a map[string]interface{} as a pretty XML string.
+// See Xml for encoding rules.
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
// pretty tracks indentation state while emitting indented XML.
type pretty struct {
	indent   string // one level's worth of indentation
	cnt      int    // current nesting depth
	padding  string // accumulated leading whitespace (prefix + indent*cnt)
	mapDepth int
	start    int
}

// Indent - descend one level: widen the padding by one indent unit.
func (p *pretty) Indent() {
	p.padding += p.indent
	p.cnt++
}

// Outdent - ascend one level; a no-op at depth zero.
func (p *pretty) Outdent() {
	if p.cnt == 0 {
		return
	}
	p.padding = p.padding[:len(p.padding)-len(p.indent)]
	p.cnt--
}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var elen int
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		*s += `<` + key
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		vv := value.(map[string]interface{})
+		lenvv := len(vv)
+		// scan out attributes - keys have prepended hyphen, '-'
+		var cntAttr int
+		attrlist := make([][2]string, len(vv))
+		var n int
+		for k, v := range vv {
+			if k[:1] == "-" {
+				cntAttr++
+				switch v.(type) {
+				case string, float64, bool, int, int32, int64, float32:
+					attrlist[n][0] = k[1:]
+					attrlist[n][1] = fmt.Sprintf("%v", v)
+					n++
+				case []byte:
+					attrlist[n][0] = k[1:]
+					attrlist[n][1] = fmt.Sprintf("%v", string(v.([]byte)))
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", k)
+				}
+			}
+		}
+		if cntAttr > 0 {
+			attrlist = attrlist[:n]
+			sort.Sort(attrList(attrlist))
+			for _, v := range attrlist {
+				*s += ` ` + v[0] + `="` + v[1] + `"`
+			}
+		}
+
+		// only attributes?
+		if cntAttr == lenvv {
+			break
+		}
+		// simple element? Note: '#text" is an invalid XML tag.
+		if v, ok := vv["#text"]; ok && cntAttr+1 == lenvv {
+			*s += ">" + fmt.Sprintf("%v", v)
+			endTag = true
+			elen = 1
+			isSimple = true
+			break
+		}
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		// extract the map k:v pairs and sort on key
+		elemlist := make([][2]interface{}, len(vv))
+		n = 0
+		for k, v := range vv {
+			if k[:1] == "-" {
+				continue
+			}
+			elemlist[n][0] = k
+			elemlist[n][1] = v
+			n++
+		}
+		elemlist = elemlist[:n]
+		sort.Sort(elemList(elemlist))
+		var i int
+		for _, v := range elemlist {
+			switch v[1].(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			mapToXmlIndent(doIndent, s, v[0].(string), v[1], p)
+			switch v[1].(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content ...
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			mapToXmlIndent(doIndent, s, key, v, p)
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		*s += "<" + key
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string, float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + ">"
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if useGoXmlEmptyElemSyntax {
+		*s += "></" + key + ">"
+	} else {
+		*s += "/>"
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	if a[i][0] > a[j][0] {
+		return false
+	}
+	return true
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	if e[i][0].(string) > e[j][0].(string) {
+		return false
+	}
+	return true
+}

+ 723 - 0
vendor/github.com/agrison/mxj/xmlseq.go

@@ -0,0 +1,723 @@
+// Copyright 2012-2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
// NoRoot is returned by the NewMapXmlSeq* parsers, together with the
// partial map, when a comment, directive, or process instruction is
// decoded before any root element has been seen.
var NoRoot = errors.New("no root key")
var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+//
+// NewMapXmlSeq - convert a XML doc into a Map with elements id'd with decoding sequence int - #seq.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with mv.XmlSeq() / mv.XmlSeqIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded.  Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]        
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]        
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the Map representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" -  are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!text>" - are decoded as map["#directive"]map[#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions  - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<![CDATA[" syntax is lost in xml.Decode parser - and is not handled here, either.
+//	   and: "\r\n" is converted to "\n"
+//
+//	NOTES:
+//	   1. The 'xmlVal' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+//
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value.
+//	NOTES:
+//	   1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	      re-encode the message in its original structure.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// build the node tree
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// This is only useful if you want to re-encode the Map as XML using mv.XmlSeq(), etc., to preserve the original structure.
+//
+// Get next XML doc from an io.Reader as a Map value.  Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	    2. The 'raw' return value may be larger than the XML text value.
+//	    3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	       extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	    4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	       re-encode the message in its original structure.
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, XmlWriterBufSize)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	// build the node tree
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := make([]byte, wb.Len())
+	_, _ = wb.Read(b)
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	p.CharsetReader = XmlCharsetReader
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	p.CharsetReader = XmlCharsetReader
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
// Add #seq tag value for each element decoded - to be used for Encoding later.
//
//	skey - the tag of the element being parsed ("" on the initial call,
//	       before the root StartElement has been seen)
//	a    - the xml.Attr list that arrived with skey's StartElement
//	p    - the shared xml.Decoder tokens are pulled from
//	r    - when true, #text values are passed through cast()
func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey'
	var n, na map[string]interface{}
	var seq int // for including seq num when decoding

	// Allocate maps and load attributes, if any.
	if skey != "" {
		// 'n' only needs one slot - save call to runtime•hashGrow()
		// 'na' we don't know
		n = make(map[string]interface{}, 1)
		na = make(map[string]interface{})
		if len(a) > 0 {
			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
			aa := make(map[string]interface{}, len(a))
			for i, v := range a {
				aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r), "#seq": i}
			}
			na["#attr"] = aa
		}
	}
	// Token loop: runs until this element's EndElement (or io.EOF) is seen.
	for {
		t, err := p.Token()
		if err != nil {
			if err != io.EOF {
				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
			}
			return nil, err
		}
		switch t.(type) {
		case xml.StartElement:
			tt := t.(xml.StartElement)

			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
			// So when the loop is first entered, the first token is the root tag along
			// with any attributes, which we process here.
			//
			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
			// processing before getting the next token which is the element value,
			// which is done above.
			if skey == "" {
				return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
			}

			// If not initializing the map, parse the element.
			// len(nn) == 1, necessarily - it is just an 'n'.
			nn, err := xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
			if err != nil {
				return nil, err
			}

			// The nn map[string]interface{} value is a na[nn_key] value.
			// We need to see if nn_key already exists - means we're parsing a list.
			// This may require converting na[nn_key] value into []interface{} type.
			// First, extract the key:val for the map - it's a singleton.
			var key string
			var val interface{}
			for key, val = range nn {
				break
			}

			// add "#seq" k:v pair -
			// Sequence number included even in list elements - this should allow us
			// to properly resequence even something goofy like:
			//     <list>item 1</list>
			//     <subelement>item 2</subelement>
			//     <list>item 3</list>
			// where all the "list" subelements are decoded into an array.
			switch val.(type) {
			case map[string]interface{}:
				val.(map[string]interface{})["#seq"] = seq
				seq++
			case interface{}: // a non-nil simple element: string, float64, bool
				v := map[string]interface{}{"#text": val, "#seq": seq}
				seq++
				val = v
			}

			// 'na' holding sub-elements of n.
			// See if 'key' already exists.
			// If 'key' exists, then this is a list, if not just add key:val to na.
			if v, ok := na[key]; ok {
				var a []interface{}
				switch v.(type) {
				case []interface{}:
					a = v.([]interface{})
				default: // anything else - note: v.(type) != nil
					a = []interface{}{v}
				}
				a = append(a, val)
				na[key] = a
			} else {
				na[key] = val // save it as a singleton
			}
		case xml.EndElement:
			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
			if len(n) == 0 {
				// If len(na)==0 we have an empty element == "";
				// it has no xml.Attr nor xml.CharData.
				// Empty element content will be  map["etag"]map["#text"]""
				// after #seq injection - map["etag"]map["#seq"]seq - after return.
				if len(na) > 0 {
					n[skey] = na
				} else {
					n[skey] = "" // empty element
				}
			}
			return n, nil
		case xml.CharData:
			// clean up possible noise
			tt := strings.Trim(string(t.(xml.CharData)), "\t\r\b\n ")
			if skey == "" {
				// per Adrian (http://www.adrianlungu.com/) catch stray text
				// in decoder stream -
				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
				continue
			}
			if len(tt) > 0 {
				// every simple element is a #text and has #seq associated with it
				na["#text"] = cast(tt, r)
				na["#seq"] = seq
				seq++
			}
		case xml.Comment:
			if n == nil { // no root 'key'
				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
				return n, NoRoot
			}
			cm := make(map[string]interface{}, 2)
			cm["#text"] = string(t.(xml.Comment))
			cm["#seq"] = seq
			seq++
			na["#comment"] = cm
		case xml.Directive:
			if n == nil { // no root 'key'
				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
				return n, NoRoot
			}
			dm := make(map[string]interface{}, 2)
			dm["#text"] = string(t.(xml.Directive))
			dm["#seq"] = seq
			seq++
			na["#directive"] = dm
		case xml.ProcInst:
			if n == nil {
				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
				n = map[string]interface{}{"#procinst": na}
				return n, NoRoot
			}
			pm := make(map[string]interface{}, 3)
			pm["#target"] = t.(xml.ProcInst).Target
			pm["#inst"] = string(t.(xml.ProcInst).Inst)
			pm["#seq"] = seq
			seq++
			na["#procinst"] = pm
		default:
			// noop - shouldn't ever get here, now, since we handle all token types
		}
	}
}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a Map as XML with elements sorted on #seq.  The companion of NewMapXmlSeq().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - The "#seq" key is used to sequence the subelements or attributes but is ignored for writing.
+//    - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//    - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//    - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//    - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//      map entries - <?target inst?>.
+//    - Value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSyntax() called.
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv Map) XmlSeq(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as  XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlSeqWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as  XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlSeqWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeq(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlSeqIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+func (mv Map) XmlSeqIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// This should ONLY be used on Map values that were decoded using NewMapXmlSeq() & co.
+//
+// Encode a map[string]interface{} as a pretty XML string.
+// See mv.XmlSeq() for encoding rules.
+func (mv Map) XmlSeqIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	return []byte(*s), err
+}
+
// mapToXmlSeqIndent - where the work actually happens.
// Appends to *s the XML encoding of 'value' under tag 'key', ordering
// subelements and attributes by their "#seq" entries; 'doIndent' selects
// pretty-printing driven by the state copied from 'pp'.
// returns an error if an attribute is not atomic
func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
	var endTag bool
	var isSimple bool
	var noEndTag bool
	var elen int
	// local copy so sibling elements don't see this element's indentation changes
	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}

	switch value.(type) {
	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
		if doIndent {
			*s += p.padding
		}
		// comments, directives, and procinsts emit their own delimiters below
		if key != "#comment" && key != "#directive" && key != "#procinst" {
			*s += `<` + key
		}
	}
	switch value.(type) {
	case map[string]interface{}:
		val := value.(map[string]interface{})

		if key == "#comment" {
			*s += `<!--` + val["#text"].(string) + `-->`
			noEndTag = true
			break
		}

		if key == "#directive" {
			*s += `<!` + val["#text"].(string) + `>`
			noEndTag = true
			break
		}

		if key == "#procinst" {
			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
			noEndTag = true
			break
		}

		haveAttrs := false
		// process attributes first
		if v, ok := val["#attr"].(map[string]interface{}); ok {
			// First, unroll the map[string]interface{} into a []keyval array.
			// Then sequence it.
			kv := make([]keyval, len(v))
			n := 0
			for ak, av := range v {
				kv[n] = keyval{ak, av}
				n++
			}
			sort.Sort(elemListSeq(kv))
			// Now encode the attributes in original decoding sequence, using keyval array.
			for _, a := range kv {
				vv := a.v.(map[string]interface{})
				switch vv["#text"].(type) {
				case string, float64, bool, int, int32, int64, float32:
					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
				case []byte:
					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", string(vv["#text"].([]byte))) + `"`
				default:
					return fmt.Errorf("invalid attribute value for: %s", a.k)
				}
			}
			haveAttrs = true
		}

		// simple element?
		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
		_, seqOK := val["#seq"] // have key
		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
			if stmp, ok := v.(string); ok && stmp != "" {
				*s += ">" + fmt.Sprintf("%v", v)
				endTag = true
				elen = 1
			}
			isSimple = true
			break
		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
			// here no #text but have #seq or #seq+#attr
			endTag = false
			break
		}

		// we now need to sequence everything except attributes
		// 'kv' will hold everything that needs to be written
		kv := make([]keyval, 0)
		for k, v := range val {
			if k == "#attr" { // already processed
				continue
			}
			if k == "#seq" { // ignore - just for sorting
				continue
			}
			switch v.(type) {
			case []interface{}:
				// unwind the array as separate entries
				for _, vv := range v.([]interface{}) {
					kv = append(kv, keyval{k, vv})
				}
			default:
				kv = append(kv, keyval{k, v})
			}
		}

		// close tag with possible attributes
		*s += ">"
		if doIndent {
			*s += "\n"
		}
		// something more complex
		p.mapDepth++
		// PrintElemListSeq(elemListSeq(kv))
		sort.Sort(elemListSeq(kv))
		// PrintElemListSeq(elemListSeq(kv))
		i := 0
		for _, v := range kv {
			switch v.v.(type) {
			case []interface{}:
			default:
				if i == 0 && doIndent {
					p.Indent()
				}
			}
			i++
			mapToXmlSeqIndent(doIndent, s, v.k, v.v, p)
			switch v.v.(type) {
			case []interface{}: // handled in []interface{} case
			default:
				if doIndent {
					p.Outdent()
				}
			}
			i--
		}
		p.mapDepth--
		endTag = true
		elen = 1 // we do have some content other than attrs
	case []interface{}:
		// a list encodes as repeated elements carrying the same tag
		for _, v := range value.([]interface{}) {
			if doIndent {
				p.Indent()
			}
			mapToXmlSeqIndent(doIndent, s, key, v, p)
			if doIndent {
				p.Outdent()
			}
		}
		return nil
	case nil:
		// terminate the tag
		*s += "<" + key
		break
	default: // handle anything - even goofy stuff
		elen = 0
		switch value.(type) {
		case string, float64, bool, int, int32, int64, float32:
			v := fmt.Sprintf("%v", value)
			elen = len(v)
			if elen > 0 {
				*s += ">" + v
			}
		case []byte: // NOTE: byte is just an alias for uint8
			// similar to how xml.Marshal handles []byte structure members
			v := string(value.([]byte))
			elen = len(v)
			if elen > 0 {
				*s += ">" + v
			}
		default:
			var v []byte
			var err error
			if doIndent {
				v, err = xml.MarshalIndent(value, p.padding, p.indent)
			} else {
				v, err = xml.Marshal(value)
			}
			if err != nil {
				*s += ">UNKNOWN"
			} else {
				elen = len(v)
				if elen > 0 {
					*s += string(v)
				}
			}
		}
		isSimple = true
		endTag = true
	}
	if endTag && !noEndTag {
		if doIndent {
			if !isSimple {
				*s += p.padding
			}
		}
		switch value.(type) {
		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
			if elen > 0 || useGoXmlEmptyElemSyntax {
				if elen == 0 {
					*s += ">"
				}
				*s += `</` + key + ">"
			} else {
				*s += `/>`
			}
		}
	} else if !noEndTag {
		if useGoXmlEmptyElemSyntax {
			*s += "></" + key + ">"
		} else {
			*s += "/>"
		}
	}
	if doIndent {
		if p.cnt > p.start {
			*s += "\n"
		}
		p.Outdent()
	}

	return nil
}
+
+// the element sort implementation
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		iseq = 9999999
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		jseq = 9999999
+	}
+
+	if iseq > jseq {
+		return false
+	}
+	return true
+}
+
+func PrintElemListSeq(e elemListSeq) {
+	for n, v := range e {
+		fmt.Printf("%d: %v\n", n, v)
+	}
+}

+ 1 - 0
vendor/github.com/bndr/gotabulate/AUTHOR

@@ -0,0 +1 @@
+Vadim Kravcenko 2014

+ 0 - 0
vendor/github.com/bndr/gotabulate/CHANGELOG


+ 2 - 0
vendor/github.com/bndr/gotabulate/CONTRIBUTORS

@@ -0,0 +1,2 @@
+# Contributors git log --format='%aN <%aE>' | sort -uf
+Vadim Kravcenko <bndrzz@gmail.com>

+ 201 - 0
vendor/github.com/bndr/gotabulate/LICENSE

@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 222 - 0
vendor/github.com/bndr/gotabulate/README.md

@@ -0,0 +1,222 @@
+# Gotabulate - Easily pretty-print tabular data
+[![GoDoc](https://godoc.org/github.com/bndr/gotabulate?status.svg)](https://godoc.org/github.com/bndr/gotabulate)
+[![Build Status](https://travis-ci.org/bndr/gotabulate.svg?branch=master)](https://travis-ci.org/bndr/gotabulate)
+
+## Summary
+
+Go-Tabulate - Generic Go Library for easy pretty-printing of tabular data. 
+
+## Installation
+
+    go get github.com/bndr/gotabulate
+
+## Description
+
+Supported data types:
+- 2D Array of `Int`, `Int64`, `Float64`, `String`, `interface{}`
+- Map of `String`, `interface{}` (Keys will be used as header)
+
+## Usage
+
+```go
+// Create Some Fake Rows
+row_1 := []interface{}{"john", 20, "ready"}
+row_2 := []interface{}{"bndr", 23, "ready"}
+
+// Create an object from 2D interface array
+t := gotabulate.Create([][]interface{}{row_1, row_2})
+
+// Set the Headers (optional)
+t.SetHeaders([]string{"age", "status"})
+
+// Set the Empty String (optional)
+t.SetEmptyString("None")
+
+// Set Align (Optional)
+t.SetAlign("right")
+
+// Print the result: grid, or simple
+fmt.Println(t.Render("grid"))
+
++---------+--------+-----------+
+|         |    age |    status |
++=========+========+===========+
+|    john |     20 |     ready |
++---------+--------+-----------+
+|    bndr |     23 |     ready |
++---------+--------+-----------+
+```
+
+## Example with String
+
+```go
+// Some Strings
+string_1 := []string{"TV", "1000$", "Sold"}
+string_2 := []string{"PC", "50%", "on Hold"}
+
+// Create Object
+tabulate := gotabulate.Create([][]string{string_1, string_2})
+
+// Set Headers
+tabulate.SetHeaders([]string{"Type", "Cost", "Status"})
+
+// Render
+fmt.Println(tabulate.Render("simple"))
+
+---------  ----------  ------------
+    Type        Cost        Status
+---------  ----------  ------------
+      TV       1000$          Sold
+
+      PC         50%       on Hold
+---------  ----------  ------------
+```
+
+## Example with String Wrapping
+
+```go
+tabulate := gotabulate.Create([][]string{[]string{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis",
+	"Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis", "zzLorem ipsum", " test", "test"}, []string{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis",
+	"Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis", "zzLorem ipsum", " test", "test"}, STRING_ARRAY, []string{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis",
+	"Vivamus laoreet vestibulum pretium. Nulla et ornare elit. Cum sociis natoque penatibus et magnis", "zzLorem ipsum", " test", "test"}, STRING_ARRAY})
+
+tabulate.SetHeaders([]string{"Header 1", "header 2", "header 3", "header 4"})
+// Set Max Cell Size
+tabulate.SetMaxCellSize(16)
+
+// Turn On String Wrapping
+tabulate.SetWrapStrings(true)
+
+// Render the table
+fmt.Println(tabulate.Render("grid"))
+
++---------------------+---------------------+----------------+-------------+-------------+
+|                     |            Header 1 |       header 2 |    header 3 |    header 4 |
++=====================+=====================+================+=============+=============+
+|    Lorem ipsum dolo |    Vivamus laoreet  |    Lorem ipsum |        test |        test |
+|    r sit amet, cons |    vestibulum preti |                |             |             |
+|    ectetur adipisci |    um. Nulla et orn |                |             |             |
+|    ng elit. Vivamus |    are elit. Cum so |                |             |             |
+|     laoreet vestibu |    ciis natoque pen |                |             |             |
+|    lum pretium. Nul |    atibus et magnis |                |             |             |
+|    la et ornare eli |                     |                |             |             |
+|    t. Cum sociis na |                     |                |             |             |
+|    toque penatibus  |                     |                |             |             |
+|           et magnis |                     |                |             |             |
++---------------------+---------------------+----------------+-------------+-------------+
+|    Lorem ipsum dolo |    Vivamus laoreet  |    Lorem ipsum |        test |        test |
+|    r sit amet, cons |    vestibulum preti |                |             |             |
+|    ectetur adipisci |    um. Nulla et orn |                |             |             |
+|    ng elit. Vivamus |    are elit. Cum so |                |             |             |
+|     laoreet vestibu |    ciis natoque pen |                |             |             |
+|    lum pretium. Nul |    atibus et magnis |                |             |             |
+|    la et ornare eli |                     |                |             |             |
+|    t. Cum sociis na |                     |                |             |             |
+|    toque penatibus  |                     |                |             |             |
+|           et magnis |                     |                |             |             |
++---------------------+---------------------+----------------+-------------+-------------+
+|         test string |       test string 2 |           test |         row |        bndr |
++---------------------+---------------------+----------------+-------------+-------------+
+|    Lorem ipsum dolo |    Vivamus laoreet  |    Lorem ipsum |        test |        test |
+|    r sit amet, cons |    vestibulum preti |                |             |             |
+|    ectetur adipisci |    um. Nulla et orn |                |             |             |
+|    ng elit. Vivamus |    are elit. Cum so |                |             |             |
+|     laoreet vestibu |    ciis natoque pen |                |             |             |
+|    lum pretium. Nul |    atibus et magnis |                |             |             |
+|    la et ornare eli |                     |                |             |             |
+|    t. Cum sociis na |                     |                |             |             |
+|    toque penatibus  |                     |                |             |             |
+|           et magnis |                     |                |             |             |
++---------------------+---------------------+----------------+-------------+-------------+
+|         test string |       test string 2 |           test |         row |        bndr |
++---------------------+---------------------+----------------+-------------+-------------+
+```
+## Examples
+
+```go
+t := gotabulate.Create([][]string{STRING_ARRAY, STRING_ARRAY})
+
+t.SetHeaders(HEADERS) // If no headers are set, the first row will be used.
+
+t.SetEmptyString("None") // Set what will be printed in the empty cell
+
+rendered_string := t.Render("simple") // Render() will return a string
+
+Simple Table
+----------------------  ----------------------  ----------------------  -------------  -------------
+             Header 1                Header 2                Header 3       Header 4       Header 5 
+----------------------  ----------------------  ----------------------  -------------  -------------
+          test string           test string 2                    test            row           bndr 
+
+          test string           test string 2                    test            row           bndr 
+
+    4th element empty       4th element empty       4th element empty           None           None 
+----------------------  ----------------------  ----------------------  -------------  -------------
+
+Grid Table (Align Right)
++-------------+-------------+-------------+-------------+-------------+
+|    Header 1 |    Header 2 |    Header 3 |    Header 4 |    Header 5 |
++=============+=============+=============+=============+=============+
+|       10.01 |      12.002 |      -123.5 |    20.00005 |        1.01 |
++-------------+-------------+-------------+-------------+-------------+
+|       10.01 |      12.002 |      -123.5 |    20.00005 |        1.01 |
++-------------+-------------+-------------+-------------+-------------+
+|       10.01 |      12.002 |      -123.5 |    20.00005 |        None |
++-------------+-------------+-------------+-------------+-------------+
+
+Padded Headers:
++----------------------+----------------------+----------------------+-------------+-------------+
+|                      |             Header 1 |             header 2 |    header 3 |    header 4 |
++======================+======================+======================+=============+=============+
+|          test string |        test string 2 |                 test |         row |        bndr |
++----------------------+----------------------+----------------------+-------------+-------------+
+|          test string |        test string 2 |                 test |         row |        bndr |
++----------------------+----------------------+----------------------+-------------+-------------+
+|    4th element empty |    4th element empty |    4th element empty |        None |        None |
++----------------------+----------------------+----------------------+-------------+-------------+
+
+Align Center:
++-------------+-------------+-------------+-------------+-------------+
+|   Header 1  |   Header 2  |   Header 3  |   Header 4  |   Header 5  |
++=============+=============+=============+=============+=============+
+|    10.01    |    12.002   |    -123.5   |   20.00005  |     1.01    |
++-------------+-------------+-------------+-------------+-------------+
+|    10.01    |    12.002   |    -123.5   |   20.00005  |     1.01    |
++-------------+-------------+-------------+-------------+-------------+
+|    10.01    |    12.002   |    -123.5   |   20.00005  |     1.01    |
++-------------+-------------+-------------+-------------+-------------+
+
+Align Left:
++-------------+-------------+-------------+-------------+-------------+
+| Header 1    | Header 2    | Header 3    | Header 4    | Header 5    |
++=============+=============+=============+=============+=============+
+| 10.01       | 12.002      | -123.5      | 20.00005    | 1.01        |
++-------------+-------------+-------------+-------------+-------------+
+| 10.01       | 12.002      | -123.5      | 20.00005    | 1.01        |
++-------------+-------------+-------------+-------------+-------------+
+| 10.01       | 12.002      | -123.5      | 20.00005    | 1.01        |
++-------------+-------------+-------------+-------------+-------------+
+```
+
+### Status
+
+Beta version. There may be edge cases that I have missed, so if your tables don't render properly please open up an issue. 
+
+## Contribute
+
+All Contributions are welcome. The todo list is on the bottom of this README. Feel free to send a pull request.
+
+## License
+
+Apache License 2.0
+
+## TODO
+
+- [ ] Add more examples
+- [ ] Better Documentation
+- [ ] Implement more data table formats
+- [ ] Decimal point alignment for floats
+
+## Acknowledgement
+
+Inspired by Python package [tabulate](https://pypi.python.org/pypi/tabulate)

+ 483 - 0
vendor/github.com/bndr/gotabulate/tabulate.go

@@ -0,0 +1,483 @@
+package gotabulate
+
+import "fmt"
+import "bytes"
+import "github.com/mattn/go-runewidth"
+import "unicode/utf8"
+import "math"
+
+// Basic Structure of TableFormat
+type TableFormat struct {
+	LineTop         Line
+	LineBelowHeader Line
+	LineBetweenRows Line
+	LineBottom      Line
+	HeaderRow       Row
+	DataRow         Row
+	Padding         int
+	HeaderHide      bool
+	FitScreen       bool
+}
+
+// Represents a Line
+type Line struct {
+	begin string
+	hline string
+	sep   string
+	end   string
+}
+
+// Represents a Row
+type Row struct {
+	begin string
+	sep   string
+	end   string
+}
+
+// Table Formats that are available to the user
+// The user can define his own format, just by adding an entry to this map
+// and calling it with the Render function, e.g. t.Render("customFormat")
+var TableFormats = map[string]TableFormat{
+	"simple": TableFormat{
+		LineTop:         Line{"", "-", "  ", ""},
+		LineBelowHeader: Line{"", "-", "  ", ""},
+		LineBottom:      Line{"", "-", "  ", ""},
+		HeaderRow:       Row{"", "  ", ""},
+		DataRow:         Row{"", "  ", ""},
+		Padding:         1,
+	},
+	"plain": TableFormat{
+		HeaderRow: Row{"", "  ", ""},
+		DataRow:   Row{"", "  ", ""},
+		Padding:   1,
+	},
+	"grid": TableFormat{
+		LineTop:         Line{"+", "-", "+", "+"},
+		LineBelowHeader: Line{"+", "=", "+", "+"},
+		LineBetweenRows: Line{"+", "-", "+", "+"},
+		LineBottom:      Line{"+", "-", "+", "+"},
+		HeaderRow:       Row{"|", "|", "|"},
+		DataRow:         Row{"|", "|", "|"},
+		Padding:         1,
+	},
+}
+
+// Minimum padding that will be applied
+var MIN_PADDING = 5
+
+// Main Tabulate structure
+type Tabulate struct {
+	Data          []*TabulateRow
+	Headers       []string
+	FloatFormat   byte
+	TableFormat   TableFormat
+	Align         string
+	EmptyVar      string
+	HideLines     []string
+	MaxSize       int
+	WrapStrings   bool
+	WrapDelimiter rune
+	SplitConcat   string
+}
+
+// Represents normalized tabulate Row
+type TabulateRow struct {
+	Elements  []string
+	Continuos bool
+}
+
+type writeBuffer struct {
+	Buffer bytes.Buffer
+}
+
+func createBuffer() *writeBuffer {
+	return &writeBuffer{}
+}
+
+func (b *writeBuffer) Write(str string, count int) *writeBuffer {
+	for i := 0; i < count; i++ {
+		b.Buffer.WriteString(str)
+	}
+	return b
+}
+func (b *writeBuffer) String() string {
+	return b.Buffer.String()
+}
+
+// Add padding to each cell
+func (t *Tabulate) padRow(arr []string, padding int) []string {
+	if len(arr) < 1 {
+		return arr
+	}
+	padded := make([]string, len(arr))
+	for index, el := range arr {
+		b := createBuffer()
+		b.Write(" ", padding)
+		b.Write(el, 1)
+		b.Write(" ", padding)
+		padded[index] = b.String()
+	}
+	return padded
+}
+
+// Align right (Add padding left)
+func (t *Tabulate) padLeft(width int, str string) string {
+	b := createBuffer()
+	b.Write(" ", (width - runewidth.StringWidth(str)))
+	b.Write(str, 1)
+	return b.String()
+}
+
+// Align Left (Add padding right)
+func (t *Tabulate) padRight(width int, str string) string {
+	b := createBuffer()
+	b.Write(str, 1)
+	b.Write(" ", (width - runewidth.StringWidth(str)))
+	return b.String()
+}
+
+// Center the element in the cell
+func (t *Tabulate) padCenter(width int, str string) string {
+	b := createBuffer()
+	padding := int(math.Ceil(float64((width - runewidth.StringWidth(str))) / 2.0))
+	b.Write(" ", padding)
+	b.Write(str, 1)
+	b.Write(" ", (width - runewidth.StringWidth(b.String())))
+
+	return b.String()
+}
+
+// Build Line based on padded_widths from t.GetWidths()
+func (t *Tabulate) buildLine(padded_widths []int, padding []int, l Line) string {
+	cells := make([]string, len(padded_widths))
+
+	for i, _ := range cells {
+		b := createBuffer()
+		b.Write(l.hline, padding[i]+MIN_PADDING)
+		cells[i] = b.String()
+	}
+
+	var buffer bytes.Buffer
+	buffer.WriteString(l.begin)
+
+	// Print contents
+	for i := 0; i < len(cells); i++ {
+		buffer.WriteString(cells[i])
+		if i != len(cells)-1 {
+			buffer.WriteString(l.sep)
+		}
+	}
+
+	buffer.WriteString(l.end)
+	return buffer.String()
+}
+
+// Build Row based on padded_widths from t.GetWidths()
+func (t *Tabulate) buildRow(elements []string, padded_widths []int, paddings []int, d Row) string {
+
+	var buffer bytes.Buffer
+	buffer.WriteString(d.begin)
+	padFunc := t.getAlignFunc()
+	// Print contents
+	for i := 0; i < len(padded_widths); i++ {
+		output := ""
+		if len(elements) <= i || (len(elements) > i && elements[i] == " nil ") {
+			output = padFunc(padded_widths[i], t.EmptyVar)
+		} else if len(elements) > i {
+			output = padFunc(padded_widths[i], elements[i])
+		}
+		buffer.WriteString(output)
+		if i != len(padded_widths)-1 {
+			buffer.WriteString(d.sep)
+		}
+	}
+
+	buffer.WriteString(d.end)
+	return buffer.String()
+}
+
+//SetWrapDelimiter assigns the character in a string that the renderer
+//will attempt to split strings on when a cell must be wrapped
+func (t *Tabulate) SetWrapDelimiter(r rune) {
+	t.WrapDelimiter = r
+}
+
+//SetSplitConcat assigns the character that will be used when a WrapDelimiter is
+//set but the renderer cannot abide by the desired split.  This may happen when
+//the WrapDelimiter is a space ' ' but a single word is longer than the width of a cell
+func (t *Tabulate) SetSplitConcat(r string) {
+	t.SplitConcat = r
+}
+
+// Render the data table
+func (t *Tabulate) Render(format ...interface{}) string {
+	var lines []string
+
+	// If headers are set use them, otherwise pop the first row
+	if len(t.Headers) < 1 && len(t.Data) > 1 {
+		t.Headers, t.Data = t.Data[0].Elements, t.Data[1:]
+	}
+
+	// Use the format that was passed as parameter, otherwise
+	// use the format defined in the struct
+	if len(format) > 0 {
+		t.TableFormat = TableFormats[format[0].(string)]
+	}
+
+	// If WrapStrings is set to true, then break the string up into multiple cells
+	if t.WrapStrings {
+		t.Data = t.wrapCellData()
+	}
+
+	// Check if Data is present
+	if len(t.Data) < 1 {
+		return ""
+	}
+
+	if len(t.Headers) < len(t.Data[0].Elements) {
+		diff := len(t.Data[0].Elements) - len(t.Headers)
+		padded_header := make([]string, diff)
+		for _, e := range t.Headers {
+			padded_header = append(padded_header, e)
+		}
+		t.Headers = padded_header
+	}
+
+	// Get Column widths for all columns
+	cols := t.getWidths(t.Headers, t.Data)
+
+	padded_widths := make([]int, len(cols))
+	for i, _ := range padded_widths {
+		padded_widths[i] = cols[i] + MIN_PADDING*t.TableFormat.Padding
+	}
+
+	// Start appending lines
+
+	// Append top line if not hidden
+	if !inSlice("top", t.HideLines) {
+		lines = append(lines, t.buildLine(padded_widths, cols, t.TableFormat.LineTop))
+	}
+
+	// Add Header
+	lines = append(lines, t.buildRow(t.padRow(t.Headers, t.TableFormat.Padding), padded_widths, cols, t.TableFormat.HeaderRow))
+
+	// Add Line Below Header if not hidden
+	if !inSlice("belowheader", t.HideLines) {
+		lines = append(lines, t.buildLine(padded_widths, cols, t.TableFormat.LineBelowHeader))
+	}
+
+	// Add Data Rows
+	for index, element := range t.Data {
+		lines = append(lines, t.buildRow(t.padRow(element.Elements, t.TableFormat.Padding), padded_widths, cols, t.TableFormat.DataRow))
+		if index < len(t.Data)-1 {
+			if element.Continuos != true {
+				lines = append(lines, t.buildLine(padded_widths, cols, t.TableFormat.LineBetweenRows))
+			}
+		}
+	}
+
+	if !inSlice("bottomLine", t.HideLines) {
+		lines = append(lines, t.buildLine(padded_widths, cols, t.TableFormat.LineBottom))
+	}
+
+	// Join lines
+	var buffer bytes.Buffer
+	for _, line := range lines {
+		buffer.WriteString(line + "\n")
+	}
+
+	return buffer.String()
+}
+
+// Calculate the max column width for each element
+func (t *Tabulate) getWidths(headers []string, data []*TabulateRow) []int {
+	widths := make([]int, len(headers))
+	current_max := len(t.EmptyVar)
+	for i := 0; i < len(headers); i++ {
+		current_max = runewidth.StringWidth(headers[i])
+		for _, item := range data {
+			if len(item.Elements) > i && len(widths) > i {
+				element := item.Elements[i]
+				strLength := runewidth.StringWidth(element)
+				if strLength > current_max {
+					widths[i] = strLength
+					current_max = strLength
+				} else {
+					widths[i] = current_max
+				}
+			}
+		}
+	}
+
+	return widths
+}
+
+// Set Headers of the table
+// If the headers count is less than the data column count, empty headers are prepended on the left
+func (t *Tabulate) SetHeaders(headers []string) *Tabulate {
+	t.Headers = headers
+	return t
+}
+
+// Set Float Formatting
+// will be used in strconv.FormatFloat(element, format, -1, 64)
+func (t *Tabulate) SetFloatFormat(format byte) *Tabulate {
+	t.FloatFormat = format
+	return t
+}
+
+// Set Align Type, Available options: left, right, center
+func (t *Tabulate) SetAlign(align string) {
+	t.Align = align
+}
+
+// getAlignFunc selects the padding function matching t.Align:
+// empty or "right" -> padLeft, "left" -> padRight, anything else
+// (including "center") -> padCenter.
+func (t *Tabulate) getAlignFunc() func(int, string) string {
+	if len(t.Align) < 1 || t.Align == "right" {
+		return t.padLeft
+	} else if t.Align == "left" {
+		return t.padRight
+	} else {
+		return t.padCenter
+	}
+}
+
+// SetEmptyString sets how an empty cell will be represented. A trailing
+// space is appended to the given string.
+func (t *Tabulate) SetEmptyString(empty string) {
+	t.EmptyVar = empty + " "
+}
+
+// SetHideLines sets which horizontal lines of the table to hide.
+// NOTE(review): Render checks for the literal token "bottomLine" (see the
+// inSlice call there), not "bottom" as the original comment claimed; the
+// tokens for the top and below-header lines are not visible in this chunk —
+// confirm against the full Render implementation.
+func (t *Tabulate) SetHideLines(hide []string) {
+	t.HideLines = hide
+}
+
+// SetWrapStrings enables/disables wrapping of cell content wider than
+// t.MaxSize into additional rows (see wrapCellData).
+func (t *Tabulate) SetWrapStrings(wrap bool) {
+	t.WrapStrings = wrap
+}
+
+// SetMaxCellSize sets the maximum display width of a cell.
+// If WrapStrings is set to true, longer content is split downwards into
+// multiple cells; otherwise it is truncated (see splitElement).
+func (t *Tabulate) SetMaxCellSize(max int) {
+	t.MaxSize = max
+}
+
+// splitElement truncates e to at most t.MaxSize display columns.
+// It returns (true, prefix) when the cut lands exactly before
+// t.WrapDelimiter, telling the caller it may drop that delimiter rune;
+// otherwise it returns (false, prefix), optionally suffixed with
+// t.SplitConcat when no delimiter-aligned cut was found.
+func (t *Tabulate) splitElement(e string) (bool, string) {
+	//check if we are not attempting to smartly wrap
+	if t.WrapDelimiter == 0 {
+		if t.SplitConcat == "" {
+			return false, runewidth.Truncate(e, t.MaxSize, "")
+		} else {
+			return false, runewidth.Truncate(e, t.MaxSize, t.SplitConcat)
+		}
+	}
+
+	//we are attempting to wrap
+	//grab the current width
+	var i int
+	for i = t.MaxSize; i > 1; i-- {
+		//loop through our proposed truncation size looking for one that ends on
+		//our requested delimiter
+		x := runewidth.Truncate(e, i, "")
+		//check if the NEXT string is a
+		//delimiter, if it IS, then we truncate and tell the caller to shrink
+		// NOTE(review): e[i:] indexes by BYTE offset while i is a display
+		// width; these disagree for multi-byte/wide runes — confirm.
+		r, _ := utf8.DecodeRuneInString(e[i:])
+		if r == 0 || r == 1 {
+			// NOTE(review): DecodeRuneInString signals failure with
+			// utf8.RuneError (U+FFFD, size 1), never 0 or 1 — this
+			// "decode failed" check looks wrong.
+			//decode failed, take the truncation as is
+			return false, x
+		}
+		if r == t.WrapDelimiter {
+			return true, x //inform the caller that they can remove the next rune
+		}
+	}
+	//didn't find a good length, truncate at will
+	if t.SplitConcat != "" {
+		return false, runewidth.Truncate(e, t.MaxSize, t.SplitConcat)
+	}
+	return false, runewidth.Truncate(e, t.MaxSize, "")
+}
+
+// wrapCellData splits any cell wider than t.MaxSize into multiple rows:
+// the overflow of each such cell is pushed down into a synthetic follow-up
+// row, repeatedly, until every cell fits. Rows that continue on the next
+// line are flagged Continuos so Render skips the separator between them.
+// NOTE(review): panics when t.Data is empty (t.Data[0]); confirm callers
+// guarantee at least one row before enabling WrapStrings.
+func (t *Tabulate) wrapCellData() []*TabulateRow {
+	var arr []*TabulateRow
+	var cleanSplit bool
+	var addr int
+	next := t.Data[0]
+	// index deliberately runs one past the end so the final (possibly
+	// synthetic) row is flushed by the `index >= len(t.Data)` branch.
+	for index := 0; index <= len(t.Data); index++ {
+		elements := next.Elements
+		new_elements := make([]string, len(elements))
+
+		for i, e := range elements {
+			if runewidth.StringWidth(e) > t.MaxSize {
+				// NOTE(review): this assignment is immediately overwritten
+				// by splitElement's result on the next line (dead store).
+				elements[i] = runewidth.Truncate(e, t.MaxSize, "")
+				cleanSplit, elements[i] = t.splitElement(e)
+				if cleanSplit {
+					//remove the next rune (the wrap delimiter itself)
+					r, w := utf8.DecodeRuneInString(e[len(elements[i]):])
+					if r != 0 && r != 1 {
+						addr = w
+					}
+				} else {
+					addr = 0
+				}
+				// the remainder of the cell goes to the synthetic next row
+				new_elements[i] = e[len(elements[i])+addr:]
+				next.Continuos = true
+			}
+		}
+
+		if next.Continuos {
+			// stay on the same logical row: process the overflow next
+			arr = append(arr, next)
+			next = &TabulateRow{Elements: new_elements}
+			index--
+		} else if index+1 < len(t.Data) {
+			arr = append(arr, next)
+			next = t.Data[index+1]
+		} else if index >= len(t.Data) {
+			arr = append(arr, next)
+		}
+
+	}
+	return arr
+}
+
+// Create builds a new Tabulate object (FloatFormat 'f', MaxSize 30) from
+// the given data. Accepts 2D String Array, 2D Int Array, 2D Int64 Array,
+// 2D Bool Array, 2D Float64 Array, 2D interface{} Array,
+// Map map[string][]string, Map map[string][]interface{},
+// plus 1D []string and []interface{} (each treated as a single row).
+func Create(data interface{}) *Tabulate {
+	t := &Tabulate{FloatFormat: 'f', MaxSize: 30}
+
+	switch v := data.(type) {
+	case [][]string:
+		t.Data = createFromString(data.([][]string))
+	case [][]int32:
+		t.Data = createFromInt32(data.([][]int32))
+	case [][]int64:
+		t.Data = createFromInt64(data.([][]int64))
+	case [][]int:
+		t.Data = createFromInt(data.([][]int))
+	case [][]bool:
+		t.Data = createFromBool(data.([][]bool))
+	case [][]float64:
+		t.Data = createFromFloat64(data.([][]float64), t.FloatFormat)
+	case [][]interface{}:
+		t.Data = createFromMixed(data.([][]interface{}), t.FloatFormat)
+	case []string:
+		t.Data = createFromString([][]string{data.([]string)})
+	case []interface{}:
+		t.Data = createFromMixed([][]interface{}{data.([]interface{})}, t.FloatFormat)
+	case map[string][]interface{}:
+		t.Headers, t.Data = createFromMapMixed(data.(map[string][]interface{}), t.FloatFormat)
+	case map[string][]string:
+		t.Headers, t.Data = createFromMapString(data.(map[string][]string))
+	default:
+		// NOTE(review): unsupported input is only printed; t.Data stays
+		// nil in that case.
+		fmt.Println(v)
+	}
+
+	return t
+}

+ 144 - 0
vendor/github.com/bndr/gotabulate/utils.go

@@ -0,0 +1,144 @@
+package gotabulate
+
+import "strconv"
+import "fmt"
+
+// createFromString wraps each []string row in a TabulateRow, preserving
+// row order. The element slices are shared with the caller, not copied.
+func createFromString(data [][]string) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+
+	for index, el := range data {
+		rows[index] = &TabulateRow{Elements: el}
+	}
+	return rows
+}
+
+// createFromMixed normalizes rows of mixed values (interface{}) into
+// string rows. format is the strconv.FormatFloat verb used for float64s.
+func createFromMixed(data [][]interface{}, format byte) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, element := range data {
+		normalized := make([]string, len(element))
+		for index, el := range element {
+			switch el.(type) {
+			case int32:
+				// int32 is treated as a rune: quote to ASCII and strip
+				// the surrounding quotes (so 65 renders as "A")
+				quoted := strconv.QuoteRuneToASCII(el.(int32))
+				normalized[index] = quoted[1 : len(quoted)-1]
+			case int:
+				normalized[index] = strconv.Itoa(el.(int))
+			case int64:
+				normalized[index] = strconv.FormatInt(el.(int64), 10)
+			case bool:
+				normalized[index] = strconv.FormatBool(el.(bool))
+			case float64:
+				normalized[index] = strconv.FormatFloat(el.(float64), format, -1, 64)
+			case uint64:
+				normalized[index] = strconv.FormatUint(el.(uint64), 10)
+			case nil:
+				normalized[index] = "nil"
+			default:
+				// NOTE(review): %s on a non-string/non-Stringer prints
+				// "%!s(...)" noise; %v would be the conventional verb.
+				normalized[index] = fmt.Sprintf("%s", el)
+			}
+		}
+		rows[index_1] = &TabulateRow{Elements: normalized}
+	}
+	return rows
+}
+
+// createFromInt normalizes rows of ints into decimal string rows.
+func createFromInt(data [][]int) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, arr := range data {
+		row := make([]string, len(arr))
+		for index, el := range arr {
+			row[index] = strconv.Itoa(el)
+		}
+		rows[index_1] = &TabulateRow{Elements: row}
+	}
+	return rows
+}
+
+// createFromFloat64 normalizes rows of float64 into string rows using
+// strconv.FormatFloat with the given format verb, minimal precision (-1)
+// and 64-bit rounding.
+func createFromFloat64(data [][]float64, format byte) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, arr := range data {
+		row := make([]string, len(arr))
+		for index, el := range arr {
+			row[index] = strconv.FormatFloat(el, format, -1, 64)
+		}
+		rows[index_1] = &TabulateRow{Elements: row}
+	}
+	return rows
+}
+
+// createFromInt32 normalizes rows of int32 into string rows. Each value
+// is treated as a rune: rendered via strconv.QuoteRuneToASCII with the
+// surrounding single quotes stripped.
+func createFromInt32(data [][]int32) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, arr := range data {
+		row := make([]string, len(arr))
+		for index, el := range arr {
+			quoted := strconv.QuoteRuneToASCII(el)
+			row[index] = quoted[1 : len(quoted)-1]
+		}
+		rows[index_1] = &TabulateRow{Elements: row}
+	}
+	return rows
+}
+
+// createFromInt64 normalizes rows of int64 into decimal string rows.
+func createFromInt64(data [][]int64) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, arr := range data {
+		row := make([]string, len(arr))
+		for index, el := range arr {
+			row[index] = strconv.FormatInt(el, 10)
+		}
+		rows[index_1] = &TabulateRow{Elements: row}
+	}
+	return rows
+}
+
+// createFromBool normalizes rows of bools into "true"/"false" string rows.
+func createFromBool(data [][]bool) []*TabulateRow {
+	rows := make([]*TabulateRow, len(data))
+	for index_1, arr := range data {
+		row := make([]string, len(arr))
+		for index, el := range arr {
+			row[index] = strconv.FormatBool(el)
+		}
+		rows[index_1] = &TabulateRow{Elements: row}
+	}
+	return rows
+}
+
+// createFromMapMixed normalizes a map of mixed columns; the map keys
+// become the headers.
+// NOTE(review): Go map iteration order is randomized, so header/column
+// order differs between runs — confirm callers do not rely on a stable
+// column order.
+func createFromMapMixed(data map[string][]interface{}, format byte) (headers []string, tData []*TabulateRow) {
+
+	var dataslice [][]interface{}
+	for key, value := range data {
+		headers = append(headers, key)
+		dataslice = append(dataslice, value)
+	}
+	return headers, createFromMixed(dataslice, format)
+}
+
+// createFromMapString normalizes a map of string columns; the map keys
+// become the headers.
+// NOTE(review): as with createFromMapMixed, map iteration order is
+// randomized, so column order is not stable between runs.
+func createFromMapString(data map[string][]string) (headers []string, tData []*TabulateRow) {
+	var dataslice [][]string
+	for key, value := range data {
+		headers = append(headers, key)
+		dataslice = append(dataslice, value)
+	}
+	return headers, createFromString(dataslice)
+}
+
+// inSlice reports whether string a is present in list.
+func inSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}

+ 21 - 0
vendor/github.com/fatih/structs/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 163 - 0
vendor/github.com/fatih/structs/README.md

@@ -0,0 +1,163 @@
+# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
+
+Structs contains various utilities to work with Go (Golang) structs. It was
+initially used by me to convert a struct into a `map[string]interface{}`. With
+time I've added other utilities for structs.  It's basically a high level
+package based on primitives from the reflect package. Feel free to add new
+functions or improve the existing code.
+
+## Install
+
+```bash
+go get github.com/fatih/structs
+```
+
+## Usage and Examples
+
+Just like the standard lib `strings`, `bytes` and co packages, `structs` has
+many global functions to manipulate or organize your struct data. Lets define
+and declare a struct:
+
+```go
+type Server struct {
+	Name        string `json:"name,omitempty"`
+	ID          int
+	Enabled     bool
+	users       []string // not exported
+	http.Server          // embedded
+}
+
+server := &Server{
+	Name:    "gopher",
+	ID:      123456,
+	Enabled: true,
+}
+```
+
+```go
+// Convert a struct to a map[string]interface{}
+// => {"Name":"gopher", "ID":123456, "Enabled":true}
+m := structs.Map(server)
+
+// Convert the values of a struct to a []interface{}
+// => ["gopher", 123456, true]
+v := structs.Values(server)
+
+// Convert the names of a struct to a []string
+// (see "Names methods" for more info about fields)
+n := structs.Names(server)
+
+// Convert the values of a struct to a []*Field
+// (see "Field methods" for more info about fields)
+f := structs.Fields(server)
+
+// Return the struct name => "Server"
+n := structs.Name(server)
+
+// Check if any field of the struct is uninitialized (holds its zero value).
+h := structs.HasZero(server)
+
+// Check if all fields of the struct are uninitialized (all zero values).
+z := structs.IsZero(server)
+
+// Check if server is a struct or a pointer to struct
+i := structs.IsStruct(server)
+```
+
+### Struct methods
+
+The structs functions can be also used as independent methods by creating a new
+`*structs.Struct`. This is handy if you want to have more control over the
+structs (such as retrieving a single Field).
+
+```go
+// Create a new struct type:
+s := structs.New(server)
+
+m := s.Map()              // Get a map[string]interface{}
+v := s.Values()           // Get a []interface{}
+f := s.Fields()           // Get a []*Field
+n := s.Names()            // Get a []string
+f := s.Field(name)        // Get a *Field based on the given field name
+f, ok := s.FieldOk(name)  // Get a *Field based on the given field name
+n := s.Name()             // Get the struct name
+h := s.HasZero()          // Check if any field is initialized
+z := s.IsZero()           // Check if all fields are initialized
+```
+
+### Field methods
+
+We can easily examine a single Field for more detail. Below you can see how we
+get and interact with various field methods:
+
+
+```go
+s := structs.New(server)
+
+// Get the Field struct for the "Name" field
+name := s.Field("Name")
+
+// Get the underlying value,  value => "gopher"
+value := name.Value().(string)
+
+// Set the field's value
+name.Set("another gopher")
+
+// Get the field's kind, kind =>  "string"
+name.Kind()
+
+// Check if the field is exported or not
+if name.IsExported() {
+	fmt.Println("Name field is exported")
+}
+
+// Check if the value is a zero value, such as "" for string, 0 for int
+if !name.IsZero() {
+	fmt.Println("Name is initialized")
+}
+
+// Check if the field is an anonymous (embedded) field
+if !name.IsEmbedded() {
+	fmt.Println("Name is not an embedded field")
+}
+
+// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
+tagValue := name.Tag("json")
+```
+
+Nested structs are supported too:
+
+```go
+addrField := s.Field("Server").Field("Addr")
+
+// Get the value for addr
+a := addrField.Value().(string)
+
+// Or get all fields
+httpServer := s.Field("Server").Fields()
+```
+
+We can also get a slice of Fields from the Struct type to iterate over all
+fields. This is handy if you wish to examine all fields:
+
+```go
+s := structs.New(server)
+
+for _, f := range s.Fields() {
+	fmt.Printf("field name: %+v\n", f.Name())
+
+	if f.IsExported() {
+		fmt.Printf("value   : %+v\n", f.Value())
+		fmt.Printf("is zero : %+v\n", f.IsZero())
+	}
+}
+```
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * [Cihangir Savas](https://github.com/cihangir)
+
+## License
+
+The MIT License (MIT) - see LICENSE.md for more details

+ 141 - 0
vendor/github.com/fatih/structs/field.go

@@ -0,0 +1,141 @@
+package structs
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+var (
+	errNotExported = errors.New("field is not exported")
+	errNotSettable = errors.New("field is not settable")
+)
+
+// Field represents a single struct field that encapsulates high level
+// functions around the field.
+type Field struct {
+	value      reflect.Value
+	field      reflect.StructField
+	defaultTag string
+}
+
+// Tag returns the value associated with key in the tag string. If there is no
+// such key in the tag, Tag returns the empty string.
+func (f *Field) Tag(key string) string {
+	return f.field.Tag.Get(key)
+}
+
+// Value returns the underlying value of the field. It panics if the field
+// is not exported.
+func (f *Field) Value() interface{} {
+	return f.value.Interface()
+}
+
+// IsEmbedded returns true if the given field is an anonymous field (embedded)
+func (f *Field) IsEmbedded() bool {
+	return f.field.Anonymous
+}
+
+// IsExported returns true if the given field is exported.
+func (f *Field) IsExported() bool {
+	return f.field.PkgPath == ""
+}
+
+// IsZero returns true if the given field is not initialized (has a zero value).
+// It panics if the field is not exported.
+func (f *Field) IsZero() bool {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	current := f.Value()
+
+	return reflect.DeepEqual(current, zero)
+}
+
+// Name returns the name of the given field
+func (f *Field) Name() string {
+	return f.field.Name
+}
+
+// Kind returns the fields kind, such as "string", "map", "bool", etc ..
+func (f *Field) Kind() reflect.Kind {
+	return f.value.Kind()
+}
+
+// Set sets the field to given value v. It returns an error if the field is not
+// settable (not addressable or not exported) or if the given value's type
+// doesn't match the fields type.
+func (f *Field) Set(val interface{}) error {
+	// we can't set unexported fields, so be sure this field is exported
+	if !f.IsExported() {
+		return errNotExported
+	}
+
+	// an exported field can still be unaddressable (e.g. obtained from a
+	// non-pointer struct value), in which case reflect refuses to set it
+	if !f.value.CanSet() {
+		return errNotSettable
+	}
+
+	given := reflect.ValueOf(val)
+
+	if f.value.Kind() != given.Kind() {
+		return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
+	}
+
+	// NOTE(review): only the Kind is compared above, not the full Type;
+	// assigning a different named type of the same kind will panic inside
+	// reflect.Value.Set — confirm whether that is acceptable upstream.
+	f.value.Set(given)
+	return nil
+}
+
+// Zero sets the field to its zero value. It returns an error if the field is not
+// settable (not addressable or not exported).
+func (f *Field) Zero() error {
+	zero := reflect.Zero(f.value.Type()).Interface()
+	return f.Set(zero)
+}
+
+// Fields returns a slice of Fields. This is particular handy to get the fields
+// of a nested struct . A struct tag with the content of "-" ignores the
+// checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field *http.Request `structs:"-"`
+//
+// It panics if field is not exported or if field's kind is not struct
+func (f *Field) Fields() []*Field {
+	return getFields(f.value, f.defaultTag)
+}
+
+// Field returns the field from a nested struct. It panics if the nested struct
+// is not exported or if the field was not found.
+func (f *Field) Field(name string) *Field {
+	field, ok := f.FieldOk(name)
+	if !ok {
+		panic("field not found")
+	}
+
+	return field
+}
+
+// FieldOk returns the field from a nested struct. The boolean returns whether
+// the field was found (true) or not (false).
+func (f *Field) FieldOk(name string) (*Field, bool) {
+	value := &f.value
+	// value must be settable so we need to make sure it holds the address of the
+	// variable and not a copy, so we can pass the pointer to strctVal instead of a
+	// copy (which is not assigned to any variable, hence not settable).
+	// see "https://blog.golang.org/laws-of-reflection#TOC_8."
+	if f.value.Kind() != reflect.Ptr {
+		a := f.value.Addr()
+		value = &a
+	}
+	v := strctVal(value.Interface())
+	t := v.Type()
+
+	field, ok := t.FieldByName(name)
+	if !ok {
+		return nil, false
+	}
+
+	return &Field{
+		field: field,
+		value: v.FieldByName(name),
+	}, true
+}

+ 579 - 0
vendor/github.com/fatih/structs/structs.go

@@ -0,0 +1,579 @@
+// Package structs contains various utilities functions to work with structs.
+package structs
+
+import (
+	"fmt"
+
+	"reflect"
+)
+
+var (
+	// DefaultTagName is the default tag name for struct fields which provides
+	// a more granular to tweak certain structs. Lookup the necessary functions
+	// for more info.
+	DefaultTagName = "structs" // struct's field default tag name
+)
+
+// Struct encapsulates a struct type to provide several high level functions
+// around the struct.
+type Struct struct {
+	raw     interface{}
+	value   reflect.Value
+	TagName string
+}
+
+// New returns a new *Struct with the struct s. It panics if the s's kind is
+// not struct.
+func New(s interface{}) *Struct {
+	return &Struct{
+		raw:     s,
+		value:   strctVal(s),
+		TagName: DefaultTagName,
+	}
+}
+
+// Map converts the given struct to a map[string]interface{}, where the keys
+// of the map are the field names and the values of the map the associated
+// values of the fields. The default key string is the struct field name but
+// can be changed in the struct field's tag value. The "structs" key in the
+// struct's field tag value is the key name. Example:
+//
+//   // Field appears in map as key "myName".
+//   Name string `structs:"myName"`
+//
+// A tag value with the content of "-" ignores that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A tag value with the content of "string" uses the stringer to get the value. Example:
+//
+//   // The value will be output of Animal's String() func.
+//   // Map will panic if Animal does not implement String().
+//   Field *Animal `structs:"field,string"`
+//
+// A tag value with the option of "flatten" used in a struct field is to flatten its fields
+// in the output map. Example:
+//
+//   // The FieldStruct's fields will be flattened into the output map.
+//   FieldStruct time.Time `structs:",flatten"`
+//
+// A tag value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field if
+// the field value is empty. Example:
+//
+//   // Field appears in map as key "myName", but the field is
+//   // skipped if empty.
+//   Field string `structs:"myName,omitempty"`
+//
+//   // Field appears in map as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected.
+func (s *Struct) Map() map[string]interface{} {
+	out := make(map[string]interface{})
+	s.FillMap(out)
+	return out
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func (s *Struct) FillMap(out map[string]interface{}) {
+	if out == nil {
+		return
+	}
+
+	fields := s.structFields()
+
+	for _, field := range fields {
+		name := field.Name
+		val := s.value.FieldByName(name)
+		isSubStruct := false
+		var finalVal interface{}
+
+		tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
+		if tagName != "" {
+			name = tagName
+		}
+
+		// if the value is a zero value and the field is marked as omitempty do
+		// not include
+		if tagOpts.Has("omitempty") {
+			zero := reflect.Zero(val.Type()).Interface()
+			current := val.Interface()
+
+			if reflect.DeepEqual(current, zero) {
+				continue
+			}
+		}
+
+		if !tagOpts.Has("omitnested") {
+			finalVal = s.nested(val)
+
+			v := reflect.ValueOf(val.Interface())
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+
+			switch v.Kind() {
+			case reflect.Map, reflect.Struct:
+				isSubStruct = true
+			}
+		} else {
+			finalVal = val.Interface()
+		}
+
+		if tagOpts.Has("string") {
+			s, ok := val.Interface().(fmt.Stringer)
+			if ok {
+				out[name] = s.String()
+			}
+			continue
+		}
+
+		if isSubStruct && (tagOpts.Has("flatten")) {
+			for k := range finalVal.(map[string]interface{}) {
+				out[k] = finalVal.(map[string]interface{})[k]
+			}
+		} else {
+			out[name] = finalVal
+		}
+	}
+}
+
+// Values converts the given s struct's field values to a []interface{}.  A
+// struct tag with the content of "-" ignores that particular field.
+// Example:
+//
+//   // Field is ignored by this package.
+//   Field int `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Fields is not processed further by this package.
+//   Field time.Time     `structs:",omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field and
+// is not added to the values if the field value is empty. Example:
+//
+//   // Field is skipped if empty
+//   Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected.
+func (s *Struct) Values() []interface{} {
+	fields := s.structFields()
+
+	var t []interface{}
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		// if the value is a zero value and the field is marked as omitempty do
+		// not include
+		if tagOpts.Has("omitempty") {
+			zero := reflect.Zero(val.Type()).Interface()
+			current := val.Interface()
+
+			if reflect.DeepEqual(current, zero) {
+				continue
+			}
+		}
+
+		// "string" option: use the value's fmt.Stringer if implemented;
+		// values that do not implement it are silently dropped
+		if tagOpts.Has("string") {
+			s, ok := val.Interface().(fmt.Stringer)
+			if ok {
+				t = append(t, s.String())
+			}
+			continue
+		}
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			// look out for embedded structs, and convert them to a
+			// []interface{} to be added to the final values slice
+			for _, embeddedVal := range Values(val.Interface()) {
+				t = append(t, embeddedVal)
+			}
+		} else {
+			t = append(t, val.Interface())
+		}
+	}
+
+	return t
+}
+
+// Fields returns a slice of Fields. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Fields() []*Field {
+	return getFields(s.value, s.TagName)
+}
+
+// Names returns a slice of field names. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Names() []string {
+	fields := getFields(s.value, s.TagName)
+
+	names := make([]string, len(fields))
+
+	for i, field := range fields {
+		names[i] = field.Name()
+	}
+
+	return names
+}
+
+func getFields(v reflect.Value, tagName string) []*Field {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	t := v.Type()
+
+	var fields []*Field
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+
+		if tag := field.Tag.Get(tagName); tag == "-" {
+			continue
+		}
+
+		f := &Field{
+			field: field,
+			value: v.FieldByName(field.Name),
+		}
+
+		fields = append(fields, f)
+
+	}
+
+	return fields
+}
+
+// Field returns a new Field struct that provides several high level functions
+// around a single struct field entity. It panics if the field is not found.
+func (s *Struct) Field(name string) *Field {
+	f, ok := s.FieldOk(name)
+	if !ok {
+		panic("field not found")
+	}
+
+	return f
+}
+
+// FieldOk returns a new Field struct that provides several high level functions
+// around a single struct field entity. The boolean returns true if the field
+// was found.
+func (s *Struct) FieldOk(name string) (*Field, bool) {
+	t := s.value.Type()
+
+	field, ok := t.FieldByName(name)
+	if !ok {
+		return nil, false
+	}
+
+	return &Field{
+		field:      field,
+		value:      s.value.FieldByName(name),
+		defaultTag: s.TagName,
+	}, true
+}
+
+// IsZero returns true if all fields in a struct is a zero value (not
+// initialized) A struct tag with the content of "-" ignores the checking of
+// that particular field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected. It panics if s's kind is not struct.
+func (s *Struct) IsZero() bool {
+	fields := s.structFields()
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			ok := IsZero(val.Interface())
+			if !ok {
+				return false
+			}
+
+			continue
+		}
+
+		// zero value of the given field, such as "" for string, 0 for int
+		zero := reflect.Zero(val.Type()).Interface()
+
+		//  current value of the given field
+		current := val.Interface()
+
+		if !reflect.DeepEqual(current, zero) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// HasZero returns true if a field in a struct is not initialized (zero value).
+// A struct tag with the content of "-" ignores the checking of that particular
+// field. Example:
+//
+//   // Field is ignored by this package.
+//   Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+//   // Field is not processed further by this package.
+//   Field time.Time     `structs:"myName,omitnested"`
+//   Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields  will be neglected. It panics if s's kind is not struct.
+func (s *Struct) HasZero() bool {
+	fields := s.structFields()
+
+	for _, field := range fields {
+		val := s.value.FieldByName(field.Name)
+
+		_, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+			ok := HasZero(val.Interface())
+			if ok {
+				return true
+			}
+
+			continue
+		}
+
+		// zero value of the given field, such as "" for string, 0 for int
+		zero := reflect.Zero(val.Type()).Interface()
+
+		//  current value of the given field
+		current := val.Interface()
+
+		if reflect.DeepEqual(current, zero) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Name returns the structs's type name within its package. For more info refer
+// to Name() function.
+func (s *Struct) Name() string {
+	return s.value.Type().Name()
+}
+
+// structFields returns the exported struct fields for a given s struct. This
+// is a convenient helper method to avoid duplicate code in some of the
+// functions.
+func (s *Struct) structFields() []reflect.StructField {
+	t := s.value.Type()
+
+	var f []reflect.StructField
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		// we can't access the value of unexported fields
+		if field.PkgPath != "" {
+			continue
+		}
+
+		// don't check if it's omitted
+		if tag := field.Tag.Get(s.TagName); tag == "-" {
+			continue
+		}
+
+		f = append(f, field)
+	}
+
+	return f
+}
+
+// strctVal dereferences s down to its underlying struct value. It panics
+// if the final value's kind is not struct.
+func strctVal(s interface{}) reflect.Value {
+	v := reflect.ValueOf(s)
+
+	// if pointer, get the underlying element
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	if v.Kind() != reflect.Struct {
+		panic("not struct")
+	}
+
+	return v
+}
+
+// Map converts the given struct to a map[string]interface{}. For more info
+// refer to Struct types Map() method. It panics if s's kind is not struct.
+func Map(s interface{}) map[string]interface{} {
+	return New(s).Map()
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func FillMap(s interface{}, out map[string]interface{}) {
+	New(s).FillMap(out)
+}
+
+// Values converts the given struct to a []interface{}. For more info refer to
+// Struct types Values() method.  It panics if s's kind is not struct.
+func Values(s interface{}) []interface{} {
+	return New(s).Values()
+}
+
+// Fields returns a slice of *Field. For more info refer to Struct types
+// Fields() method.  It panics if s's kind is not struct.
+func Fields(s interface{}) []*Field {
+	return New(s).Fields()
+}
+
+// Names returns a slice of field names. For more info refer to Struct types
+// Names() method.  It panics if s's kind is not struct.
+func Names(s interface{}) []string {
+	return New(s).Names()
+}
+
+// IsZero returns true if all fields is equal to a zero value. For more info
+// refer to Struct types IsZero() method.  It panics if s's kind is not struct.
+func IsZero(s interface{}) bool {
+	return New(s).IsZero()
+}
+
+// HasZero returns true if any field is equal to a zero value. For more info
+// refer to Struct types HasZero() method.  It panics if s's kind is not struct.
+func HasZero(s interface{}) bool {
+	return New(s).HasZero()
+}
+
+// IsStruct returns true if the given variable is a struct or a pointer to
+// struct.
+func IsStruct(s interface{}) bool {
+	v := reflect.ValueOf(s)
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	// uninitialized zero value of a struct
+	if v.Kind() == reflect.Invalid {
+		return false
+	}
+
+	return v.Kind() == reflect.Struct
+}
+
+// Name returns the structs's type name within its package. It returns an
+// empty string for unnamed types. It panics if s's kind is not struct.
+func Name(s interface{}) string {
+	return New(s).Name()
+}
+
+// nested retrieves recursively all types for the given value and returns the
+// nested value.
+func (s *Struct) nested(val reflect.Value) interface{} {
+	var finalVal interface{}
+
+	v := reflect.ValueOf(val.Interface())
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		n := New(val.Interface())
+		n.TagName = s.TagName
+		m := n.Map()
+
+		// do not add the converted value if there are no exported fields, ie:
+		// time.Time
+		if len(m) == 0 {
+			finalVal = val.Interface()
+		} else {
+			finalVal = m
+		}
+	case reflect.Map:
+		v := val.Type().Elem()
+		if v.Kind() == reflect.Ptr {
+			v = v.Elem()
+		}
+
+		// only iterate over struct types, ie: map[string]StructType,
+		// map[string][]StructType,
+		if v.Kind() == reflect.Struct ||
+			(v.Kind() == reflect.Slice && v.Elem().Kind() == reflect.Struct) {
+			m := make(map[string]interface{}, val.Len())
+			for _, k := range val.MapKeys() {
+				m[k.String()] = s.nested(val.MapIndex(k))
+			}
+			finalVal = m
+			break
+		}
+
+		// TODO(arslan): should this be optional?
+		finalVal = val.Interface()
+	case reflect.Slice, reflect.Array:
+		if val.Type().Kind() == reflect.Interface {
+			finalVal = val.Interface()
+			break
+		}
+
+		// TODO(arslan): should this be optional?
+		// do not iterate of non struct types, just pass the value. Ie: []int,
+		// []string, co... We only iterate further if it's a struct.
+		// i.e []foo or []*foo
+		if val.Type().Elem().Kind() != reflect.Struct &&
+			!(val.Type().Elem().Kind() == reflect.Ptr &&
+				val.Type().Elem().Elem().Kind() == reflect.Struct) {
+			finalVal = val.Interface()
+			break
+		}
+
+		slices := make([]interface{}, val.Len(), val.Len())
+		for x := 0; x < val.Len(); x++ {
+			slices[x] = s.nested(val.Index(x))
+		}
+		finalVal = slices
+	default:
+		finalVal = val.Interface()
+	}
+
+	return finalVal
+}

+ 32 - 0
vendor/github.com/fatih/structs/tags.go

@@ -0,0 +1,32 @@
+package structs
+
+import "strings"
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+	for _, tagOpt := range t {
+		if tagOpt == opt {
+			return true
+		}
+	}
+
+	return false
+}
+
+// parseTag splits a struct field's tag into its name and a list of options
+// which come after the name. A tag is in the form of: "name,option1,option2".
+// The name may be omitted (e.g. ",opt"), yielding an empty name.
+func parseTag(tag string) (string, tagOptions) {
+	// tag is one of the following:
+	// ""
+	// "name"
+	// "name,opt"
+	// "name,opt,opt2"
+	// ",opt"
+
+	res := strings.Split(tag, ",")
+	return res[0], res[1:]
+}

+ 46 - 0
vendor/github.com/fsnotify/fsnotify/AUTHORS

@@ -0,0 +1,46 @@
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>

+ 307 - 0
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md

@@ -0,0 +1,307 @@
+# Changelog
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+    * add low-level functions
+    * only need to store flags on directories
+    * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+    * done can be an unbuffered channel
+    * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in  rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+    * current implementation doesn't take advantage of OS for efficiency
+    * provides little benefit over filtering events as they are received, but has  extra bookkeeping and mutexes
+    * no tests for the current implementation
+    * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant  [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21

+ 77 - 0
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md

@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+  * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+  * Update the CHANGELOG
+  * Tag a version, which will become available through gopkg.in.
+ 
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy. 
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

+ 28 - 0
vendor/github.com/fsnotify/fsnotify/LICENSE

@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 50 - 0
vendor/github.com/fsnotify/fsnotify/README.md

@@ -0,0 +1,50 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter   |OS        |Status    |
+|----------|----------|----------|
+|inotify   |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue    |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents  |macOS         |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN       |Solaris 11    |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify  |Linux 2.6.37+ | |
+|USN Journals |Windows    |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling   |*All*         |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). 
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+

+ 37 - 0
vendor/github.com/fsnotify/fsnotify/fen.go

@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+// FEN (Solaris) support is not implemented, so this always returns a nil
+// Watcher and an error.
+func NewWatcher() (*Watcher, error) {
+	// Error strings should not end in a newline (Go convention); callers
+	// decide how to format the message.
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+// Stub: FEN support is unimplemented, so there is nothing to clean up.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+// Stub: FEN support is unimplemented; this is a no-op that always succeeds.
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+// Stub: FEN support is unimplemented; this is a no-op that always succeeds.
+func (w *Watcher) Remove(name string) error {
+	return nil
+}

+ 66 - 0
vendor/github.com/fsnotify/fsnotify/fsnotify.go

@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+// String returns a "|"-separated list of the flag names set in op, e.g.
+// "CREATE|WRITE". It returns the empty string when no flags are set.
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	// %q quotes (and escapes) the file name.
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")

+ 334 - 0
vendor/github.com/fsnotify/fsnotify/inotify.go

@@ -0,0 +1,334 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	cv       *sync.Cond // sync removing on rm_watch with IN_IGNORE
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+	w.cv = sync.NewCond(&w.mu)
+
+	go w.readEvents()
+	return w, nil
+}
+
+// isClosed reports whether Close has been called. It relies on the fact
+// that a receive from a closed channel never blocks: once Close has
+// close()d w.done, the first select case always succeeds.
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	watchEntry, found := w.watches[name]
+	w.mu.Unlock()
+	if found {
+		watchEntry.flags |= flags
+		flags |= unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	w.mu.Lock()
+	w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+	w.paths[wd] = name
+	w.mu.Unlock()
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify will already have been removed.
+	// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
+	// by another thread and we have not received IN_IGNORE event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	// wait until ignoreLinux() deleting maps
+	exists := true
+	for exists {
+		w.cv.Wait()
+		_, exists = w.watches[name]
+	}
+
+	return nil
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// If EOF is received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill the
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name := w.paths[int(raw.Wd)]
+			w.mu.Unlock()
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(w, raw.Wd, mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist. Returns true if the event should be
+// dropped.
+func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		w.mu.Lock()
+		defer w.mu.Unlock()
+		name := w.paths[int(wd)]
+		delete(w.paths, int(wd))
+		delete(w.watches, name)
+		// Wake any Remove() call waiting for the maps to be cleaned up.
+		w.cv.Broadcast()
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}

+ 187 - 0
vendor/github.com/fsnotify/fsnotify/inotify_poller.go

@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(0)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// Close the write end of the poller.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}

+ 503 - 0
vendor/github.com/fsnotify/fsnotify/kqueue.go

@@ -0,0 +1,503 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan bool // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan bool),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+	w.mu.Unlock()
+
+	// copy paths to remove while locked
+	w.mu.Lock()
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	var err error
+	for _, name := range pathsToRemove {
+		if e := w.Remove(name); e != nil && err == nil {
+			err = e
+		}
+	}
+
+	// Send "quit" message to the reader goroutine:
+	w.done <- true
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow Symlinks
+		// Unfortunately, Linux can add bogus symlinks to watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To  maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			err := unix.Close(w.kq)
+			if err != nil {
+				w.Errors <- err
+			}
+			close(w.Events)
+			close(w.Errors)
+			return
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			w.Errors <- err
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on a recursively watched folders and we receive a
+				// modification event first but the folder has been deleted and later
+				// receive the delete event
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark is as delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel
+				w.Events <- event
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+}
+
+// newEvent returns an platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		w.Errors <- err
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		w.Events <- newCreateEvent(filePath)
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}

+ 11 - 0
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go

@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY

+ 12 - 0
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go

@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY

+ 561 - 0
vendor/github.com/fsnotify/fsnotify/windows.go

@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+		e.Op |= Create
+	}
+	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+		e.Op |= Remove
+	}
+	if mask&sysFSMODIFY == sysFSMODIFY {
+		e.Op |= Write
+	}
+	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+		e.Op |= Rename
+	}
+	if mask&sysFSATTRIB == sysFSATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+	}
+	ino = &inode{
+		handle: h,
+		volume: fi.VolumeSerialNumber,
+		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+	}
+	return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+	if i := m[ino.volume]; i != nil {
+		return i[ino.index]
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+	i := m[ino.volume]
+	if i == nil {
+		i = make(indexMap)
+		m[ino.volume] = i
+	}
+	i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	if flags&sysFSONLYDIR != 0 && pathname != dir {
+		return nil
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watchEntry := w.watches.get(ino)
+	w.mu.Unlock()
+	if watchEntry == nil {
+		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+			syscall.CloseHandle(ino.handle)
+			return os.NewSyscallError("CreateIoCompletionPort", e)
+		}
+		watchEntry = &watch{
+			ino:   ino,
+			path:  dir,
+			names: make(map[string]uint64),
+		}
+		w.mu.Lock()
+		w.watches.set(ino, watchEntry)
+		w.mu.Unlock()
+		flags |= provisional
+	} else {
+		syscall.CloseHandle(ino.handle)
+	}
+	if pathname == dir {
+		watchEntry.mask |= flags
+	} else {
+		watchEntry.names[filepath.Base(pathname)] |= flags
+	}
+	if err = w.startRead(watchEntry); err != nil {
+		return err
+	}
+	if pathname == dir {
+		watchEntry.mask &= ^provisional
+	} else {
+		watchEntry.names[filepath.Base(pathname)] &= ^provisional
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+	if watch == nil {
+		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+	return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	if e := syscall.CancelIo(watch.ino.handle); e != nil {
+		w.Errors <- os.NewSyscallError("CancelIo", e)
+		w.deleteWatch(watch)
+	}
+	mask := toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= toWindowsFlags(m)
+	}
+	if mask == 0 {
+		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+			w.Errors <- os.NewSyscallError("CloseHandle", e)
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+	if e != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", e)
+		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+	var (
+		n, key uint32
+		ov     *syscall.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+		watch := (*watch)(unsafe.Pointer(ov))
+
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+				var err error
+				if e := syscall.CloseHandle(w.port); e != nil {
+					err = os.NewSyscallError("CloseHandle", e)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch e {
+		case syscall.ERROR_MORE_DATA:
+			if watch == nil {
+				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sysFSACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sysFSMODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sysFSATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sysFSCREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sysFSDELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sysFSMODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sysFSMOVEDFROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sysFSMOVEDTO
+	}
+	return 0
+}

+ 21 - 0
vendor/github.com/mattn/go-runewidth/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 27 - 0
vendor/github.com/mattn/go-runewidth/README.mkd

@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013

+ 481 - 0
vendor/github.com/mattn/go-runewidth/runewidth.go

@@ -0,0 +1,481 @@
+package runewidth
+
var (
	// EastAsianWidth is true when the current locale is CJK; it is
	// detected once at package initialisation via IsEastAsian().
	EastAsianWidth = IsEastAsian()

	// DefaultCondition is the Condition for the current locale; the
	// package-level helper functions all delegate to it.
	DefaultCondition = &Condition{EastAsianWidth}
)
+
// interval is an inclusive range [first, last] of runes.
type interval struct {
	first rune
	last  rune
}
+
+var combining = []interval{
+	{0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489},
+	{0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+	{0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603},
+	{0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+	{0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+	{0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A},
+	{0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902},
+	{0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+	{0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981},
+	{0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD},
+	{0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C},
+	{0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+	{0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC},
+	{0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD},
+	{0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C},
+	{0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+	{0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0},
+	{0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48},
+	{0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC},
+	{0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+	{0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D},
+	{0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6},
+	{0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+	{0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+	{0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+	{0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E},
+	{0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97},
+	{0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+	{0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039},
+	{0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F},
+	{0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+	{0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+	{0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD},
+	{0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922},
+	{0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B},
+	{0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+	{0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42},
+	{0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF},
+	{0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063},
+	{0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+	{0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B},
+	{0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+	{0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB},
+	{0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+	{0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169},
+	{0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+	{0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+	{0xE0100, 0xE01EF},
+}
+
// ctype classifies a rune by its East Asian Width category
// (see UAX #11).
type ctype int

const (
	narrow ctype = iota
	ambiguous
	wide
	halfwidth
	fullwidth
	neutral
)

// intervalType is an inclusive rune range tagged with its East Asian
// Width category; the ctypes table below is made of these.
type intervalType struct {
	first rune
	last  rune
	ctype ctype
}
+
+var ctypes = []intervalType{
+	{0x0020, 0x007E, narrow},
+	{0x00A1, 0x00A1, ambiguous},
+	{0x00A2, 0x00A3, narrow},
+	{0x00A4, 0x00A4, ambiguous},
+	{0x00A5, 0x00A6, narrow},
+	{0x00A7, 0x00A8, ambiguous},
+	{0x00AA, 0x00AA, ambiguous},
+	{0x00AC, 0x00AC, narrow},
+	{0x00AD, 0x00AE, ambiguous},
+	{0x00AF, 0x00AF, narrow},
+	{0x00B0, 0x00B4, ambiguous},
+	{0x00B6, 0x00BA, ambiguous},
+	{0x00BC, 0x00BF, ambiguous},
+	{0x00C6, 0x00C6, ambiguous},
+	{0x00D0, 0x00D0, ambiguous},
+	{0x00D7, 0x00D8, ambiguous},
+	{0x00DE, 0x00E1, ambiguous},
+	{0x00E6, 0x00E6, ambiguous},
+	{0x00E8, 0x00EA, ambiguous},
+	{0x00EC, 0x00ED, ambiguous},
+	{0x00F0, 0x00F0, ambiguous},
+	{0x00F2, 0x00F3, ambiguous},
+	{0x00F7, 0x00FA, ambiguous},
+	{0x00FC, 0x00FC, ambiguous},
+	{0x00FE, 0x00FE, ambiguous},
+	{0x0101, 0x0101, ambiguous},
+	{0x0111, 0x0111, ambiguous},
+	{0x0113, 0x0113, ambiguous},
+	{0x011B, 0x011B, ambiguous},
+	{0x0126, 0x0127, ambiguous},
+	{0x012B, 0x012B, ambiguous},
+	{0x0131, 0x0133, ambiguous},
+	{0x0138, 0x0138, ambiguous},
+	{0x013F, 0x0142, ambiguous},
+	{0x0144, 0x0144, ambiguous},
+	{0x0148, 0x014B, ambiguous},
+	{0x014D, 0x014D, ambiguous},
+	{0x0152, 0x0153, ambiguous},
+	{0x0166, 0x0167, ambiguous},
+	{0x016B, 0x016B, ambiguous},
+	{0x01CE, 0x01CE, ambiguous},
+	{0x01D0, 0x01D0, ambiguous},
+	{0x01D2, 0x01D2, ambiguous},
+	{0x01D4, 0x01D4, ambiguous},
+	{0x01D6, 0x01D6, ambiguous},
+	{0x01D8, 0x01D8, ambiguous},
+	{0x01DA, 0x01DA, ambiguous},
+	{0x01DC, 0x01DC, ambiguous},
+	{0x0251, 0x0251, ambiguous},
+	{0x0261, 0x0261, ambiguous},
+	{0x02C4, 0x02C4, ambiguous},
+	{0x02C7, 0x02C7, ambiguous},
+	{0x02C9, 0x02CB, ambiguous},
+	{0x02CD, 0x02CD, ambiguous},
+	{0x02D0, 0x02D0, ambiguous},
+	{0x02D8, 0x02DB, ambiguous},
+	{0x02DD, 0x02DD, ambiguous},
+	{0x02DF, 0x02DF, ambiguous},
+	{0x0300, 0x036F, ambiguous},
+	{0x0391, 0x03A2, ambiguous},
+	{0x03A3, 0x03A9, ambiguous},
+	{0x03B1, 0x03C1, ambiguous},
+	{0x03C3, 0x03C9, ambiguous},
+	{0x0401, 0x0401, ambiguous},
+	{0x0410, 0x044F, ambiguous},
+	{0x0451, 0x0451, ambiguous},
+	{0x1100, 0x115F, wide},
+	{0x2010, 0x2010, ambiguous},
+	{0x2013, 0x2016, ambiguous},
+	{0x2018, 0x2019, ambiguous},
+	{0x201C, 0x201D, ambiguous},
+	{0x2020, 0x2022, ambiguous},
+	{0x2024, 0x2027, ambiguous},
+	{0x2030, 0x2030, ambiguous},
+	{0x2032, 0x2033, ambiguous},
+	{0x2035, 0x2035, ambiguous},
+	{0x203B, 0x203B, ambiguous},
+	{0x203E, 0x203E, ambiguous},
+	{0x2074, 0x2074, ambiguous},
+	{0x207F, 0x207F, ambiguous},
+	{0x2081, 0x2084, ambiguous},
+	{0x20A9, 0x20A9, halfwidth},
+	{0x20AC, 0x20AC, ambiguous},
+	{0x2103, 0x2103, ambiguous},
+	{0x2105, 0x2105, ambiguous},
+	{0x2109, 0x2109, ambiguous},
+	{0x2113, 0x2113, ambiguous},
+	{0x2116, 0x2116, ambiguous},
+	{0x2121, 0x2122, ambiguous},
+	{0x2126, 0x2126, ambiguous},
+	{0x212B, 0x212B, ambiguous},
+	{0x2153, 0x2154, ambiguous},
+	{0x215B, 0x215E, ambiguous},
+	{0x2160, 0x216B, ambiguous},
+	{0x2170, 0x2179, ambiguous},
+	{0x2189, 0x218A, ambiguous},
+	{0x2190, 0x2199, ambiguous},
+	{0x21B8, 0x21B9, ambiguous},
+	{0x21D2, 0x21D2, ambiguous},
+	{0x21D4, 0x21D4, ambiguous},
+	{0x21E7, 0x21E7, ambiguous},
+	{0x2200, 0x2200, ambiguous},
+	{0x2202, 0x2203, ambiguous},
+	{0x2207, 0x2208, ambiguous},
+	{0x220B, 0x220B, ambiguous},
+	{0x220F, 0x220F, ambiguous},
+	{0x2211, 0x2211, ambiguous},
+	{0x2215, 0x2215, ambiguous},
+	{0x221A, 0x221A, ambiguous},
+	{0x221D, 0x2220, ambiguous},
+	{0x2223, 0x2223, ambiguous},
+	{0x2225, 0x2225, ambiguous},
+	{0x2227, 0x222C, ambiguous},
+	{0x222E, 0x222E, ambiguous},
+	{0x2234, 0x2237, ambiguous},
+	{0x223C, 0x223D, ambiguous},
+	{0x2248, 0x2248, ambiguous},
+	{0x224C, 0x224C, ambiguous},
+	{0x2252, 0x2252, ambiguous},
+	{0x2260, 0x2261, ambiguous},
+	{0x2264, 0x2267, ambiguous},
+	{0x226A, 0x226B, ambiguous},
+	{0x226E, 0x226F, ambiguous},
+	{0x2282, 0x2283, ambiguous},
+	{0x2286, 0x2287, ambiguous},
+	{0x2295, 0x2295, ambiguous},
+	{0x2299, 0x2299, ambiguous},
+	{0x22A5, 0x22A5, ambiguous},
+	{0x22BF, 0x22BF, ambiguous},
+	{0x2312, 0x2312, ambiguous},
+	{0x2329, 0x232A, wide},
+	{0x2460, 0x24E9, ambiguous},
+	{0x24EB, 0x254B, ambiguous},
+	{0x2550, 0x2573, ambiguous},
+	{0x2580, 0x258F, ambiguous},
+	{0x2592, 0x2595, ambiguous},
+	{0x25A0, 0x25A1, ambiguous},
+	{0x25A3, 0x25A9, ambiguous},
+	{0x25B2, 0x25B3, ambiguous},
+	{0x25B6, 0x25B7, ambiguous},
+	{0x25BC, 0x25BD, ambiguous},
+	{0x25C0, 0x25C1, ambiguous},
+	{0x25C6, 0x25C8, ambiguous},
+	{0x25CB, 0x25CB, ambiguous},
+	{0x25CE, 0x25D1, ambiguous},
+	{0x25E2, 0x25E5, ambiguous},
+	{0x25EF, 0x25EF, ambiguous},
+	{0x2605, 0x2606, ambiguous},
+	{0x2609, 0x2609, ambiguous},
+	{0x260E, 0x260F, ambiguous},
+	{0x2614, 0x2615, ambiguous},
+	{0x261C, 0x261C, ambiguous},
+	{0x261E, 0x261E, ambiguous},
+	{0x2640, 0x2640, ambiguous},
+	{0x2642, 0x2642, ambiguous},
+	{0x2660, 0x2661, ambiguous},
+	{0x2663, 0x2665, ambiguous},
+	{0x2667, 0x266A, ambiguous},
+	{0x266C, 0x266D, ambiguous},
+	{0x266F, 0x266F, ambiguous},
+	{0x269E, 0x269F, ambiguous},
+	{0x26BE, 0x26BF, ambiguous},
+	{0x26C4, 0x26CD, ambiguous},
+	{0x26CF, 0x26E1, ambiguous},
+	{0x26E3, 0x26E3, ambiguous},
+	{0x26E8, 0x26FF, ambiguous},
+	{0x273D, 0x273D, ambiguous},
+	{0x2757, 0x2757, ambiguous},
+	{0x2776, 0x277F, ambiguous},
+	{0x27E6, 0x27ED, narrow},
+	{0x2985, 0x2986, narrow},
+	{0x2B55, 0x2B59, ambiguous},
+	{0x2E80, 0x2E9A, wide},
+	{0x2E9B, 0x2EF4, wide},
+	{0x2F00, 0x2FD6, wide},
+	{0x2FF0, 0x2FFC, wide},
+	{0x3000, 0x3000, fullwidth},
+	{0x3001, 0x303E, wide},
+	{0x3041, 0x3097, wide},
+	{0x3099, 0x3100, wide},
+	{0x3105, 0x312E, wide},
+	{0x3131, 0x318F, wide},
+	{0x3190, 0x31BB, wide},
+	{0x31C0, 0x31E4, wide},
+	{0x31F0, 0x321F, wide},
+	{0x3220, 0x3247, wide},
+	{0x3248, 0x324F, ambiguous},
+	{0x3250, 0x32FF, wide},
+	{0x3300, 0x4DBF, wide},
+	{0x4E00, 0xA48D, wide},
+	{0xA490, 0xA4C7, wide},
+	{0xA960, 0xA97D, wide},
+	{0xAC00, 0xD7A4, wide},
+	{0xE000, 0xF8FF, ambiguous},
+	{0xF900, 0xFAFF, wide},
+	{0xFE00, 0xFE0F, ambiguous},
+	{0xFE10, 0xFE1A, wide},
+	{0xFE30, 0xFE53, wide},
+	{0xFE54, 0xFE67, wide},
+	{0xFE68, 0xFE6C, wide},
+	{0xFF01, 0xFF60, fullwidth},
+	{0xFF61, 0xFFBF, halfwidth},
+	{0xFFC2, 0xFFC8, halfwidth},
+	{0xFFCA, 0xFFD0, halfwidth},
+	{0xFFD2, 0xFFD8, halfwidth},
+	{0xFFDA, 0xFFDD, halfwidth},
+	{0xFFE0, 0xFFE7, fullwidth},
+	{0xFFE8, 0xFFEF, halfwidth},
+	{0xFFFD, 0xFFFE, ambiguous},
+	{0x1B000, 0x1B002, wide},
+	{0x1F100, 0x1F10A, ambiguous},
+	{0x1F110, 0x1F12D, ambiguous},
+	{0x1F130, 0x1F169, ambiguous},
+	{0x1F170, 0x1F19B, ambiguous},
+	{0x1F200, 0x1F203, wide},
+	{0x1F210, 0x1F23B, wide},
+	{0x1F240, 0x1F249, wide},
+	{0x1F250, 0x1F252, wide},
+	{0x20000, 0x2FFFE, wide},
+	{0x30000, 0x3FFFE, wide},
+	{0xE0100, 0xE01F0, ambiguous},
+	{0xF0000, 0xFFFFD, ambiguous},
+	{0x100000, 0x10FFFE, ambiguous},
+}
+
// Condition holds the EastAsianWidth flag, which controls whether
// runes of ambiguous East Asian Width are counted as wide (CJK
// locales) or narrow.
type Condition struct {
	EastAsianWidth bool
}
+
// NewCondition returns a new Condition initialised from the
// package-level EastAsianWidth setting for the current locale.
func NewCondition() *Condition {
	return &Condition{EastAsianWidth}
}
+
// RuneWidth returns the number of terminal cells occupied by r: 0 for
// NUL and combining/zero-width code points, 2 for wide East Asian
// runes, and 1 otherwise.  When c.EastAsianWidth is set, runes of
// ambiguous width also count as 2.
// See http://www.unicode.org/reports/tr11/
func (c *Condition) RuneWidth(r rune) int {
	if r == 0 {
		return 0
	}
	// Control characters (C0 and the 0x7F-0x9F range) report width 1.
	if r < 32 || (r >= 0x7f && r < 0xa0) {
		return 1
	}
	// Combining marks and other zero-width code points.
	for _, iv := range combining {
		if iv.first <= r && r <= iv.last {
			return 0
		}
	}

	if c.EastAsianWidth && IsAmbiguousWidth(r) {
		return 2
	}

	// Hard-coded ranges of wide/fullwidth characters: Hangul Jamo,
	// CJK ideographs, Hangul syllables, CJK compatibility and
	// fullwidth forms, and the supplementary ideographic planes.
	if r >= 0x1100 &&
		(r <= 0x115f || r == 0x2329 || r == 0x232a ||
			(r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) ||
			(r >= 0xac00 && r <= 0xd7a3) ||
			(r >= 0xf900 && r <= 0xfaff) ||
			(r >= 0xfe30 && r <= 0xfe6f) ||
			(r >= 0xff00 && r <= 0xff60) ||
			(r >= 0xffe0 && r <= 0xffe6) ||
			(r >= 0x20000 && r <= 0x2fffd) ||
			(r >= 0x30000 && r <= 0x3fffd)) {
		return 2
	}
	return 1
}
+
+// StringWidth return width as you can see
+func (c *Condition) StringWidth(s string) (width int) {
+	for _, r := range []rune(s) {
+		width += c.RuneWidth(r)
+	}
+	return width
+}
+
+// Truncate return string truncated with w cells
+func (c *Condition) Truncate(s string, w int, tail string) string {
+	if c.StringWidth(s) <= w {
+		return s
+	}
+	r := []rune(s)
+	tw := c.StringWidth(tail)
+	w -= tw
+	width := 0
+	i := 0
+	for ; i < len(r); i++ {
+		cw := c.RuneWidth(r[i])
+		if width+cw > w {
+			break
+		}
+		width += cw
+	}
+	return string(r[0:i]) + tail
+}
+
+// Wrap return string wrapped with w cells
+func (c *Condition) Wrap(s string, w int) string {
+	width := 0
+	out := ""
+	for _, r := range []rune(s) {
+		cw := RuneWidth(r)
+		if r == '\n' {
+			out += string(r)
+			width = 0
+			continue
+		} else if width+cw > w {
+			out += "\n"
+			width = 0
+			out += string(r)
+			width += cw
+			continue
+		}
+		out += string(r)
+		width += cw
+	}
+	return out
+}
+
+// FillLeft return string filled in left by spaces in w cells
+func (c *Condition) FillLeft(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return string(b) + s
+	}
+	return s
+}
+
// FillRight right-pads s with spaces until it occupies w cells.
// Strings already at least w cells wide are returned unchanged.
// (The original comment said "filled in left", which was a
// copy-paste error.)
func (c *Condition) FillRight(s string, w int) string {
	width := c.StringWidth(s)
	count := w - width
	if count > 0 {
		b := make([]byte, count)
		for i := range b {
			b[i] = ' '
		}
		return s + string(b)
	}
	return s
}
+
// RuneWidth returns the number of cells in r, using DefaultCondition
// (i.e. honouring the locale detected at package init).
// See http://www.unicode.org/reports/tr11/
func RuneWidth(r rune) int {
	return DefaultCondition.RuneWidth(r)
}
+
+func ct(r rune) ctype {
+	for _, iv := range ctypes {
+		if iv.first <= r && r <= iv.last {
+			return iv.ctype
+		}
+	}
+	return neutral
+}
+
// IsAmbiguousWidth reports whether r has ambiguous East Asian Width
// (category A in UAX #11).
func IsAmbiguousWidth(r rune) bool {
	return ct(r) == ambiguous
}
+
// IsNeutralWidth reports whether r has neutral East Asian Width.
func IsNeutralWidth(r rune) bool {
	return ct(r) == neutral
}
+
// StringWidth returns the display width of s in cells, delegating to
// DefaultCondition.
func StringWidth(s string) (width int) {
	return DefaultCondition.StringWidth(s)
}
+
// Truncate shortens s to at most w cells, appending tail when
// truncation occurs, delegating to DefaultCondition.
func Truncate(s string, w int, tail string) string {
	return DefaultCondition.Truncate(s, w, tail)
}
+
// Wrap hard-wraps s at w cells per line, delegating to
// DefaultCondition.
func Wrap(s string, w int) string {
	return DefaultCondition.Wrap(s, w)
}
+
// FillLeft left-pads s with spaces to w cells, delegating to
// DefaultCondition.
func FillLeft(s string, w int) string {
	return DefaultCondition.FillLeft(s, w)
}
+
// FillRight right-pads s with spaces to w cells, delegating to
// DefaultCondition.  (The original comment said "left", which was a
// copy-paste error.)
func FillRight(s string, w int) string {
	return DefaultCondition.FillRight(s, w)
}

+ 8 - 0
vendor/github.com/mattn/go-runewidth/runewidth_js.go

@@ -0,0 +1,8 @@
+// +build js
+
+package runewidth
+
// IsEastAsian reports whether the current locale is CJK.  In the js
// build there is no locale detection yet, so it always returns false.
func IsEastAsian() bool {
	// TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
	return false
}

+ 77 - 0
vendor/github.com/mattn/go-runewidth/runewidth_posix.go

@@ -0,0 +1,77 @@
+// +build !windows,!js
+
+package runewidth
+
+import (
+	"os"
+	"regexp"
+	"strings"
+)
+
// reLoc extracts the charset part of a locale string such as
// "ja_JP.UTF-8" (capture group 1 is everything after the first dot).
var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)

// mblenTable maps a lower-cased charset name to the maximum number of
// bytes in one character of that encoding; entries greater than 1
// indicate a multibyte — typically East Asian — encoding.
var mblenTable = map[string]int{
	"utf-8":   6,
	"utf8":    6,
	"jis":     8,
	"eucjp":   3,
	"euckr":   2,
	"euccn":   2,
	"sjis":    2,
	"cp932":   2,
	"cp51932": 2,
	"cp936":   2,
	"cp949":   2,
	"cp950":   2,
	"big5":    2,
	"gbk":     2,
	"gb2312":  2,
}
+
+func isEastAsian(locale string) bool {
+	charset := strings.ToLower(locale)
+	r := reLoc.FindStringSubmatch(locale)
+	if len(r) == 2 {
+		charset = strings.ToLower(r[1])
+	}
+
+	if strings.HasSuffix(charset, "@cjk_narrow") {
+		return false
+	}
+
+	for pos, b := range []byte(charset) {
+		if b == '@' {
+			charset = charset[:pos]
+			break
+		}
+	}
+	max := 1
+	if m, ok := mblenTable[charset]; ok {
+		max = m
+	}
+	if max > 1 && (charset[0] != 'u' ||
+		strings.HasPrefix(locale, "ja") ||
+		strings.HasPrefix(locale, "ko") ||
+		strings.HasPrefix(locale, "zh")) {
+		return true
+	}
+	return false
+}
+
// IsEastAsian reports whether the current locale — taken from
// LC_CTYPE, falling back to LANG — is CJK.
func IsEastAsian() bool {
	locale := os.Getenv("LC_CTYPE")
	if locale == "" {
		locale = os.Getenv("LANG")
	}

	// ignore C locale: the C/POSIX locales are never East Asian.
	if locale == "POSIX" || locale == "C" {
		return false
	}
	// Likewise "C.UTF-8" / "C-..." style variants.
	if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
		return false
	}

	return isEastAsian(locale)
}

+ 25 - 0
vendor/github.com/mattn/go-runewidth/runewidth_windows.go

@@ -0,0 +1,25 @@
+package runewidth
+
+import (
+	"syscall"
+)
+
var (
	// kernel32 / procGetConsoleOutputCP lazily bind the Win32
	// GetConsoleOutputCP function used to query the console code page.
	kernel32               = syscall.NewLazyDLL("kernel32")
	procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
)
+
// IsEastAsian reports whether the console output code page is a CJK
// one: 932/51932 (Japanese), 936 (Simplified Chinese), 949 (Korean)
// or 950 (Traditional Chinese).
func IsEastAsian() bool {
	r1, _, _ := procGetConsoleOutputCP.Call()
	if r1 == 0 {
		// GetConsoleOutputCP failed (e.g. no console attached).
		return false
	}

	switch int(r1) {
	case 932, 51932, 936, 949, 950:
		return true
	}

	return false
}

+ 54 - 0
vendor/github.com/tealeg/xlsx/AUTHORS.txt

@@ -0,0 +1,54 @@
+ACHER <artem.chernyak@monsanto.com>
+Andrew Schwartz <andrew.schwartz@gengo.com>
+Artem Chernyak <artemchernyak@Artems-MacBook-Air.local>
+Artem Chernyak <artemchernyak@gmail.com>
+blackss2 <blackss2@nate.com>
+Brandon Mulcahy <brandon@jangler.info>
+Brian Smith <ohohvi@gmail.com>
+bronze1man <bronze1man@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Chris Glass <tribaal@gmail.com>
+Colin Fox <colin.fox@cumul8.com>
+Colin Fox <greenenergy@gmail.com>
+crahles <christoph@rahles.de>
+Daniel Upton <daniel@floppy.co>
+Daniel YC Lin <dlin@u40>
+DerLinkshaender <mail@arminhanisch.de>
+Eric <ericscottlagergren@gmail.com>
+frogs <frogs@frogss-MacBook-Air.local>
+fzerorubigd <fzerorubigd@gmail.com>
+Geoffrey J. Teale <geoffrey.teale@canonical.com>
+Gyu-Ho Lee <gyuho.cs@gmail.com>
+Herman Schaaf <hermanschaaf@gmail.com>
+Hugh Gao <email@klniu.com>
+Iain Lowe <i.lowe@mademediacorp.com>
+ivnivnch <ivnivnch@gmail.com>
+Jason Hall <imjasonh@gmail.com>
+Joshua Baker <joshua.baker@cumul8.com>
+Kaur Kuut <strom@nevermore.ee>
+Lunny Xiao <xiaolunwen@gmail.com>
+magician1 <kouta-k@mbm.nifty.com>
+Mathias Fredriksson <mafredri@gmail.com>
+Matt Aimonetti <mattaimonetti@gmail.com>
+Moch. Lutfi <kapten_lufi@yahoo.co.id>
+Moch.Lutfi <kapten_lufi@yahoo.co.id>
+Neoin <Heinoldewage@gmail.com>
+Nguyen Nguyen <ntn@NTN.local>
+Nikita Danilov <mirt@mirt.su>
+OneOfOne <OneOfOne@gmail.com>
+Peter Waller <p@pwaller.net>
+Philipp Klose <TheHippo@users.noreply.github.com>
+richard bucker <richard@bucker.net>
+Shawn Milochik <shawn@milochik.com>
+Shawn Smith <shawnpsmith@gmail.com>
+Shawn Smith <shawn.smith@gengo.com>
+SHIMADA Koji <koji.shimada@enishi-tech.com>
+Steven Degutis <steven.degutis@gmail.com>
+takuya sato <sato-taku@klab.com>
+Thieu Pham <thieu.pham@workiva.com>
+Tormod Erevik Lea <tormodlea@gmail.com>
+trinchan <andrew.schwartz@gengo.com>
+U-NORTH_AMERICA\ACHER <ACHER@PBECRLK.na.ds.monsanto.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yoshiki Shibukawa <shibukawa.yoshiki@dena.jp>
+zhcy <zhangchenyu2009@gmail.com>

+ 131 - 0
vendor/github.com/tealeg/xlsx/README.org

@@ -0,0 +1,131 @@
+* XLSX
+
+[[https://travis-ci.org/tealeg/xlsx][https://img.shields.io/travis/tealeg/xlsx/master.svg?style=flat-square]]
+[[https://codecov.io/gh/tealeg/xlsx][https://codecov.io/gh/tealeg/xlsx/branch/master/graph/badge.svg]]
+[[https://godoc.org/github.com/tealeg/xlsx][https://godoc.org/github.com/tealeg/xlsx?status.svg]]
+[[https://github.com/tealeg/xlsx#license][https://img.shields.io/badge/license-bsd-orange.svg]]
+
+** Introduction
+xlsx is a library to simplify reading and writing the XML format used
+by recent versions of Microsoft Excel in Go programs.
+
+The support for writing XLSX files is currently extremely minimal.  It
+will expand slowly, but in the meantime patches are welcome!
+
+** Full API docs
+The full API docs can be viewed using go's built in documentation
+tool, or online at [[http://godoc.org/github.com/tealeg/xlsx][godoc.org]].
+
+** Basic Usage
+*** Reading XLSX files
+Here is a minimal example usage that will dump all cell data in a
+given XLSX file.  A more complete example of this kind of
+functionality is contained in [[https://github.com/tealeg/xlsx2csv][the XLSX2CSV program]]:
+
+#+BEGIN_SRC go
+
+package main
+
+import (
+    "fmt"
+    "github.com/tealeg/xlsx"
+)
+
+func main() {
+    excelFileName := "/home/tealeg/foo.xlsx"
+    xlFile, err := xlsx.OpenFile(excelFileName)
+    if err != nil {
+        ...
+    }
+    for _, sheet := range xlFile.Sheets {
+        for _, row := range sheet.Rows {
+            for _, cell := range row.Cells {
+                fmt.Printf("%s\n", cell.String())
+            }
+        }
+    }
+}
+
+#+END_SRC
+
+Some additional information is available from the cell (for example,
+style information).  For more details see the godoc output for this
+package.
+
+*** Writing XLSX files
+The following constitutes the bare minimum required to write an XLSX document.
+
+#+BEGIN_SRC go
+
+package main
+
+import (
+    "fmt"
+    "github.com/tealeg/xlsx"
+)
+
+func main() {
+    var file *xlsx.File
+    var sheet *xlsx.Sheet
+    var row *xlsx.Row
+    var cell *xlsx.Cell
+    var err error
+
+    file = xlsx.NewFile()
+    sheet, err = file.AddSheet("Sheet1")
+    if err != nil {
+        fmt.Printf(err.Error())
+    }
+    row = sheet.AddRow()
+    cell = row.AddCell()
+    cell.Value = "I am a cell!"
+    err = file.Save("MyXLSXFile.xlsx")
+    if err != nil {
+        fmt.Printf(err.Error())
+    }
+}
+
+#+END_SRC
+
+** Contributing
+
+We're extremely happy to review pull requests.  Please be patient, maintaining XLSX doesn't pay anyone's salary (to my knowledge).
+
+If you'd like to propose a change please ensure the following:
+
+- All existing tests are passing.
+- There are tests in the test suite that cover the changes you're making.
+- You have added documentation strings (in English) to (at least) the public functions you've added or modified.
+- Your use of, or creation of, XML is compliant with [[http://www.ecma-international.org/publications/standards/Ecma-376.htm][part 1 of the 4th edition of the ECMA-376 Standard for Office Open XML]].
+
+** License
+This code is under a BSD style license:
+
+#+BEGIN_EXAMPLE
+
+  Copyright 2011-2015 Geoffrey Teale. All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+  Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.  Redistributions
+  in binary form must reproduce the above copyright notice, this list of
+  conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.  THIS SOFTWARE IS
+  PROVIDED BY Geoffrey Teale ``AS IS'' AND ANY EXPRESS OR IMPLIED
+  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+  DISCLAIMED. IN NO EVENT SHALL GEOFFREY TEALE OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+  OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+  IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#+END_EXAMPLE
+
+Eat a peach - Geoff

+ 393 - 0
vendor/github.com/tealeg/xlsx/cell.go

@@ -0,0 +1,393 @@
+package xlsx
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
// CellType is an int type for storing metadata about the data type in the cell.
type CellType int

// Known types for cell values; set by the various Cell setters and
// reported by Cell.Type.
const (
	CellTypeString CellType = iota
	CellTypeFormula
	CellTypeNumeric
	CellTypeBool
	CellTypeInline
	CellTypeError
	CellTypeDate
	CellTypeGeneral
)
+
// Cell is a high level structure intended to provide user access to
// the contents of Cell within an xlsx.Row.
type Cell struct {
	Row      *Row     // back-reference to the owning row
	Value    string   // raw cell value, always stored as a string
	formula  string   // formula text, set via SetFormula
	style    *Style   // lazily created by GetStyle
	NumFmt   string   // Excel number format applied by FormattedValue
	date1904 bool     // date system flag passed to TimeFromExcelTime
	Hidden   bool
	HMerge   int      // horizontal merge span, see Merge
	VMerge   int      // vertical merge span, see Merge
	cellType CellType // data-type metadata, see Type()
}
+
// CellInterface defines the public API of the Cell.
// NOTE(review): Cell.String returns (string, error), so *Cell does not
// actually satisfy this interface as declared — confirm intended.
type CellInterface interface {
	String() string
	FormattedValue() string
}
+
// NewCell returns a Cell whose Row back-reference is set to r.  It
// does NOT append the cell to the row's cell list; the caller is
// responsible for that.
func NewCell(r *Row) *Cell {
	return &Cell{Row: r}
}
+
// Merge records a merge with other cells: hcells additional cells
// horizontally and vcells additional cells vertically.
func (c *Cell) Merge(hcells, vcells int) {
	c.HMerge = hcells
	c.VMerge = vcells
}
+
// Type returns the CellType of a cell. See CellType constants for more details.
func (c *Cell) Type() CellType {
	return c.cellType
}
+
// SetString sets the value of a cell to a string, clearing any
// existing formula.
func (c *Cell) SetString(s string) {
	c.Value = s
	c.formula = ""
	c.cellType = CellTypeString
}
+
// String returns the formatted value of a Cell as a string, together
// with any formatting error from FormattedValue.
func (c *Cell) String() (string, error) {
	return c.FormattedValue()
}
+
// SetFloat sets the value of a cell to a float (stored via SetValue as
// a general-format number).
func (c *Cell) SetFloat(n float64) {
	c.SetValue(n)
}
+
+/*
+	The following are samples of format samples.
+
+	* "0.00e+00"
+	* "0", "#,##0"
+	* "0.00", "#,##0.00", "@"
+	* "#,##0 ;(#,##0)", "#,##0 ;[red](#,##0)"
+	* "#,##0.00;(#,##0.00)", "#,##0.00;[red](#,##0.00)"
+	* "0%", "0.00%"
+	* "0.00e+00", "##0.0e+0"
+*/
+
+// SetFloatWithFormat sets the value of a cell to a float and applies
+// formatting to the cell.
+func (c *Cell) SetFloatWithFormat(n float64, format string) {
+	// beauty the output when the float is small enough
+	if n != 0 && n < 0.00001 {
+		c.Value = strconv.FormatFloat(n, 'e', -1, 64)
+	} else {
+		c.Value = strconv.FormatFloat(n, 'f', -1, 64)
+	}
+	c.NumFmt = format
+	c.formula = ""
+	c.cellType = CellTypeNumeric
+}
+
// timeLocationUTC is cached once; LoadLocation("UTC") cannot fail for
// the literal "UTC", so the error is discarded.
var timeLocationUTC, _ = time.LoadLocation("UTC")

// timeToUTCTime reinterprets t's wall-clock fields in UTC.  The
// returned instant therefore differs from t by t's zone offset —
// presumably so Excel serial dates reflect local wall time; confirm
// against callers.
func timeToUTCTime(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), timeLocationUTC)
}
+
+func timeToExcelTime(t time.Time) float64 {
+	return float64(t.Unix())/86400.0 + 25569.0
+}
+
// SetDate stores t as a date-only value: the Excel serial number is
// truncated to a whole day and the built-in date format (numFmt 14)
// is applied.
func (c *Cell) SetDate(t time.Time) {
	c.SetDateTimeWithFormat(float64(int64(timeToExcelTime(timeToUTCTime(t)))), builtInNumFmt[14])
}
+
// SetDateTime stores t as a full date+time value using the built-in
// date-time format (numFmt 22).
func (c *Cell) SetDateTime(t time.Time) {
	c.SetDateTimeWithFormat(timeToExcelTime(timeToUTCTime(t)), builtInNumFmt[22])
}
+
// SetDateTimeWithFormat stores the raw Excel serial date value n,
// applies the supplied number format, and marks the cell as a date.
func (c *Cell) SetDateTimeWithFormat(n float64, format string) {
	c.Value = strconv.FormatFloat(n, 'f', -1, 64)
	c.NumFmt = format
	c.formula = ""
	c.cellType = CellTypeDate
}
+
+// Float returns the value of cell as a number.
+func (c *Cell) Float() (float64, error) {
+	f, err := strconv.ParseFloat(c.Value, 64)
+	if err != nil {
+		return math.NaN(), err
+	}
+	return f, nil
+}
+
// SetInt64 sets a cell's value to a 64-bit integer (stored via
// SetValue as a general-format number).
func (c *Cell) SetInt64(n int64) {
	c.SetValue(n)
}
+
// Int64 returns the value of cell as a 64-bit integer.  On parse
// failure it returns -1 together with the error, so callers must check
// err rather than relying on the -1 sentinel.
func (c *Cell) Int64() (int64, error) {
	f, err := strconv.ParseInt(c.Value, 10, 64)
	if err != nil {
		return -1, err
	}
	return f, nil
}
+
// SetInt sets a cell's value to an integer (stored via SetValue as a
// general-format number).
func (c *Cell) SetInt(n int) {
	c.SetValue(n)
}
+
+// SetInt sets a cell's value to an integer.
+func (c *Cell) SetValue(n interface{}) {
+	switch t := n.(type) {
+	case time.Time:
+		c.SetDateTime(n.(time.Time))
+		return
+	case int, int8, int16, int32, int64, float32, float64:
+		c.setGeneral(fmt.Sprintf("%v", n))
+	case string:
+		c.SetString(t)
+	case []byte:
+		c.SetString(string(t))
+	case nil:
+		c.SetString("")
+	default:
+		c.SetString(fmt.Sprintf("%v", n))
+	}
+}
+
// setGeneral stores s with the built-in "General" number format,
// clearing any formula.  Used by SetValue for numeric types.  (The
// previous comment, copy-pasted from SetInt, was incorrect.)
func (c *Cell) setGeneral(s string) {
	c.Value = s
	c.NumFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL]
	c.formula = ""
	c.cellType = CellTypeGeneral
}
+
// Int returns the value of cell as an integer.  The value is parsed as
// a float64 first, so precision is limited to 53 bits and fractional
// parts are truncated toward zero.  On parse failure it returns -1
// with the error.
// See: float64(int64(math.MaxInt))
func (c *Cell) Int() (int, error) {
	f, err := strconv.ParseFloat(c.Value, 64)
	if err != nil {
		return -1, err
	}
	return int(f), nil
}
+
+// SetBool sets a cell's value to a boolean.
+func (c *Cell) SetBool(b bool) {
+	if b {
+		c.Value = "1"
+	} else {
+		c.Value = "0"
+	}
+	c.cellType = CellTypeBool
+}
+
+// Bool returns a boolean from a cell's value.
+// TODO: Determine if the current return value is
+// appropriate for types other than CellTypeBool.
+func (c *Cell) Bool() bool {
+	// If bool, just return the value.
+	if c.cellType == CellTypeBool {
+		return c.Value == "1"
+	}
+	// If numeric, base it on a non-zero.
+	if c.cellType == CellTypeNumeric || c.cellType == CellTypeGeneral {
+		return c.Value != "0"
+	}
+	// Return whether there's an empty string.
+	return c.Value != ""
+}
+
// SetFormula sets the formula string for a cell and marks it as a
// formula cell.  (The previous comment said "format string", which was
// incorrect.)
func (c *Cell) SetFormula(formula string) {
	c.formula = formula
	c.cellType = CellTypeFormula
}
+
// Formula returns the formula string for the cell.
func (c *Cell) Formula() string {
	return c.formula
}
+
// GetStyle returns the Style associated with a Cell, lazily creating a
// default Style on first access (so the result is never nil).
func (c *Cell) GetStyle() *Style {
	if c.style == nil {
		c.style = NewStyle()
	}
	return c.style
}
+
// SetStyle sets the style of a cell.
func (c *Cell) SetStyle(style *Style) {
	c.style = style
}
+
// GetNumberFormat returns the Excel number format string for a cell.
func (c *Cell) GetNumberFormat() string {
	return c.NumFmt
}
+
// formatToFloat renders the cell's numeric value using the given
// fmt-style verb (e.g. "%.2f").  On parse failure it returns the raw
// value together with the error.
func (c *Cell) formatToFloat(format string) (string, error) {
	f, err := strconv.ParseFloat(c.Value, 64)
	if err != nil {
		return c.Value, err
	}
	return fmt.Sprintf(format, f), nil
}
+
// formatToInt renders the cell's numeric value as an integer using the
// given fmt-style verb (e.g. "%d"); int(f) truncates any fractional
// part toward zero.  On parse failure it returns the raw value with
// the error.
func (c *Cell) formatToInt(format string) (string, error) {
	f, err := strconv.ParseFloat(c.Value, 64)
	if err != nil {
		return c.Value, err
	}
	return fmt.Sprintf(format, int(f)), nil
}
+
// FormattedValue returns a value, and possibly an error condition
// from a Cell.  If it is possible to apply a format to the cell
// value, it will do so, if not then an error will be returned, along
// with the raw value of the Cell.  Unrecognised formats fall through
// and return the raw value with a nil error.
func (c *Cell) FormattedValue() (string, error) {
	var numberFormat = c.GetNumberFormat()
	if isTimeFormat(numberFormat) {
		return parseTime(c)
	}
	switch numberFormat {
	case builtInNumFmt[builtInNumFmtIndex_GENERAL], builtInNumFmt[builtInNumFmtIndex_STRING]:
		return c.Value, nil
	case builtInNumFmt[builtInNumFmtIndex_INT], "#,##0":
		// NOTE(review): "%d" emits no thousands separators, so the
		// "#,##0" grouping in the format is not honoured.
		return c.formatToInt("%d")
	case builtInNumFmt[builtInNumFmtIndex_FLOAT], "#,##0.00":
		return c.formatToFloat("%.2f")
	case "#,##0 ;(#,##0)", "#,##0 ;[red](#,##0)":
		// Accounting style: negatives in parentheses, with the sign
		// removed via math.Abs.
		f, err := strconv.ParseFloat(c.Value, 64)
		if err != nil {
			return c.Value, err
		}
		if f < 0 {
			i := int(math.Abs(f))
			return fmt.Sprintf("(%d)", i), nil
		}
		i := int(f)
		return fmt.Sprintf("%d", i), nil
	case "#,##0.00;(#,##0.00)", "#,##0.00;[red](#,##0.00)":
		f, err := strconv.ParseFloat(c.Value, 64)
		if err != nil {
			return c.Value, err
		}
		if f < 0 {
			// NOTE(review): unlike the integer case above, the minus
			// sign is kept inside the parentheses here ("(-1.23)") —
			// confirm whether that asymmetry is intended.
			return fmt.Sprintf("(%.2f)", f), nil
		}
		return fmt.Sprintf("%.2f", f), nil
	case "0%":
		f, err := strconv.ParseFloat(c.Value, 64)
		if err != nil {
			return c.Value, err
		}
		// int(f) truncates: 0.119 renders as "11%", not "12%".
		f = f * 100
		return fmt.Sprintf("%d%%", int(f)), nil
	case "0.00%":
		f, err := strconv.ParseFloat(c.Value, 64)
		if err != nil {
			return c.Value, err
		}
		f = f * 100
		return fmt.Sprintf("%.2f%%", f), nil
	case "0.00e+00", "##0.0e+0":
		return c.formatToFloat("%e")
	}
	return c.Value, nil

}
+
// parseTime renders the cell's numeric value as a date/time string by
// translating its Excel format into a Go time layout and formatting
// the time decoded via TimeFromExcelTime.  On parse failure the raw
// value is returned with the error.
func parseTime(c *Cell) (string, error) {
	f, err := strconv.ParseFloat(c.Value, 64)
	if err != nil {
		return c.Value, err
	}
	val := TimeFromExcelTime(f, c.date1904)
	format := c.GetNumberFormat()

	// Replace Excel placeholders with Go time placeholders.
	// For example, replace yyyy with 2006. These are in a specific order,
	// due to the fact that m is used in month, minute, and am/pm. It would
	// be easier to fix that with regular expressions, but if it's possible
	// to keep this simple it would be easier to maintain.
	// Full-length month and days (e.g. March, Tuesday) have letters in them that would be replaced
	// by other characters below (such as the 'h' in March, or the 'd' in Tuesday) below.
	// First we convert them to arbitrary characters unused in Excel Date formats, and then at the end,
	// turn them to what they should actually be.
	// Based off: http://www.ozgrid.com/Excel/CustomFormats.htm
	replacements := []struct{ xltime, gotime string }{
		{"yyyy", "2006"},
		{"yy", "06"},
		{"mmmm", "%%%%"},
		{"dddd", "&&&&"},
		{"dd", "02"},
		{"d", "2"},
		{"mmm", "Jan"},
		{"mmss", "0405"},
		{"ss", "05"},
		{"hh", "15"},
		{"h", "3"},
		{"mm:", "04:"},
		{":mm", ":04"},
		{"mm", "01"},
		{"am/pm", "pm"},
		{"m/", "1/"},
		{"%%%%", "January"},
		{"&&&&", "Monday"},
	}
	// NOTE(review): each replacement is applied at most once (count 1),
	// so a format repeating a token leaves later occurrences
	// untranslated — confirm whether that is intended.
	for _, repl := range replacements {
		format = strings.Replace(format, repl.xltime, repl.gotime, 1)
	}
	// If the hour is optional, strip it out, along with the
	// possible dangling colon that would remain.
	if val.Hour() < 1 {
		format = strings.Replace(format, "]:", "]", 1)
		format = strings.Replace(format, "[3]", "", 1)
		format = strings.Replace(format, "[15]", "", 1)
	} else {
		format = strings.Replace(format, "[3]", "3", 1)
		format = strings.Replace(format, "[15]", "15", 1)
	}
	return val.Format(format), nil
}
+
// isTimeFormat checks whether an Excel number-format string looks like
// a date/time format, by scanning for any of the date/time tokens.
func isTimeFormat(format string) bool {
	for _, token := range []string{"yy", "hh", "am", "pm", "ss", "mm", ":"} {
		if strings.Contains(format, token) {
			return true
		}
	}
	return false
}

+ 44 - 0
vendor/github.com/tealeg/xlsx/col.go

@@ -0,0 +1,44 @@
+package xlsx
+
// ColWidth is the default column width in Excel.
const ColWidth = 9.5

// Col describes the properties of a worksheet column definition,
// which applies to the (inclusive) range of columns Min..Max.
type Col struct {
	Min          int     // first column the definition applies to
	Max          int     // last column the definition applies to
	Hidden       bool    // whether the column is hidden
	Width        float64 // column width, in Excel's character units
	Collapsed    bool    // whether the column's outline group is collapsed
	OutlineLevel uint8   // outline (grouping) depth
	numFmt       string  // number format code applied to the column
	style        *Style  // style applied to the column
}
+
// SetType sets the column's number format to the built-in Excel
// format matching the given cell type. Bool and Error currently fall
// back to the general format (marked TEMP pending proper formats).
func (c *Col) SetType(cellType CellType) {
	switch cellType {
	case CellTypeString:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
	case CellTypeBool:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
	case CellTypeNumeric:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_INT]
	case CellTypeDate:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_DATE]
	case CellTypeFormula:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL]
	case CellTypeError:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
	case CellTypeGeneral:
		c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL]
	}
}
+
// GetStyle returns the Style associated with a Col.
func (c *Col) GetStyle() *Style {
	return c.style
}

// SetStyle sets the style of a Col.
func (c *Col) SetStyle(style *Style) {
	c.style = style
}

+ 105 - 0
vendor/github.com/tealeg/xlsx/date.go

@@ -0,0 +1,105 @@
+package xlsx
+
+import (
+	"math"
+	"time"
+)
+
// MJD_0 is the Julian date of the Modified Julian Date epoch.
const MJD_0 float64 = 2400000.5

// MJD_JD2000 is the Modified Julian Date of the J2000 epoch.
const MJD_JD2000 float64 = 51544.5
+
// shiftJulianToNoon normalises a Julian day/fraction pair so the
// fractional part measures forward from the preceding midnight,
// adjusting the day count as needed.
func shiftJulianToNoon(julianDays, julianFraction float64) (float64, float64) {
	if julianFraction >= 0.5 {
		return julianDays + 1, julianFraction - 0.5
	}
	if julianFraction <= -0.5 {
		return julianDays - 1, julianFraction + 1.5
	}
	return julianDays, julianFraction + 0.5
}
+
// fractionOfADay converts a fraction of a day into integer hour,
// minute, second and nanosecond components. The result is rounded to
// the nearest microsecond.
func fractionOfADay(fraction float64) (hours, minutes, seconds, nanoseconds int) {
	const (
		microsecond = 1e3
		second      = 1e9
		day         = 24 * 60 * 60 * second
	)
	// Scale to nanoseconds and add half a microsecond so the
	// truncations below round to the nearest microsecond.
	ticks := int64(day*fraction + microsecond/2)
	nanoseconds = int((ticks%second)/microsecond) * microsecond
	ticks /= second
	seconds = int(ticks % 60)
	ticks /= 60
	minutes = int(ticks % 60)
	hours = int(ticks / 60)
	return
}
+
// julianDateToGregorianTime converts a Julian date, supplied as two
// float64 parts (summed; split in two so precision is not lost when
// the value is large), into a Gregorian time.Time in UTC.
func julianDateToGregorianTime(part1, part2 float64) time.Time {
	part1I, part1F := math.Modf(part1)
	part2I, part2F := math.Modf(part2)
	// Combine the integer and fractional components separately to
	// preserve precision.
	julianDays := part1I + part2I
	julianFraction := part1F + part2F
	julianDays, julianFraction = shiftJulianToNoon(julianDays, julianFraction)
	day, month, year := doTheFliegelAndVanFlandernAlgorithm(int(julianDays))
	hours, minutes, seconds, nanoseconds := fractionOfADay(julianFraction)
	return time.Date(year, time.Month(month), day, hours, minutes, seconds, nanoseconds, time.UTC)
}
+
// doTheFliegelAndVanFlandernAlgorithm converts a Julian day number to
// a Gregorian (day, month, year) triple using the integer arithmetic
// published by Henry F. Fliegel and Thomas C. Van Flandern in a
// letter to the editor of "Communications of the ACM" (CACM, volume
// 11, number 10, October 1968, p. 657). The constants are as given
// there, unexplained; generations of programmers have copied them
// verbatim.
func doTheFliegelAndVanFlandernAlgorithm(jd int) (day, month, year int) {
	t := jd + 68569
	quadCent := (4 * t) / 146097
	t -= (146097*quadCent + 3) / 4
	cent := (4000 * (t + 1)) / 1461001
	t += 31 - (1461*cent)/4
	monthish := (80 * t) / 2447
	day = t - (2447*monthish)/80
	carry := monthish / 11
	month = monthish + 2 - 12*carry
	year = 100*(quadCent-49) + cent + carry
	return day, month, year
}
+
+// Convert an excelTime representation (stored as a floating point number) to a time.Time.
+func TimeFromExcelTime(excelTime float64, date1904 bool) time.Time {
+	var date time.Time
+	var intPart int64 = int64(excelTime)
+	// Excel uses Julian dates prior to March 1st 1900, and
+	// Gregorian thereafter.
+	if intPart <= 61 {
+		const OFFSET1900 = 15018.0
+		const OFFSET1904 = 16480.0
+		var date time.Time
+		if date1904 {
+			date = julianDateToGregorianTime(MJD_0, excelTime+OFFSET1904)
+		} else {
+			date = julianDateToGregorianTime(MJD_0, excelTime+OFFSET1900)
+		}
+		return date
+	}
+	var floatPart float64 = excelTime - float64(intPart)
+	var dayNanoSeconds float64 = 24 * 60 * 60 * 1000 * 1000 * 1000
+	if date1904 {
+		date = time.Date(1904, 1, 1, 0, 0, 0, 0, time.UTC)
+	} else {
+		date = time.Date(1899, 12, 30, 0, 0, 0, 0, time.UTC)
+	}
+	durationDays := time.Duration(intPart) * time.Hour * 24
+	durationPart := time.Duration(dayNanoSeconds * floatPart)
+	return date.Add(durationDays).Add(durationPart)
+}

+ 11 - 0
vendor/github.com/tealeg/xlsx/doc.go

@@ -0,0 +1,11 @@
// Package xlsx is designed to help with reading data from
// spreadsheets stored in the XLSX format used in recent versions of
// Microsoft's Excel spreadsheet application.
//
// Additionally, xlsx has started to grow some XLSX authoring
// capabilities too.
//
// For a concise example of how to use this library why not check out
// the source for xlsx2csv here: https://github.com/tealeg/xlsx2csv
package xlsx

+ 308 - 0
vendor/github.com/tealeg/xlsx/file.go

@@ -0,0 +1,308 @@
+package xlsx
+
+import (
+	"archive/zip"
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
// File is a high level structure providing a slice of Sheet structs
// to the user.
type File struct {
	worksheets     map[string]*zip.File // raw worksheet zip parts, keyed by name
	referenceTable *RefTable            // shared-string reference table
	Date1904       bool                 // true when the workbook uses the 1904 date epoch
	styles         *xlsxStyleSheet      // parsed style sheet
	Sheets         []*Sheet             // sheets in workbook order
	Sheet          map[string]*Sheet    // sheets indexed by name
	theme          *theme               // workbook theme
	DefinedNames   []*xlsxDefinedName   // workbook-level defined names
}
+
// NewFile creates a new, empty File with its sheet containers
// initialised (non-nil), ready for AddSheet.
func NewFile() *File {
	return &File{
		Sheet:        make(map[string]*Sheet),
		Sheets:       make([]*Sheet, 0),
		DefinedNames: make([]*xlsxDefinedName, 0),
	}
}
+
// OpenFile takes the name of an XLSX file and returns a populated
// xlsx.File struct for it.
func OpenFile(filename string) (file *File, err error) {
	var f *zip.ReadCloser
	f, err = zip.OpenReader(filename)
	if err != nil {
		return nil, err
	}
	file, err = ReadZip(f)
	return
}
+
// OpenBinary takes the bytes of an XLSX file and returns a populated
// xlsx.File struct for it.
func OpenBinary(bs []byte) (*File, error) {
	r := bytes.NewReader(bs)
	return OpenReaderAt(r, int64(r.Len()))
}
+
// OpenReaderAt takes an io.ReaderAt (and the total size of the data)
// for an XLSX file and returns a populated xlsx.File struct for it.
func OpenReaderAt(r io.ReaderAt, size int64) (*File, error) {
	file, err := zip.NewReader(r, size)
	if err != nil {
		return nil, err
	}
	return ReadZipReader(file)
}
+
// FileToSlice is a convenient wrapper around File.ToSlice: it
// returns the raw data contained in an Excel XLSX file as a three
// dimensional slice.  The first index represents the sheet number,
// the second the row number, and the third the cell number.
//
// For example:
//
//    var mySlice [][][]string
//    var value string
//    mySlice = xlsx.FileToSlice("myXLSX.xlsx")
//    value = mySlice[0][0][0]
//
// Here, value would be set to the raw value of the cell A1 in the
// first sheet in the XLSX file.
func FileToSlice(path string) ([][][]string, error) {
	f, err := OpenFile(path)
	if err != nil {
		return nil, err
	}
	return f.ToSlice()
}
+
+// Save the File to an xlsx file at the provided path.
+func (f *File) Save(path string) (err error) {
+	target, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	err = f.Write(target)
+	if err != nil {
+		return err
+	}
+	return target.Close()
+}
+
// Write serialises the File to the given io.Writer as a complete
// xlsx (zip) archive.
func (f *File) Write(writer io.Writer) (err error) {
	parts, err := f.MarshallParts()
	if err != nil {
		return
	}
	zipWriter := zip.NewWriter(writer)
	for partName, part := range parts {
		w, err := zipWriter.Create(partName)
		if err != nil {
			return err
		}
		_, err = w.Write([]byte(part))
		if err != nil {
			return err
		}
	}
	// Close flushes the zip central directory to the writer.
	return zipWriter.Close()
}
+
// AddSheet adds a new Sheet, with the provided name, to a File.
// It returns an error if a sheet with that name already exists.  The
// first sheet added is marked as selected.
// NOTE(review): assumes f.Sheet is non-nil, i.e. the File was made
// via NewFile or the reader — confirm before calling on a zero File.
func (f *File) AddSheet(sheetName string) (*Sheet, error) {
	if _, exists := f.Sheet[sheetName]; exists {
		return nil, fmt.Errorf("duplicate sheet name '%s'.", sheetName)
	}
	sheet := &Sheet{
		Name:     sheetName,
		File:     f,
		Selected: len(f.Sheets) == 0,
	}
	f.Sheet[sheetName] = sheet
	f.Sheets = append(f.Sheets, sheet)
	return sheet, nil
}
+
// makeWorkbook builds the skeleton xlsxWorkbook structure for this
// File, populated with default view and calculation settings; the
// per-sheet entries are filled in later by MarshallParts.
func (f *File) makeWorkbook() xlsxWorkbook {
	return xlsxWorkbook{
		FileVersion: xlsxFileVersion{AppName: "Go XLSX"},
		WorkbookPr:  xlsxWorkbookPr{ShowObjects: "all"},
		BookViews: xlsxBookViews{
			WorkBookView: []xlsxWorkBookView{
				{
					ShowHorizontalScroll: true,
					ShowSheetTabs:        true,
					ShowVerticalScroll:   true,
					TabRatio:             204,
					WindowHeight:         8192,
					WindowWidth:          16384,
					XWindow:              "0",
					YWindow:              "0",
				},
			},
		},
		// One xlsxSheet slot per sheet; filled in by the caller.
		Sheets: xlsxSheets{Sheet: make([]xlsxSheet, len(f.Sheets))},
		CalcPr: xlsxCalcPr{
			IterateCount: 100,
			RefMode:      "A1",
			Iterate:      false,
			IterateDelta: 0.001,
		},
	}
}
+
// Some tools that read XLSX files have very strict requirements about
// the structure of the input XML.  In particular both Numbers on the
// Mac and SAS dislike inline XML namespace declarations, or namespace
// prefixes that don't match the ones that Excel itself uses.  This is
// a problem because the Go XML library doesn't allow multiple
// namespace declarations in a single element of a document.  This
// function is a horrible hack to fix that after the XML marshalling
// is completed.
func replaceRelationshipsNameSpace(workbookMarshal string) string {
	const (
		inlineRelAttr = `xmlns:relationships="http://schemas.openxmlformats.org/officeDocument/2006/relationships" relationships:id`
		shortRelAttr  = `r:id`
		oldXmlns      = `<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">`
		newXmlns      = `<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">`
	)
	// Dirty hack to fix issues #63 and #91: declare the "r"
	// namespace once, on the root element, and use the short prefix
	// everywhere else.
	patched := strings.Replace(workbookMarshal, inlineRelAttr, shortRelAttr, -1)
	return strings.Replace(patched, oldXmlns, newXmlns, 1)
}
+
+// Construct a map of file name to XML content representing the file
+// in terms of the structure of an XLSX file.
+func (f *File) MarshallParts() (map[string]string, error) {
+	var parts map[string]string
+	var refTable *RefTable = NewSharedStringRefTable()
+	refTable.isWrite = true
+	var workbookRels WorkBookRels = make(WorkBookRels)
+	var err error
+	var workbook xlsxWorkbook
+	var types xlsxTypes = MakeDefaultContentTypes()
+
+	marshal := func(thing interface{}) (string, error) {
+		body, err := xml.Marshal(thing)
+		if err != nil {
+			return "", err
+		}
+		return xml.Header + string(body), nil
+	}
+
+	parts = make(map[string]string)
+	workbook = f.makeWorkbook()
+	sheetIndex := 1
+
+	if f.styles == nil {
+		f.styles = newXlsxStyleSheet(f.theme)
+	}
+	f.styles.reset()
+	for _, sheet := range f.Sheets {
+		xSheet := sheet.makeXLSXSheet(refTable, f.styles)
+		rId := fmt.Sprintf("rId%d", sheetIndex)
+		sheetId := strconv.Itoa(sheetIndex)
+		sheetPath := fmt.Sprintf("worksheets/sheet%d.xml", sheetIndex)
+		partName := "xl/" + sheetPath
+		types.Overrides = append(
+			types.Overrides,
+			xlsxOverride{
+				PartName:    "/" + partName,
+				ContentType: "application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"})
+		workbookRels[rId] = sheetPath
+		workbook.Sheets.Sheet[sheetIndex-1] = xlsxSheet{
+			Name:    sheet.Name,
+			SheetId: sheetId,
+			Id:      rId,
+			State:   "visible"}
+		parts[partName], err = marshal(xSheet)
+		if err != nil {
+			return parts, err
+		}
+		sheetIndex++
+	}
+
+	workbookMarshal, err := marshal(workbook)
+	if err != nil {
+		return parts, err
+	}
+	workbookMarshal = replaceRelationshipsNameSpace(workbookMarshal)
+	parts["xl/workbook.xml"] = workbookMarshal
+	if err != nil {
+		return parts, err
+	}
+
+	parts["_rels/.rels"] = TEMPLATE__RELS_DOT_RELS
+	parts["docProps/app.xml"] = TEMPLATE_DOCPROPS_APP
+	// TODO - do this properly, modification and revision information
+	parts["docProps/core.xml"] = TEMPLATE_DOCPROPS_CORE
+	parts["xl/theme/theme1.xml"] = TEMPLATE_XL_THEME_THEME
+
+	xSST := refTable.makeXLSXSST()
+	parts["xl/sharedStrings.xml"], err = marshal(xSST)
+	if err != nil {
+		return parts, err
+	}
+
+	xWRel := workbookRels.MakeXLSXWorkbookRels()
+
+	parts["xl/_rels/workbook.xml.rels"], err = marshal(xWRel)
+	if err != nil {
+		return parts, err
+	}
+
+	parts["[Content_Types].xml"], err = marshal(types)
+	if err != nil {
+		return parts, err
+	}
+
+	parts["xl/styles.xml"], err = f.styles.Marshal()
+	if err != nil {
+		return parts, err
+	}
+
+	return parts, nil
+}
+
// ToSlice returns the raw data contained in the File as a three
// dimensional slice.  The first index represents the sheet number,
// the second the row number, and the third the cell number.  Nil
// rows are skipped.
//
// For example:
//
//    var mySlice [][][]string
//    var value string
//    mySlice = xlsx.FileToSlice("myXLSX.xlsx")
//    value = mySlice[0][0][0]
//
// Here, value would be set to the raw value of the cell A1 in the
// first sheet in the XLSX file.
func (file *File) ToSlice() (output [][][]string, err error) {
	output = [][][]string{}
	for _, sheet := range file.Sheets {
		s := [][]string{}
		for _, row := range sheet.Rows {
			if row == nil {
				continue
			}
			r := []string{}
			for _, cell := range row.Cells {
				str, err := cell.String()
				if err != nil {
					return output, err
				}
				r = append(r, str)
			}
			s = append(s, r)
		}
		output = append(output, s)
	}
	return output, nil
}

+ 145 - 0
vendor/github.com/tealeg/xlsx/hsl.go

@@ -0,0 +1,145 @@
+/*
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+	 * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package xlsx
+
+import (
+	"image/color"
+	"math"
+)
+
// HSLModel converts any color.Color to a HSL color.
var HSLModel = color.ModelFunc(hslModel)

// HSL represents a cylindrical coordinate of points in an RGB color model.
//
// Values are in the range 0 to 1.
type HSL struct {
	H, S, L float64
}
+
// RGBA returns the alpha-premultiplied red, green, blue and alpha
// values for the HSL, satisfying the color.Color interface.  Alpha
// is always fully opaque (0xffff).
func (c HSL) RGBA() (uint32, uint32, uint32, uint32) {
	r, g, b := HSLToRGB(c.H, c.S, c.L)
	// Scale 8-bit channels to 16-bit (0x101 == 257 maps 255 to 0xffff).
	return uint32(r) * 0x101, uint32(g) * 0x101, uint32(b) * 0x101, 0xffff
}
+
// hslModel converts a color.Color to HSL.  If the color is already
// an HSL it is returned unchanged.
func hslModel(c color.Color) color.Color {
	if _, ok := c.(HSL); ok {
		return c
	}
	r, g, b, _ := c.RGBA()
	// RGBA returns 16-bit channels; shift down to 8-bit for RGBToHSL.
	h, s, l := RGBToHSL(uint8(r>>8), uint8(g>>8), uint8(b>>8))
	return HSL{h, s, l}
}
+
// RGBToHSL converts an RGB triple to a HSL triple, with each result
// component in the range [0, 1].
//
// Ported from http://goo.gl/Vg1h9
func RGBToHSL(r, g, b uint8) (h, s, l float64) {
	fR := float64(r) / 255
	fG := float64(g) / 255
	fB := float64(b) / 255
	max := math.Max(fR, math.Max(fG, fB))
	min := math.Min(fR, math.Min(fG, fB))
	l = (max + min) / 2
	if max == min {
		// Achromatic: grey has no hue or saturation.
		return 0, 0, l
	}
	// Chromatic.
	d := max - min
	if l > 0.5 {
		s = d / (2.0 - max - min)
	} else {
		s = d / (max + min)
	}
	switch max {
	case fR:
		h = (fG - fB) / d
		if fG < fB {
			h += 6
		}
	case fG:
		h = (fB-fR)/d + 2
	case fB:
		h = (fR-fG)/d + 4
	}
	h /= 6
	return h, s, l
}
+
// HSLToRGB converts an HSL triple to a RGB triple, with each result
// channel in the range 0-255.
//
// Ported from http://goo.gl/Vg1h9
func HSLToRGB(h, s, l float64) (r, g, b uint8) {
	var fR, fG, fB float64
	if s == 0 {
		// Achromatic: all channels equal the lightness.
		fR, fG, fB = l, l, l
	} else {
		var q float64
		if l < 0.5 {
			q = l * (1 + s)
		} else {
			q = l + s - s*l
		}
		p := 2*l - q
		fR = hueToRGB(p, q, h+1.0/3)
		fG = hueToRGB(p, q, h)
		fB = hueToRGB(p, q, h-1.0/3)
	}
	// Round to the nearest 8-bit value.
	r = uint8((fR * 255) + 0.5)
	g = uint8((fG * 255) + 0.5)
	b = uint8((fB * 255) + 0.5)
	return
}
+
// hueToRGB is a helper function for HSLToRGB: it maps a hue value
// (with the p/q intermediates) to a single channel intensity in the
// range [0, 1].
func hueToRGB(p, q, t float64) float64 {
	// Wrap t into [0, 1].
	if t < 0 {
		t++
	}
	if t > 1 {
		t--
	}
	switch {
	case t < 1.0/6:
		return p + (q-p)*6*t
	case t < 0.5:
		return q
	case t < 2.0/3:
		return p + (q-p)*(2.0/3-t)*6
	}
	return p
}

+ 983 - 0
vendor/github.com/tealeg/xlsx/lib.go

@@ -0,0 +1,983 @@
+package xlsx
+
+import (
+	"archive/zip"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"strconv"
+	"strings"
+)
+
// XLSXReaderError is the standard error type for otherwise undefined
// errors in the XSLX reading process.
type XLSXReaderError struct {
	Err string // human-readable description of the failure
}

// Error returns a string value from an XLSXReaderError struct in order
// that it might comply with the builtin.error interface.
func (e *XLSXReaderError) Error() string {
	return e.Err
}
+
// getRangeFromString is an internal helper function that converts
// XLSX internal range syntax to a pair of integers.  For example,
// the range string "1:3" yields the lower and upper integers 1 and 3.
// A malformed range produces a non-nil error and zero bounds.
func getRangeFromString(rangeString string) (lower int, upper int, error error) {
	parts := strings.SplitN(rangeString, ":", 2)
	// Guard against strings with no ':' (previously parts[1] caused
	// an index-out-of-range panic) and against empty bounds.
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return lower, upper, fmt.Errorf("Invalid range '%s'\n", rangeString)
	}
	lower, error = strconv.Atoi(parts[0])
	if error != nil {
		// Return immediately so the error is not silently
		// overwritten by a later successful conversion.
		return lower, upper, fmt.Errorf("Invalid range (not integer in lower bound) %s\n", rangeString)
	}
	upper, error = strconv.Atoi(parts[1])
	if error != nil {
		return lower, upper, fmt.Errorf("Invalid range (not integer in upper bound) %s\n", rangeString)
	}
	return lower, upper, nil
}
+
// lettersToNumeric converts a character based column reference (for
// example "A" or "BC") to a zero based numeric column identifier.
func lettersToNumeric(letters string) int {
	sum := 0
	mul := 1
	for pos := len(letters) - 1; pos >= 0; pos-- {
		c := letters[pos]
		// All digits except the least significant carry an implicit
		// +1 (Excel's column notation has no zero digit).
		digit := 0
		if pos < len(letters)-1 {
			digit = 1
		}
		switch {
		case 'A' <= c && c <= 'Z':
			digit += int(c - 'A')
		case 'a' <= c && c <= 'z':
			digit += int(c - 'a')
		}
		sum += digit * mul
		mul *= 26
	}
	return sum
}
+
// getLargestDenominator returns the largest denominator that is a
// multiple of baseDenominator (i.e. multiple * baseDenominator^k)
// still fitting at least once into the given numerator, together
// with the corresponding power.
func getLargestDenominator(numerator, multiple, baseDenominator, power int) (int, int) {
	if numerator/multiple == 0 {
		return 1, power
	}
	candidate, candidatePower := getLargestDenominator(
		numerator, multiple*baseDenominator, baseDenominator, power+1)
	if candidate <= multiple {
		return multiple, power
	}
	return candidate, candidatePower
}
+
// formatColumnName converts a slice of base-26 digits representing a
// column into the alphabetic representation used in the spreadsheet.
func formatColumnName(colId []int) string {
	last := len(colId) - 1
	name := ""
	for n, part := range colId {
		switch {
		case n == last:
			// The least significant digit is in the range 0-25; all
			// other digits are 1-26, hence the different offset.
			name += string(rune('A' + part))
		case part > 0:
			// Don't output leading 0s, as there is no representation
			// of 0 in this format.
			name += string(rune('A' - 1 + part))
		}
	}
	return name
}
+
// smooshBase26Slice normalises a base-26 digit slice in place by
// borrowing from more significant digits, eliminating zero digits
// from every position except the least significant (Excel's column
// notation has no representation of zero).  The slice is returned
// for convenience.
func smooshBase26Slice(b26 []int) []int {
	for i := len(b26) - 2; i > 0; i-- {
		if b26[i] != 0 {
			continue
		}
		if b26[i-1] > 0 {
			// Borrow one unit from the next more significant digit.
			b26[i-1]--
			b26[i] = 26
		}
	}
	return b26
}
+
// intToBase26 converts an integer into a slice of base-26 digits,
// most significant first.  The digits still contain zeros; callers
// pass the result through smooshBase26Slice/formatColumnName to get
// Excel's zero-less column notation.
func intToBase26(x int) (parts []int) {
	// Excel column codes are pure evil - in essence they're just
	// base26, but they don't represent the number 0.
	b26Denominator, _ := getLargestDenominator(x, 1, 26, 0)

	// This loop terminates because integer division of 1 / 26
	// returns 0.
	for d := b26Denominator; d > 0; d = d / 26 {
		value := x / d
		remainder := x % d
		parts = append(parts, value)
		x = remainder
	}
	return parts
}
+
// numericToLetters is used to convert a zero based, numeric column
// indentifier into a character code, e.g. 0 becomes "A".
func numericToLetters(colRef int) string {
	parts := intToBase26(colRef)
	return formatColumnName(smooshBase26Slice(parts))
}
+
// letterOnlyMapF is used in conjunction with strings.Map to keep only
// the characters A-Z and a-z in a string, upper-casing lower-case
// letters and dropping (returning -1 for) everything else.
func letterOnlyMapF(r rune) rune {
	if 'A' <= r && r <= 'Z' {
		return r
	}
	if 'a' <= r && r <= 'z' {
		return r - ('a' - 'A')
	}
	return -1
}
+
// intOnlyMapF is used in conjunction with strings.Map to keep only
// the decimal digit characters of a string; every other rune maps to
// -1 and is dropped.
func intOnlyMapF(r rune) rune {
	if '0' <= r && r <= '9' {
		return r
	}
	return -1
}
+
// getCoordsFromCellIDString returns the zero based cartesian
// coordinates from a cell name in Excel format, e.g. the cellIDString
// "A1" returns 0, 0 and the "B3" returns 1, 2.
func getCoordsFromCellIDString(cellIDString string) (x, y int, error error) {
	var letterPart string = strings.Map(letterOnlyMapF, cellIDString)
	y, error = strconv.Atoi(strings.Map(intOnlyMapF, cellIDString))
	if error != nil {
		return x, y, error
	}
	y -= 1 // Zero based
	x = lettersToNumeric(letterPart)
	return x, y, error
}
+
// getCellIDStringFromCoords returns the Excel format cell name that
// represents a pair of zero based cartesian coordinates, e.g. (0, 0)
// becomes "A1".
func getCellIDStringFromCoords(x, y int) string {
	letterPart := numericToLetters(x)
	numericPart := y + 1 // Excel rows are 1-based
	return fmt.Sprintf("%s%d", letterPart, numericPart)
}
+
// getMaxMinFromDimensionRef returns the zero based cartesian maximum
// and minimum coordinates from the dimension reference embedded in a
// XLSX worksheet.  For example, the dimension reference "A1:B2"
// returns "0,0", "1,1".  A single-cell reference such as "A1" yields
// identical min and max coordinates.
func getMaxMinFromDimensionRef(ref string) (minx, miny, maxx, maxy int, err error) {
	var parts []string
	parts = strings.Split(ref, ":")
	minx, miny, err = getCoordsFromCellIDString(parts[0])
	if err != nil {
		return -1, -1, -1, -1, err
	}
	if len(parts) == 1 {
		// No upper bound given: the dimension is a single cell.
		maxx, maxy = minx, miny
		return
	}
	maxx, maxy, err = getCoordsFromCellIDString(parts[1])
	if err != nil {
		return -1, -1, -1, -1, err
	}
	return
}
+
// calculateMaxMinFromWorksheet works out the dimensions of a spreadsheet
// that doesn't have a DimensionRef set.  The only case currently
// known where this is true is with XLSX exported from Google Docs.
// It scans every cell reference in the sheet, so it can be slow for
// large spreadsheets.
func calculateMaxMinFromWorksheet(worksheet *xlsxWorksheet) (minx, miny, maxx, maxy int, err error) {
	// Note, this method could be very slow for large spreadsheets.
	var x, y int
	var maxVal int
	// Start the minima at the largest possible int so any real
	// coordinate will shrink them.
	maxVal = int(^uint(0) >> 1)
	minx = maxVal
	miny = maxVal
	maxy = 0
	maxx = 0
	for _, row := range worksheet.SheetData.Row {
		for _, cell := range row.C {
			x, y, err = getCoordsFromCellIDString(cell.R)
			if err != nil {
				return -1, -1, -1, -1, err
			}
			if x < minx {
				minx = x
			}
			if x > maxx {
				maxx = x
			}
			if y < miny {
				miny = y
			}
			if y > maxy {
				maxy = y
			}
		}
	}
	// An empty sheet leaves the minima untouched; normalise to 0.
	if minx == maxVal {
		minx = 0
	}
	if miny == maxVal {
		miny = 0
	}
	return
}
+
// makeRowFromSpan will, when given a span expressed as a string,
// return an empty Row large enough to encompass that span and
// populate it with empty cells.  All rows start from cell 1 -
// regardless of the lower bound of the span.
// Note: it panics if the span string cannot be parsed.
func makeRowFromSpan(spans string, sheet *Sheet) *Row {
	var error error
	var upper int
	var row *Row
	var cell *Cell

	row = new(Row)
	row.Sheet = sheet
	_, upper, error = getRangeFromString(spans)
	if error != nil {
		panic(error)
	}
	error = nil
	// Allocate one empty cell per column up to the span's upper bound.
	row.Cells = make([]*Cell, upper)
	for i := 0; i < upper; i++ {
		cell = new(Cell)
		cell.Value = ""
		row.Cells[i] = cell
	}
	return row
}
+
// makeRowFromRaw returns the Row representation of the xlsxRow,
// sized from the highest column coordinate present in the raw row
// and pre-populated with empty cells.
// Note: it panics on an unparseable cell reference.
func makeRowFromRaw(rawrow xlsxRow, sheet *Sheet) *Row {
	var upper int
	var row *Row
	var cell *Cell

	row = new(Row)
	row.Sheet = sheet
	upper = -1

	// Find the highest column index used in this row; cells without
	// an explicit reference are counted positionally.
	for _, rawcell := range rawrow.C {
		if rawcell.R != "" {
			x, _, error := getCoordsFromCellIDString(rawcell.R)
			if error != nil {
				panic(fmt.Sprintf("Invalid Cell Coord, %s\n", rawcell.R))
			}
			if x > upper {
				upper = x
			}
			continue
		}
		upper++
	}
	upper++

	row.OutlineLevel = rawrow.OutlineLevel

	row.Cells = make([]*Cell, upper)
	for i := 0; i < upper; i++ {
		cell = new(Cell)
		cell.Value = ""
		row.Cells[i] = cell
	}
	return row
}
+
// makeEmptyRow returns a new Row with no cells, attached to the
// given sheet.
func makeEmptyRow(sheet *Sheet) *Row {
	row := new(Row)
	row.Cells = make([]*Cell, 0)
	row.Sheet = sheet
	return row
}
+
// sharedFormula records the origin coordinates and text of a shared
// formula so that later cells referencing the same formula index can
// derive their own shifted copy.
type sharedFormula struct {
	x, y    int    // zero-based coordinates of the formula's origin cell
	formula string // the formula text as stored at the origin cell
}
+
// formulaForCell returns the formula text for a raw cell, expanding
// shared formulas where necessary.  For a shared formula's origin
// cell (the one carrying a Ref attribute) the text is cached in
// sharedFormulas under its Si index; for subsequent cells the cached
// text is re-written with every relative cell reference shifted by
// the offset between this cell and the origin.  The result is
// trimmed of surrounding whitespace.
func formulaForCell(rawcell xlsxC, sharedFormulas map[int]sharedFormula) string {
	var res string

	f := rawcell.F
	if f == nil {
		return ""
	}
	if f.T == "shared" {
		x, y, err := getCoordsFromCellIDString(rawcell.R)
		if err != nil {
			res = f.Content
		} else {
			if f.Ref != "" {
				// Origin cell of the shared formula: cache it.
				res = f.Content
				sharedFormulas[f.Si] = sharedFormula{x, y, res}
			} else {
				// Derived cell: shift each cell reference in the
				// cached origin formula by this cell's offset.
				sharedFormula := sharedFormulas[f.Si]
				dx := x - sharedFormula.x
				dy := y - sharedFormula.y
				orig := []byte(sharedFormula.formula)
				var start, end int
				var stringLiteral bool
				for end = 0; end < len(orig); end++ {
					c := orig[end]

					if c == '"' {
						stringLiteral = !stringLiteral
					}

					if stringLiteral {
						continue // Skip characters in quotes
					}

					// A capital letter (or '$') may begin a cell
					// reference such as A1 or $B$2.
					if c >= 'A' && c <= 'Z' || c == '$' {
						res += string(orig[start:end])
						start = end
						end++
						foundNum := false
						// Scan forward over the letters/digits/'$'
						// of the candidate reference; it only counts
						// as a reference if digits were seen.
						for ; end < len(orig); end++ {
							idc := orig[end]
							if idc >= '0' && idc <= '9' || idc == '$' {
								foundNum = true
							} else if idc >= 'A' && idc <= 'Z' {
								if foundNum {
									break
								}
							} else {
								break
							}
						}
						if foundNum {
							cellID := string(orig[start:end])
							res += shiftCell(cellID, dx, dy)
							start = end
						}
					}
				}
				// Append any trailing non-reference text.
				if start < len(orig) {
					res += string(orig[start:])
				}
			}
		}
	} else {
		res = f.Content
	}
	return strings.Trim(res, " \t\n\r")
}
+
// shiftCell returns the cell shifted according to dx and dy taking
// into consideration absolute references with dollar sign ($): a
// column prefixed with '$' is not shifted horizontally and a row
// prefixed with '$' is not shifted vertically.
func shiftCell(cellID string, dx, dy int) string {
	fx, fy, _ := getCoordsFromCellIDString(cellID)

	// Is fixed column?  (The '$' precedes the column letters.)
	fixedCol := strings.Index(cellID, "$") == 0

	// Is fixed row?  (A '$' appears later, before the row number.)
	fixedRow := strings.LastIndex(cellID, "$") > 0

	if !fixedCol {
		// Shift column
		fx += dx
	}

	if !fixedRow {
		// Shift row
		fy += dy
	}

	// New shifted cell
	shiftedCellID := getCellIDStringFromCoords(fx, fy)

	if !fixedCol && !fixedRow {
		return shiftedCellID
	}

	// There are absolute references, need to put the $ back into the formula.
	letterPart := strings.Map(letterOnlyMapF, shiftedCellID)
	numberPart := strings.Map(intOnlyMapF, shiftedCellID)

	result := ""

	if fixedCol {
		result += "$"
	}

	result += letterPart

	if fixedRow {
		result += "$"
	}

	result += numberPart

	return result
}
+
// fillCellData attempts to extract a valid value, usable in
// CSV form, from the raw cell value and stores it (with its type and
// any formula) in cell.  Note - this is not actually general enough -
// we should support retaining tabs and newlines.
// Note: it panics if a shared-string index cannot be parsed.
func fillCellData(rawcell xlsxC, reftable *RefTable, sharedFormulas map[int]sharedFormula, cell *Cell) {
	var data string = rawcell.V
	if len(data) > 0 {
		vval := strings.Trim(data, " \t\n\r")
		switch rawcell.T {
		case "s": // Shared String
			ref, error := strconv.Atoi(vval)
			if error != nil {
				panic(error)
			}
			cell.Value = reftable.ResolveSharedString(ref)
			cell.cellType = CellTypeString
		case "b": // Boolean
			cell.Value = vval
			cell.cellType = CellTypeBool
		case "e": // Error
			cell.Value = vval
			cell.formula = formulaForCell(rawcell, sharedFormulas)
			cell.cellType = CellTypeError
		default:
			if rawcell.F == nil {
				// Numeric
				cell.Value = vval
				cell.cellType = CellTypeNumeric
			} else {
				// Formula
				cell.Value = vval
				cell.formula = formulaForCell(rawcell, sharedFormulas)
				cell.cellType = CellTypeFormula
			}
		}
	}
}
+
+// readRowsFromSheet is an internal helper function that extracts the
+// rows from a XSLXWorksheet, populates them with Cells and resolves
+// the value references from the reference table and stores them in
+// the rows and columns.
+//
+// It returns (rows, cols, colCount, rowCount) where the counts are
+// derived from the worksheet's declared (or calculated) dimensions.
+// Malformed dimension/cell references are reported via panic, which is
+// recovered by readSheetFromFile and turned into an error.
+func readRowsFromSheet(Worksheet *xlsxWorksheet, file *File, sheet *Sheet) ([]*Row, []*Col, int, int) {
+	var rows []*Row
+	var cols []*Col
+	var row *Row
+	var minCol, maxCol, minRow, maxRow, colCount, rowCount int
+	var reftable *RefTable
+	var err error
+	// insertRowIndex / insertColIndex track the next zero-based slot to
+	// fill, so gaps left by omitted blank rows/cells can be padded.
+	var insertRowIndex, insertColIndex int
+	// Shared formulas encountered so far, keyed by their formula index,
+	// so later cells referencing them can be resolved by fillCellData.
+	sharedFormulas := map[int]sharedFormula{}
+
+	// An empty sheet: nothing to resolve.
+	if len(Worksheet.SheetData.Row) == 0 {
+		return nil, nil, 0, 0
+	}
+	reftable = file.referenceTable
+	// Prefer the dimensions declared in the file; fall back to scanning
+	// the sheet data when the <dimension> element is absent.
+	if len(Worksheet.Dimension.Ref) > 0 {
+		minCol, minRow, maxCol, maxRow, err = getMaxMinFromDimensionRef(Worksheet.Dimension.Ref)
+	} else {
+		minCol, minRow, maxCol, maxRow, err = calculateMaxMinFromWorksheet(Worksheet)
+	}
+	if err != nil {
+		// Recovered by readSheetFromFile and surfaced as result.Error.
+		panic(err.Error())
+	}
+
+	rowCount = maxRow + 1
+	colCount = maxCol + 1
+	rows = make([]*Row, rowCount)
+	cols = make([]*Col, colCount)
+	insertRowIndex = minRow
+	for i := range cols {
+		cols[i] = &Col{
+			Hidden: false,
+		}
+	}
+
+	if Worksheet.Cols != nil {
+		// Columns can apply to a range, for convenience we expand the
+		// ranges out into individual column definitions.
+		for _, rawcol := range Worksheet.Cols.Col {
+			// Note, below, that sometimes column definitions can
+			// exist outside the defined dimensions of the
+			// spreadsheet - we deliberately exclude these
+			// columns.
+			for i := rawcol.Min; i <= rawcol.Max && i <= colCount; i++ {
+				col := &Col{
+					Min:          rawcol.Min,
+					Max:          rawcol.Max,
+					Hidden:       rawcol.Hidden,
+					Width:        rawcol.Width,
+					OutlineLevel: rawcol.OutlineLevel}
+				cols[i-1] = col
+				if file.styles != nil {
+					col.style = file.styles.getStyle(rawcol.Style)
+					col.numFmt = file.styles.getNumberFormat(rawcol.Style)
+				}
+			}
+		}
+	}
+
+	// insert leading empty rows that is in front of minRow
+	for rowIndex := 0; rowIndex < minRow; rowIndex++ {
+		rows[rowIndex] = makeEmptyRow(sheet)
+	}
+
+	numRows := len(rows)
+	for rowIndex := 0; rowIndex < len(Worksheet.SheetData.Row); rowIndex++ {
+		rawrow := Worksheet.SheetData.Row[rowIndex]
+		// Some spreadsheets will omit blank rows from the
+		// stored data
+		for rawrow.R > (insertRowIndex + 1) {
+			// Put an empty Row into the array
+			if insertRowIndex < numRows {
+				rows[insertRowIndex] = makeEmptyRow(sheet)
+			}
+			insertRowIndex++
+		}
+		// range is not empty and only one range exist
+		if len(rawrow.Spans) != 0 && strings.Count(rawrow.Spans, ":") == 1 {
+			row = makeRowFromSpan(rawrow.Spans, sheet)
+		} else {
+			row = makeRowFromRaw(rawrow, sheet)
+		}
+
+		row.Hidden = rawrow.Hidden
+		// Ht is stored as text; an unparsable value simply leaves the
+		// default height in place.
+		height, err := strconv.ParseFloat(rawrow.Ht, 64)
+		if err == nil {
+			row.Height = height
+		}
+		row.isCustom = rawrow.CustomHeight
+		row.OutlineLevel = rawrow.OutlineLevel
+
+		insertColIndex = minCol
+		for _, rawcell := range rawrow.C {
+			// h/v are the horizontal/vertical merge extents for cells
+			// that anchor a merged range (0 for unmerged cells).
+			h, v, err := Worksheet.MergeCells.getExtent(rawcell.R)
+			if err != nil {
+				panic(err.Error())
+			}
+			x, _, _ := getCoordsFromCellIDString(rawcell.R)
+
+			// K1000000: Prevent panic when the range specified in the spreadsheet
+			//           view exceeds the actual number of columns in the dataset.
+
+			// Some spreadsheets will omit blank cells
+			// from the data.
+			for x > insertColIndex {
+				// Put an empty Cell into the array
+				if insertColIndex < len(row.Cells) {
+					row.Cells[insertColIndex] = new(Cell)
+				}
+				insertColIndex++
+			}
+			cellX := insertColIndex
+
+			// NOTE(review): cells that fall outside the declared
+			// dimensions (cellX >= len(row.Cells)) are silently
+			// dropped here — confirm this is the intended behaviour.
+			if cellX < len(row.Cells) {
+				cell := row.Cells[cellX]
+				cell.HMerge = h
+				cell.VMerge = v
+				fillCellData(rawcell, reftable, sharedFormulas, cell)
+				if file.styles != nil {
+					cell.style = file.styles.getStyle(rawcell.S)
+					cell.NumFmt = file.styles.getNumberFormat(rawcell.S)
+				}
+				cell.date1904 = file.Date1904
+				// Cell is considered hidden if the row or the column of this cell is hidden
+				cell.Hidden = rawrow.Hidden || (len(cols) > cellX && cols[cellX].Hidden)
+				insertColIndex++
+			}
+		}
+		if len(rows) > insertRowIndex {
+			rows[insertRowIndex] = row
+		}
+		insertRowIndex++
+	}
+	return rows, cols, colCount, rowCount
+}
+
+// indexedSheet pairs a parsed Sheet (or the error that prevented
+// parsing it) with its position in the workbook, so results arriving
+// out of order on a channel can be slotted back into place.
+type indexedSheet struct {
+	Index int
+	Sheet *Sheet
+	Error error
+}
+
+// readSheetViews converts the raw xlsxSheetViews element into the
+// high-level SheetView representation.  Only the Pane component of
+// each view is currently mapped.  A nil slice is returned when the
+// worksheet declares no views.
+func readSheetViews(xSheetViews xlsxSheetViews) []SheetView {
+	// len() of a nil slice is 0, so this single check covers both the
+	// absent and the empty case (gosimple S1009).
+	if len(xSheetViews.SheetView) == 0 {
+		return nil
+	}
+	sheetViews := []SheetView{}
+	for _, xSheetView := range xSheetViews.SheetView {
+		sheetView := SheetView{}
+		if xSheetView.Pane != nil {
+			xlsxPane := xSheetView.Pane
+			sheetView.Pane = &Pane{
+				XSplit:      xlsxPane.XSplit,
+				YSplit:      xlsxPane.YSplit,
+				TopLeftCell: xlsxPane.TopLeftCell,
+				ActivePane:  xlsxPane.ActivePane,
+				State:       xlsxPane.State,
+			}
+		}
+		sheetViews = append(sheetViews, sheetView)
+	}
+	return sheetViews
+}
+
+// readSheetFromFile is the logic of converting a xlsxSheet struct
+// into a Sheet struct.  This work can be done in parallel and so
+// readSheetsFromZipFile will spawn an instance of this function per
+// sheet and get the results back on the provided channel.
+//
+// Any panic raised while reading (e.g. from a malformed cell
+// reference) is recovered and reported through the channel as an
+// indexedSheet carrying a non-nil Error, so one bad sheet cannot
+// crash the whole read.
+func readSheetFromFile(sc chan *indexedSheet, index int, rsheet xlsxSheet, fi *File, sheetXMLMap map[string]string) {
+	result := &indexedSheet{Index: index, Sheet: nil, Error: nil}
+	defer func() {
+		if e := recover(); e != nil {
+			switch e := e.(type) {
+			case error:
+				result.Error = e
+			default:
+				result.Error = errors.New("unexpected error")
+			}
+			// The only thing here, is if one close the channel. but its not the case
+			sc <- result
+		}
+	}()
+
+	// Previously this variable was named "error", shadowing the
+	// builtin error type; renamed to the conventional "err".
+	worksheet, err := getWorksheetFromSheet(rsheet, fi.worksheets, sheetXMLMap)
+	if err != nil {
+		result.Error = err
+		sc <- result
+		return
+	}
+	sheet := new(Sheet)
+	sheet.File = fi
+	sheet.Rows, sheet.Cols, sheet.MaxCol, sheet.MaxRow = readRowsFromSheet(worksheet, fi, sheet)
+	sheet.Hidden = rsheet.State == sheetStateHidden || rsheet.State == sheetStateVeryHidden
+	sheet.SheetViews = readSheetViews(worksheet.SheetViews)
+
+	sheet.SheetFormat.DefaultColWidth = worksheet.SheetFormatPr.DefaultColWidth
+	sheet.SheetFormat.DefaultRowHeight = worksheet.SheetFormatPr.DefaultRowHeight
+	sheet.SheetFormat.OutlineLevelCol = worksheet.SheetFormatPr.OutlineLevelCol
+	sheet.SheetFormat.OutlineLevelRow = worksheet.SheetFormatPr.OutlineLevelRow
+
+	result.Sheet = sheet
+	sc <- result
+}
+
+// readSheetsFromZipFile is an internal helper function that loops
+// over the Worksheets defined in the XSLXWorkbook and loads them into
+// Sheet objects stored in the Sheets slice of a xlsx.File struct.
+//
+// It returns both a name-keyed map and an index-ordered slice of the
+// same Sheet pointers.  Sheets are parsed by readSheetFromFile, which
+// reports results on sheetChan.
+func readSheetsFromZipFile(f *zip.File, file *File, sheetXMLMap map[string]string) (map[string]*Sheet, []*Sheet, error) {
+	var workbook *xlsxWorkbook
+	var err error
+	var rc io.ReadCloser
+	var decoder *xml.Decoder
+	var sheetCount int
+	workbook = new(xlsxWorkbook)
+	rc, err = f.Open()
+	if err != nil {
+		return nil, nil, err
+	}
+	decoder = xml.NewDecoder(rc)
+	err = decoder.Decode(workbook)
+	if err != nil {
+		return nil, nil, err
+	}
+	file.Date1904 = workbook.WorkbookPr.Date1904
+
+	// Index into the slice so we take the address of the stored
+	// element, not of the loop variable.
+	for entryNum := range workbook.DefinedNames.DefinedName {
+		file.DefinedNames = append(file.DefinedNames, &workbook.DefinedNames.DefinedName[entryNum])
+	}
+
+	// Only try and read sheets that have corresponding files.
+	// Notably this excludes chartsheets don't right now
+	var workbookSheets []xlsxSheet
+	for _, sheet := range workbook.Sheets.Sheet {
+		if f := worksheetFileForSheet(sheet, file.worksheets, sheetXMLMap); f != nil {
+			workbookSheets = append(workbookSheets, sheet)
+		}
+	}
+	sheetCount = len(workbookSheets)
+	sheetsByName := make(map[string]*Sheet, sheetCount)
+	sheets := make([]*Sheet, sheetCount)
+	// Buffered to sheetCount so the producer never blocks on send.
+	sheetChan := make(chan *indexedSheet, sheetCount)
+	defer close(sheetChan)
+
+	go func() {
+		// NOTE(review): this write to err is never read after the
+		// goroutine starts (per-sheet errors travel via sheetChan) and
+		// races with the enclosing function — confirm it can be removed.
+		err = nil
+		for i, rawsheet := range workbookSheets {
+			readSheetFromFile(sheetChan, i, rawsheet, file, sheetXMLMap)
+		}
+	}()
+
+	for j := 0; j < sheetCount; j++ {
+		sheet := <-sheetChan
+		if sheet.Error != nil {
+			// NOTE(review): returning early closes sheetChan (deferred
+			// above) while the producer goroutine may still send to it,
+			// which would panic in that goroutine — verify.
+			return nil, nil, sheet.Error
+		}
+		sheetName := workbookSheets[sheet.Index].Name
+		sheetsByName[sheetName] = sheet.Sheet
+		sheet.Sheet.Name = sheetName
+		sheets[sheet.Index] = sheet.Sheet
+	}
+	return sheetsByName, sheets, nil
+}
+
+// readSharedStringsFromZipFile() is an internal helper function to
+// extract a reference table from the sharedStrings.xml file within
+// the XLSX zip file.
+func readSharedStringsFromZipFile(f *zip.File) (*RefTable, error) {
+	var sst *xlsxSST
+	var error error
+	var rc io.ReadCloser
+	var decoder *xml.Decoder
+	var reftable *RefTable
+
+	// In a file with no strings it's possible that
+	// sharedStrings.xml doesn't exist.  In this case the value
+	// passed as f will be nil.
+	if f == nil {
+		return nil, nil
+	}
+	rc, error = f.Open()
+	if error != nil {
+		return nil, error
+	}
+	sst = new(xlsxSST)
+	decoder = xml.NewDecoder(rc)
+	error = decoder.Decode(sst)
+	if error != nil {
+		return nil, error
+	}
+	reftable = MakeSharedStringRefTable(sst)
+	return reftable, nil
+}
+
+// readStylesFromZipFile() is an internal helper function to
+// extract a style table from the style.xml file within
+// the XLSX zip file.
+func readStylesFromZipFile(f *zip.File, theme *theme) (*xlsxStyleSheet, error) {
+	var style *xlsxStyleSheet
+	var error error
+	var rc io.ReadCloser
+	var decoder *xml.Decoder
+	rc, error = f.Open()
+	if error != nil {
+		return nil, error
+	}
+	style = newXlsxStyleSheet(theme)
+	decoder = xml.NewDecoder(rc)
+	error = decoder.Decode(style)
+	if error != nil {
+		return nil, error
+	}
+	buildNumFmtRefTable(style)
+	return style, nil
+}
+
+// buildNumFmtRefTable registers every custom number format declared in
+// the style sheet with the sheet's NumFmt lookup table.
+func buildNumFmtRefTable(style *xlsxStyleSheet) {
+	for _, numFmt := range style.NumFmts.NumFmt {
+		// We do this for the side effect of populating the NumFmtRefTable.
+		style.addNumFmt(numFmt)
+	}
+}
+
+// readThemeFromZipFile parses xl/theme/theme1.xml from the XLSX
+// package and returns the resulting theme (used for colour
+// resolution in styles).
+func readThemeFromZipFile(f *zip.File) (*theme, error) {
+	rc, err := f.Open()
+	if err != nil {
+		return nil, err
+	}
+	// The original implementation leaked this ReadCloser.
+	defer rc.Close()
+
+	var themeXml xlsxTheme
+	if err := xml.NewDecoder(rc).Decode(&themeXml); err != nil {
+		return nil, err
+	}
+
+	return newTheme(themeXml), nil
+}
+
+// WorkBookRels maps relationship IDs ("rId1", "rId2", ...) to the
+// worksheet XML file names they refer to.
+type WorkBookRels map[string]string
+
+// MakeXLSXWorkbookRels converts the map into an xlsxWorkbookRels
+// structure ready for serialisation.  Worksheet relationships are
+// placed at the slice position encoded in their "rIdN" key, and three
+// fixed relationships (sharedStrings, theme, styles) are appended
+// after them — hence the "+3" when sizing the slice.
+func (w *WorkBookRels) MakeXLSXWorkbookRels() xlsxWorkbookRels {
+	relCount := len(*w)
+	xWorkbookRels := xlsxWorkbookRels{}
+	xWorkbookRels.Relationships = make([]xlsxWorkbookRelation, relCount+3)
+	for k, v := range *w {
+		// k[3:] strips the "rId" prefix; keys are expected to be
+		// well-formed because this package generates them.
+		index, err := strconv.Atoi(k[3:])
+		if err != nil {
+			panic(err.Error())
+		}
+		xWorkbookRels.Relationships[index-1] = xlsxWorkbookRelation{
+			Id:     k,
+			Target: v,
+			Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"}
+	}
+
+	relCount++
+	sheetId := fmt.Sprintf("rId%d", relCount)
+	xWorkbookRels.Relationships[relCount-1] = xlsxWorkbookRelation{
+		Id:     sheetId,
+		Target: "sharedStrings.xml",
+		Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings"}
+
+	relCount++
+	sheetId = fmt.Sprintf("rId%d", relCount)
+	xWorkbookRels.Relationships[relCount-1] = xlsxWorkbookRelation{
+		Id:     sheetId,
+		Target: "theme/theme1.xml",
+		Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme"}
+
+	relCount++
+	sheetId = fmt.Sprintf("rId%d", relCount)
+	xWorkbookRels.Relationships[relCount-1] = xlsxWorkbookRelation{
+		Id:     sheetId,
+		Target: "styles.xml",
+		Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles"}
+
+	return xWorkbookRels
+}
+
+// readWorkbookRelationsFromZipFile is an internal helper function to
+// extract a map of relationship ID strings to the name of the
+// worksheet.xml file they refer to.  The resulting map can be used to
+// reliably derefence the worksheets in the XLSX file.
+func readWorkbookRelationsFromZipFile(workbookRels *zip.File) (WorkBookRels, error) {
+	var sheetXMLMap WorkBookRels
+	var wbRelationships *xlsxWorkbookRels
+	var rc io.ReadCloser
+	var decoder *xml.Decoder
+	var err error
+
+	rc, err = workbookRels.Open()
+	if err != nil {
+		return nil, err
+	}
+	decoder = xml.NewDecoder(rc)
+	wbRelationships = new(xlsxWorkbookRels)
+	err = decoder.Decode(wbRelationships)
+	if err != nil {
+		return nil, err
+	}
+	sheetXMLMap = make(WorkBookRels)
+	for _, rel := range wbRelationships.Relationships {
+		if strings.HasSuffix(rel.Target, ".xml") && rel.Type == "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet" {
+			_, filename := path.Split(rel.Target)
+			sheetXMLMap[rel.Id] = strings.Replace(filename, ".xml", "", 1)
+		}
+	}
+	return sheetXMLMap, nil
+}
+
+// ReadZip() takes a pointer to a zip.ReadCloser and returns a
+// xlsx.File struct populated with its contents.  In most cases
+// ReadZip is not used directly, but is called internally by OpenFile.
+// The ReadCloser is always closed before returning.
+func ReadZip(f *zip.ReadCloser) (*File, error) {
+	defer f.Close()
+	return ReadZipReader(&f.Reader)
+}
+
+// ReadZipReader() can be used to read an XLSX in memory without
+// touching the filesystem.
+//
+// It walks the zip directory collecting the well-known workbook parts
+// (workbook, relationships, shared strings, styles, theme and the
+// individual worksheets), then parses them in dependency order into a
+// populated *File.
+func ReadZipReader(r *zip.Reader) (*File, error) {
+	var err error
+	var file *File
+	var reftable *RefTable
+	var sharedStrings *zip.File
+	var sheetXMLMap map[string]string
+	var sheetsByName map[string]*Sheet
+	var sheets []*Sheet
+	var style *xlsxStyleSheet
+	var styles *zip.File
+	var themeFile *zip.File
+	var v *zip.File
+	var workbook *zip.File
+	var workbookRels *zip.File
+	var worksheets map[string]*zip.File
+
+	file = NewFile()
+	worksheets = make(map[string]*zip.File, len(r.File))
+	for _, v = range r.File {
+		switch v.Name {
+		case "xl/sharedStrings.xml":
+			sharedStrings = v
+		case "xl/workbook.xml":
+			workbook = v
+		case "xl/_rels/workbook.xml.rels":
+			workbookRels = v
+		case "xl/styles.xml":
+			styles = v
+		case "xl/theme/theme1.xml":
+			themeFile = v
+		default:
+			// Worksheet parts live under xl/worksheets/.  Key the map
+			// by the bare file name without its ".xml" extension; this
+			// replaces the previous fragile fixed-offset slicing of
+			// v.Name.
+			if strings.HasPrefix(v.Name, "xl/worksheets/") && strings.HasSuffix(v.Name, ".xml") {
+				name := strings.TrimSuffix(strings.TrimPrefix(v.Name, "xl/worksheets/"), ".xml")
+				worksheets[name] = v
+			}
+		}
+	}
+	if workbookRels == nil {
+		return nil, fmt.Errorf("xl/_rels/workbook.xml.rels not found in input xlsx.")
+	}
+	sheetXMLMap, err = readWorkbookRelationsFromZipFile(workbookRels)
+	if err != nil {
+		return nil, err
+	}
+	if len(worksheets) == 0 {
+		return nil, fmt.Errorf("Input xlsx contains no worksheets.")
+	}
+	file.worksheets = worksheets
+	reftable, err = readSharedStringsFromZipFile(sharedStrings)
+	if err != nil {
+		return nil, err
+	}
+	file.referenceTable = reftable
+	// Theme and styles are optional parts; styles depend on the theme
+	// for colour resolution, so the theme is read first.
+	if themeFile != nil {
+		theme, err := readThemeFromZipFile(themeFile)
+		if err != nil {
+			return nil, err
+		}
+
+		file.theme = theme
+	}
+	if styles != nil {
+		style, err = readStylesFromZipFile(styles, file.theme)
+		if err != nil {
+			return nil, err
+		}
+
+		file.styles = style
+	}
+	sheetsByName, sheets, err = readSheetsFromZipFile(workbook, file, sheetXMLMap)
+	if err != nil {
+		return nil, err
+	}
+	if sheets == nil {
+		readerErr := new(XLSXReaderError)
+		readerErr.Err = "No sheets found in XLSX File"
+		return nil, readerErr
+	}
+	file.Sheet = sheetsByName
+	file.Sheets = sheets
+	return file, nil
+}

+ 77 - 0
vendor/github.com/tealeg/xlsx/reftable.go

@@ -0,0 +1,77 @@
+package xlsx
+
+// RefTable is a registry of shared strings: indexedStrings holds the
+// strings in reference order, knownStrings maps a string back to its
+// index, and isWrite selects de-duplicating (write) behaviour in
+// AddString.
+type RefTable struct {
+	indexedStrings []string
+	knownStrings   map[string]int
+	isWrite        bool
+}
+
+// NewSharedStringRefTable() creates a new, empty RefTable with its
+// lookup map initialised (isWrite defaults to false).
+func NewSharedStringRefTable() *RefTable {
+	rt := RefTable{}
+	rt.knownStrings = make(map[string]int)
+	return &rt
+}
+
+// MakeSharedStringRefTable builds a read-mode RefTable from a parsed
+// sharedStrings part (xlsxSST).  Rich-text entries (those with <r>
+// runs) are flattened by concatenating the text of every run; plain
+// entries contribute their <t> text directly.  The resulting indices
+// match the numeric references stored in worksheet cells.
+func MakeSharedStringRefTable(source *xlsxSST) *RefTable {
+	reftable := NewSharedStringRefTable()
+	reftable.isWrite = false
+	for _, si := range source.SI {
+		if len(si.R) == 0 {
+			// Simple string item.
+			reftable.AddString(si.T)
+			continue
+		}
+		// Rich-text item: flatten the runs into one string.
+		flattened := ""
+		for _, run := range si.R {
+			flattened += run.T
+		}
+		reftable.AddString(flattened)
+	}
+	return reftable
+}
+
+// makeXLSXSST converts the RefTable back into an xlsxSST structure
+// suitable for serialisation as sharedStrings.xml.  Count and
+// UniqueCount are both set to the table length, mirroring how the
+// table was populated.
+func (rt *RefTable) makeXLSXSST() xlsxSST {
+	sst := xlsxSST{}
+	sst.Count = len(rt.indexedStrings)
+	sst.UniqueCount = sst.Count
+	for _, stored := range rt.indexedStrings {
+		sst.SI = append(sst.SI, xlsxSI{T: stored})
+	}
+	return sst
+}
+
+// ResolveSharedString() looks up a string value by numeric index from
+// a provided reference table (just a slice of strings in the correct
+// order).  This function only exists to provide clarity of purpose
+// via its name.  An out-of-range index panics (slice bounds).
+func (rt *RefTable) ResolveSharedString(index int) string {
+	return rt.indexedStrings[index]
+}
+
+// AddString adds a string to the reference table and returns its
+// numeric index.  In write mode (isWrite true) an already-known
+// string simply returns its existing index; in read mode every call
+// appends, preserving the file's original ordering including
+// duplicates.
+func (rt *RefTable) AddString(str string) int {
+	if rt.isWrite {
+		index, ok := rt.knownStrings[str]
+		if ok {
+			return index
+		}
+	}
+	rt.indexedStrings = append(rt.indexedStrings, str)
+	index := len(rt.indexedStrings) - 1
+	rt.knownStrings[str] = index
+	return index
+}
+
+// Length returns the number of strings currently in the table.
+func (rt *RefTable) Length() int {
+	return len(rt.indexedStrings)
+}

+ 22 - 0
vendor/github.com/tealeg/xlsx/row.go

@@ -0,0 +1,22 @@
+package xlsx
+
+// Row represents one row of a Sheet: its cells, visibility, height
+// (in points) and outline level.  isCustom records whether the height
+// was set explicitly rather than inherited from the sheet default.
+type Row struct {
+	Cells        []*Cell
+	Hidden       bool
+	Sheet        *Sheet
+	Height       float64
+	OutlineLevel uint8
+	isCustom     bool
+}
+
+// SetHeightCM sets the row height from a value in centimetres and
+// marks the height as custom.  The factor is 72 points per inch
+// divided by 2.54 cm per inch.
+func (r *Row) SetHeightCM(ht float64) {
+	r.Height = ht * 28.3464567 // Convert CM to postscript points
+	r.isCustom = true
+}
+
+// AddCell appends a new empty Cell to the row and returns it, growing
+// the sheet's column list if the row has become wider than the sheet.
+func (r *Row) AddCell() *Cell {
+	cell := NewCell(r)
+	r.Cells = append(r.Cells, cell)
+	r.Sheet.maybeAddCol(len(r.Cells))
+	return cell
+}
+ 395 - 0
vendor/github.com/tealeg/xlsx/sheet.go

@@ -0,0 +1,395 @@
+package xlsx
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Sheet is a high level structure intended to provide user access to
+// the contents of a particular sheet within an XLSX file.
+// MaxRow/MaxCol track the sheet's current extent; Hidden and Selected
+// mirror the workbook's view state for this sheet.
+type Sheet struct {
+	Name        string
+	File        *File
+	Rows        []*Row
+	Cols        []*Col
+	MaxRow      int
+	MaxCol      int
+	Hidden      bool
+	Selected    bool
+	SheetViews  []SheetView
+	SheetFormat SheetFormat
+}
+
+// SheetView describes one view of a sheet; only the Pane (split or
+// frozen region) is currently modelled.
+type SheetView struct {
+	Pane *Pane
+}
+
+// Pane describes a split or frozen region of a sheet view: the split
+// positions, the first visible cell and which pane is active.
+type Pane struct {
+	XSplit      float64
+	YSplit      float64
+	TopLeftCell string
+	ActivePane  string
+	State       string // Either "split" or "frozen"
+}
+
+// SheetFormat carries the sheet-wide default column width, row height
+// and the maximum outline (grouping) levels for columns and rows.
+type SheetFormat struct {
+	DefaultColWidth  float64
+	DefaultRowHeight float64
+	OutlineLevelCol  uint8
+	OutlineLevelRow  uint8
+}
+
+// AddRow appends a fresh, empty Row to the sheet and returns it,
+// bumping MaxRow when the sheet has grown taller.
+func (s *Sheet) AddRow() *Row {
+	newRow := &Row{Sheet: s}
+	s.Rows = append(s.Rows, newRow)
+	if rowCount := len(s.Rows); rowCount > s.MaxRow {
+		s.MaxRow = rowCount
+	}
+	return newRow
+}
+
+// maybeAddCol makes sure we always have as many Cols as we do cells:
+// if cellCount exceeds the current MaxCol, one new Col covering that
+// column is appended and MaxCol is updated.
+func (s *Sheet) maybeAddCol(cellCount int) {
+	if cellCount > s.MaxCol {
+		col := &Col{
+			style:     NewStyle(),
+			Min:       cellCount,
+			Max:       cellCount,
+			Hidden:    false,
+			Collapsed: false}
+		s.Cols = append(s.Cols, col)
+		s.MaxCol = cellCount
+	}
+}
+
+// Col returns the Col at the given zero-based index, extending the
+// sheet's column list first if necessary.  (The previous comment here
+// was copy-pasted from maybeAddCol and did not describe this method.)
+func (s *Sheet) Col(idx int) *Col {
+	s.maybeAddCol(idx + 1)
+	return s.Cols[idx]
+}
+
+// Cell returns the cell at the given zero-based (row, col) cartesian
+// coordinates, growing the sheet as needed so the requested position
+// always exists.
+//
+// For example:
+//
+//    cell := sheet.Cell(0,0)
+//
+// ... sets "cell" to the Cell struct holding the data of field "A1".
+func (sh *Sheet) Cell(row, col int) *Cell {
+
+	// Extend the sheet downwards until the requested row exists.
+	for len(sh.Rows) <= row {
+		sh.AddRow()
+	}
+
+	// Extend the target row rightwards until the requested column exists.
+	targetRow := sh.Rows[row]
+	for len(targetRow.Cells) <= col {
+		targetRow.AddCell()
+	}
+
+	return targetRow.Cells[col]
+}
+
+// SetColWidth sets the width of a single column or a contiguous range
+// of columns (zero-based, inclusive).  It returns an error when
+// startcol exceeds endcol.
+func (s *Sheet) SetColWidth(startcol, endcol int, width float64) error {
+	if startcol > endcol {
+		return fmt.Errorf("Could not set width for range %d-%d: startcol must be less than endcol.", startcol, endcol)
+	}
+	// Column definitions are stored 1-based in the XLSX model.
+	newCol := &Col{
+		style:     NewStyle(),
+		Min:       startcol + 1,
+		Max:       endcol + 1,
+		Hidden:    false,
+		Collapsed: false,
+		Width:     width}
+	s.Cols = append(s.Cols, newCol)
+	if lastCol := endcol + 1; lastCol > s.MaxCol {
+		s.MaxCol = lastCol
+	}
+	return nil
+}
+
+// When merging cells, the cell may be the 'original' or the 'covered'.
+// First, figure out which cells are merge starting points. Then create
+// the necessary cells underlying the merge area.
+// Then go through all the underlying cells and apply the appropriate
+// border, based on the original cell.
+func (s *Sheet) handleMerged() {
+	// Map of "A1"-style coordinates to the anchor cell of each merge.
+	merged := make(map[string]*Cell)
+
+	for r, row := range s.Rows {
+		for c, cell := range row.Cells {
+			if cell.HMerge > 0 || cell.VMerge > 0 {
+				coord := fmt.Sprintf("%s%d", numericToLetters(c), r+1)
+				merged[coord] = cell
+			}
+		}
+	}
+
+	// This loop iterates over all cells that should be merged and applies the correct
+	// borders to them depending on their position. If any cells required by the merge
+	// are missing, they will be allocated by s.Cell().
+	for key, cell := range merged {
+		mainstyle := cell.GetStyle()
+
+		// Remember the anchor's borders: they are re-applied to the
+		// outer edges of the merged area below.
+		top := mainstyle.Border.Top
+		left := mainstyle.Border.Left
+		right := mainstyle.Border.Right
+		bottom := mainstyle.Border.Bottom
+
+		// When merging cells, the upper left cell does not maintain
+		// the original borders
+		mainstyle.Border.Top = "none"
+		mainstyle.Border.Left = "none"
+		mainstyle.Border.Right = "none"
+		mainstyle.Border.Bottom = "none"
+
+		maincol, mainrow, _ := getCoordsFromCellIDString(key)
+		for rownum := 0; rownum <= cell.VMerge; rownum++ {
+			for colnum := 0; colnum <= cell.HMerge; colnum++ {
+				// s.Cell allocates the underlying cell if it does not
+				// exist yet.
+				tmpcell := s.Cell(mainrow+rownum, maincol+colnum)
+				style := tmpcell.GetStyle()
+				style.ApplyBorder = true
+
+				// Edge cells of the merged area get the anchor's
+				// corresponding outer border.
+				if rownum == 0 {
+					style.Border.Top = top
+				}
+
+				if rownum == (cell.VMerge) {
+					style.Border.Bottom = bottom
+				}
+
+				if colnum == 0 {
+					style.Border.Left = left
+				}
+
+				if colnum == (cell.HMerge) {
+					style.Border.Right = right
+				}
+			}
+		}
+	}
+}
+
+// Dump sheet to its XML representation, intended for internal use only.
+// Builds a complete xlsxWorksheet from the Sheet: views, format
+// defaults, column definitions, row/cell data (with styles registered
+// into the shared style sheet and strings into refTable), merge-cell
+// ranges and the final dimension reference.
+func (s *Sheet) makeXLSXSheet(refTable *RefTable, styles *xlsxStyleSheet) *xlsxWorksheet {
+	worksheet := newXlsxWorksheet()
+	xSheet := xlsxSheetData{}
+	maxRow := 0
+	maxCell := 0
+	var maxLevelCol, maxLevelRow uint8
+
+	// Scan through the sheet and see if there are any merged cells. If there
+	// are, we may need to extend the size of the sheet. There needs to be
+	// phantom cells underlying the area covered by the merged cell
+	s.handleMerged()
+
+	// NOTE(review): this assumes newXlsxWorksheet pre-populates at
+	// least as many SheetView entries as s.SheetViews holds — confirm.
+	for index, sheetView := range s.SheetViews {
+		if sheetView.Pane != nil {
+			worksheet.SheetViews.SheetView[index].Pane = &xlsxPane{
+				XSplit:      sheetView.Pane.XSplit,
+				YSplit:      sheetView.Pane.YSplit,
+				TopLeftCell: sheetView.Pane.TopLeftCell,
+				ActivePane:  sheetView.Pane.ActivePane,
+				State:       sheetView.Pane.State,
+			}
+
+		}
+	}
+
+	if s.Selected {
+		worksheet.SheetViews.SheetView[0].TabSelected = true
+	}
+
+	if s.SheetFormat.DefaultRowHeight != 0 {
+		worksheet.SheetFormatPr.DefaultRowHeight = s.SheetFormat.DefaultRowHeight
+	}
+	worksheet.SheetFormatPr.DefaultColWidth = s.SheetFormat.DefaultColWidth
+
+	// colsXfIdList caches the xf (style) index registered for each
+	// column so cells can inherit it below.
+	colsXfIdList := make([]int, len(s.Cols))
+	worksheet.Cols = &xlsxCols{Col: []xlsxCol{}}
+	for c, col := range s.Cols {
+		XfId := 0
+		if col.Min == 0 {
+			col.Min = 1
+		}
+		if col.Max == 0 {
+			col.Max = 1
+		}
+		style := col.GetStyle()
+		//col's style always not nil
+		if style != nil {
+			xNumFmt := styles.newNumFmt(col.numFmt)
+			XfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)
+		}
+		colsXfIdList[c] = XfId
+
+		var customWidth int
+		if col.Width == 0 {
+			col.Width = ColWidth
+		} else {
+			customWidth = 1
+		}
+		worksheet.Cols.Col = append(worksheet.Cols.Col,
+			xlsxCol{Min: col.Min,
+				Max:          col.Max,
+				Hidden:       col.Hidden,
+				Width:        col.Width,
+				CustomWidth:  customWidth,
+				Collapsed:    col.Collapsed,
+				OutlineLevel: col.OutlineLevel,
+				Style:        XfId,
+			})
+
+		if col.OutlineLevel > maxLevelCol {
+			maxLevelCol = col.OutlineLevel
+		}
+	}
+
+	for r, row := range s.Rows {
+		if r > maxRow {
+			maxRow = r
+		}
+		xRow := xlsxRow{}
+		xRow.R = r + 1
+		if row.isCustom {
+			xRow.CustomHeight = true
+			xRow.Ht = fmt.Sprintf("%g", row.Height)
+		}
+		xRow.OutlineLevel = row.OutlineLevel
+		if row.OutlineLevel > maxLevelRow {
+			maxLevelRow = row.OutlineLevel
+		}
+		for c, cell := range row.Cells {
+			// NOTE(review): assumes every row is no wider than s.Cols
+			// (guaranteed when cells are added via AddCell) — confirm.
+			XfId := colsXfIdList[c]
+
+			// generate NumFmtId and add new NumFmt
+			xNumFmt := styles.newNumFmt(cell.NumFmt)
+
+			// A cell-level style overrides the column style; otherwise
+			// a cell-level number format alone gets its own xf entry.
+			style := cell.style
+			if style != nil {
+				XfId = handleStyleForXLSX(style, xNumFmt.NumFmtId, styles)
+			} else if len(cell.NumFmt) > 0 && s.Cols[c].numFmt != cell.NumFmt {
+				XfId = handleNumFmtIdForXLSX(xNumFmt.NumFmtId, styles)
+			}
+
+			if c > maxCell {
+				maxCell = c
+			}
+			xC := xlsxC{}
+			xC.R = fmt.Sprintf("%s%d", numericToLetters(c), r+1)
+			// Map the cell type onto the XLSX "t" attribute and, for
+			// strings, replace the value with a shared-string index.
+			switch cell.cellType {
+			case CellTypeString:
+				if len(cell.Value) > 0 {
+					xC.V = strconv.Itoa(refTable.AddString(cell.Value))
+				}
+				xC.T = "s"
+				xC.S = XfId
+			case CellTypeBool:
+				xC.V = cell.Value
+				xC.T = "b"
+				xC.S = XfId
+			case CellTypeNumeric:
+				xC.V = cell.Value
+				xC.S = XfId
+			case CellTypeDate:
+				xC.V = cell.Value
+				xC.S = XfId
+			case CellTypeFormula:
+				xC.V = cell.Value
+				xC.F = &xlsxF{Content: cell.formula}
+				xC.S = XfId
+			case CellTypeError:
+				xC.V = cell.Value
+				xC.F = &xlsxF{Content: cell.formula}
+				xC.T = "e"
+				xC.S = XfId
+			case CellTypeGeneral:
+				xC.V = cell.Value
+				xC.S = XfId
+			}
+
+			xRow.C = append(xRow.C, xC)
+
+			// Merge anchors additionally emit a mergeCell range like
+			// "A1:B2".
+			if cell.HMerge > 0 || cell.VMerge > 0 {
+				// r == rownum, c == colnum
+				mc := xlsxMergeCell{}
+				start := fmt.Sprintf("%s%d", numericToLetters(c), r+1)
+				endcol := c + cell.HMerge
+				endrow := r + cell.VMerge + 1
+				end := fmt.Sprintf("%s%d", numericToLetters(endcol), endrow)
+				mc.Ref = start + ":" + end
+				if worksheet.MergeCells == nil {
+					worksheet.MergeCells = &xlsxMergeCells{}
+				}
+				worksheet.MergeCells.Cells = append(worksheet.MergeCells.Cells, mc)
+			}
+		}
+		xSheet.Row = append(xSheet.Row, xRow)
+	}
+
+	// Update sheet format with the freshly determined max levels
+	s.SheetFormat.OutlineLevelCol = maxLevelCol
+	s.SheetFormat.OutlineLevelRow = maxLevelRow
+	// .. and then also apply this to the xml worksheet
+	worksheet.SheetFormatPr.OutlineLevelCol = s.SheetFormat.OutlineLevelCol
+	worksheet.SheetFormatPr.OutlineLevelRow = s.SheetFormat.OutlineLevelRow
+
+	if worksheet.MergeCells != nil {
+		worksheet.MergeCells.Count = len(worksheet.MergeCells.Cells)
+	}
+
+	worksheet.SheetData = xSheet
+	dimension := xlsxDimension{}
+	dimension.Ref = fmt.Sprintf("A1:%s%d",
+		numericToLetters(maxCell), maxRow+1)
+	// A single-cell sheet uses the short form "A1", not "A1:A1".
+	if dimension.Ref == "A1:A1" {
+		dimension.Ref = "A1"
+	}
+	worksheet.Dimension = dimension
+	return worksheet
+}
+
+// handleStyleForXLSX registers the font, fill, border and alignment of
+// a high-level Style with the shared style sheet and returns the index
+// of the resulting cell xf record.
+func handleStyleForXLSX(style *Style, NumFmtId int, styles *xlsxStyleSheet) (XfId int) {
+	xFont, xFill, xBorder, xCellXf := style.makeXLSXStyleElements()
+	fontId := styles.addFont(xFont)
+	fillId := styles.addFill(xFill)
+
+	// HACK - adding light grey fill, as in OO and Google
+	greyfill := xlsxFill{}
+	greyfill.PatternFill.PatternType = "lightGray"
+	styles.addFill(greyfill)
+
+	borderId := styles.addBorder(xBorder)
+	xCellXf.FontId = fontId
+	xCellXf.FillId = fillId
+	xCellXf.BorderId = borderId
+	xCellXf.NumFmtId = NumFmtId
+	// apply the numFmtId when it is not the default cellxf
+	if xCellXf.NumFmtId > 0 {
+		xCellXf.ApplyNumberFormat = true
+	}
+
+	xCellXf.Alignment.Horizontal = style.Alignment.Horizontal
+	xCellXf.Alignment.Indent = style.Alignment.Indent
+	xCellXf.Alignment.ShrinkToFit = style.Alignment.ShrinkToFit
+	xCellXf.Alignment.TextRotation = style.Alignment.TextRotation
+	xCellXf.Alignment.Vertical = style.Alignment.Vertical
+	xCellXf.Alignment.WrapText = style.Alignment.WrapText
+
+	XfId = styles.addCellXf(xCellXf)
+	return
+}
+
+// handleNumFmtIdForXLSX registers a cell xf that carries only a number
+// format (no font/fill/border) and returns its index in the style
+// sheet.
+func handleNumFmtIdForXLSX(NumFmtId int, styles *xlsxStyleSheet) (XfId int) {
+	xf := makeXLSXCellElement()
+	xf.NumFmtId = NumFmtId
+	// Only non-default number formats need the apply flag.
+	xf.ApplyNumberFormat = xf.NumFmtId > 0
+	XfId = styles.addCellXf(xf)
+	return
+}

+ 180 - 0
vendor/github.com/tealeg/xlsx/style.go

@@ -0,0 +1,180 @@
+package xlsx
+
+import "strconv"
+
+// Style is a high level structure intended to provide user access to
+// the contents of Style within an XLSX file.  The Apply* flags control
+// which components are written into the cell xf record;
+// NamedStyleIndex, when set, links the xf to a named style.
+type Style struct {
+	Border          Border
+	Fill            Fill
+	Font            Font
+	ApplyBorder     bool
+	ApplyFill       bool
+	ApplyFont       bool
+	ApplyAlignment  bool
+	Alignment       Alignment
+	NamedStyleIndex *int
+}
+
+// NewStyle returns a Style populated with the package defaults for
+// alignment, border, fill and font.
+func NewStyle() *Style {
+	style := &Style{}
+	style.Alignment = *DefaultAlignment()
+	style.Border = *DefaultBorder()
+	style.Fill = *DefaultFill()
+	style.Font = *DefaultFont()
+	return style
+}
+
+// Generate the underlying XLSX style elements that correspond to the
+// Style: a font, a fill, a border and a cell xf record, each ready to
+// be registered with the shared style sheet.
+func (style *Style) makeXLSXStyleElements() (xFont xlsxFont, xFill xlsxFill, xBorder xlsxBorder, xCellXf xlsxXf) {
+	xFont = xlsxFont{}
+	xFill = xlsxFill{}
+	xBorder = xlsxBorder{}
+	xCellXf = xlsxXf{}
+	xFont.Sz.Val = strconv.Itoa(style.Font.Size)
+	xFont.Name.Val = style.Font.Name
+	xFont.Family.Val = strconv.Itoa(style.Font.Family)
+	xFont.Charset.Val = strconv.Itoa(style.Font.Charset)
+	xFont.Color.RGB = style.Font.Color
+	// Bold/italic/underline are encoded as presence/absence of empty
+	// child elements, hence the pointer-vs-nil pattern below.
+	if style.Font.Bold {
+		xFont.B = &xlsxVal{}
+	} else {
+		xFont.B = nil
+	}
+	if style.Font.Italic {
+		xFont.I = &xlsxVal{}
+	} else {
+		xFont.I = nil
+	}
+	if style.Font.Underline {
+		xFont.U = &xlsxVal{}
+	} else {
+		xFont.U = nil
+	}
+	xPatternFill := xlsxPatternFill{}
+	xPatternFill.PatternType = style.Fill.PatternType
+	xPatternFill.FgColor.RGB = style.Fill.FgColor
+	xPatternFill.BgColor.RGB = style.Fill.BgColor
+	xFill.PatternFill = xPatternFill
+	xBorder.Left = xlsxLine{
+		Style: style.Border.Left,
+		Color: xlsxColor{RGB: style.Border.LeftColor},
+	}
+	xBorder.Right = xlsxLine{
+		Style: style.Border.Right,
+		Color: xlsxColor{RGB: style.Border.RightColor},
+	}
+	xBorder.Top = xlsxLine{
+		Style: style.Border.Top,
+		Color: xlsxColor{RGB: style.Border.TopColor},
+	}
+	xBorder.Bottom = xlsxLine{
+		Style: style.Border.Bottom,
+		Color: xlsxColor{RGB: style.Border.BottomColor},
+	}
+	xCellXf = makeXLSXCellElement()
+	xCellXf.ApplyBorder = style.ApplyBorder
+	xCellXf.ApplyFill = style.ApplyFill
+	xCellXf.ApplyFont = style.ApplyFont
+	xCellXf.ApplyAlignment = style.ApplyAlignment
+	if style.NamedStyleIndex != nil {
+		xCellXf.XfId = style.NamedStyleIndex
+	}
+	return
+}
+
+// makeXLSXCellElement returns a cell xf record initialised with the
+// default (general) number format.
+func makeXLSXCellElement() (xCellXf xlsxXf) {
+	xCellXf.NumFmtId = 0
+	return
+}
+
+// Border is a high level structure intended to provide user access to
+// the contents of Border Style within an Sheet.  Each side carries a
+// line style name (e.g. "thin", "none") and an RGB colour.
+type Border struct {
+	Left        string
+	LeftColor   string
+	Right       string
+	RightColor  string
+	Top         string
+	TopColor    string
+	Bottom      string
+	BottomColor string
+}
+
+// NewBorder builds a Border with the given line styles on each side;
+// the side colours are left at their zero value.
+func NewBorder(left, right, top, bottom string) *Border {
+	return &Border{
+		Left:   left,
+		Right:  right,
+		Top:    top,
+		Bottom: bottom,
+	}
+}
+
+// Fill is a high level structure intended to provide user access to
+// the contents of background and foreground color index within an Sheet.
+type Fill struct {
+	PatternType string
+	BgColor     string
+	FgColor     string
+}
+
+// NewFill builds a Fill with the given pattern type and foreground/
+// background colours.
+func NewFill(patternType, fgColor, bgColor string) *Fill {
+	return &Fill{
+		PatternType: patternType,
+		FgColor:     fgColor,
+		BgColor:     bgColor,
+	}
+}
+
+// Font describes the typeface attributes of a Style: size, family,
+// charset, colour and the bold/italic/underline flags.
+type Font struct {
+	Size      int
+	Name      string
+	Family    int
+	Charset   int
+	Color     string
+	Bold      bool
+	Italic    bool
+	Underline bool
+}
+
+// NewFont builds a Font with the given size and face name; all other
+// attributes take their zero values.
+func NewFont(size int, name string) *Font {
+	return &Font{Size: size, Name: name}
+}
+
+// Alignment describes how cell content is positioned: horizontal and
+// vertical placement, indentation, rotation, shrink-to-fit and
+// wrapping.
+type Alignment struct {
+	Horizontal   string
+	Indent       int
+	ShrinkToFit  bool
+	TextRotation int
+	Vertical     string
+	WrapText     bool
+}
+
+// Package-level defaults used by DefaultFont; changeable via
+// SetDefaultFont.
+var defaultFontSize = 12
+var defaultFontName = "Verdana"
+
+// SetDefaultFont changes the size and face name returned by
+// DefaultFont for all styles created afterwards.
+func SetDefaultFont(size int, name string) {
+	defaultFontSize = size
+	defaultFontName = name
+}
+
+// DefaultFont returns a Font built from the package defaults.
+func DefaultFont() *Font {
+	return NewFont(defaultFontSize, defaultFontName)
+}
+
+// DefaultFill returns the default fill: no pattern, white foreground,
+// black background.
+func DefaultFill() *Fill {
+	return NewFill("none", "FFFFFFFF", "00000000")
+
+}
+
+// DefaultBorder returns a border with no line on any side.
+func DefaultBorder() *Border {
+	return NewBorder("none", "none", "none", "none")
+}
+
+// DefaultAlignment returns the default alignment: general horizontal,
+// bottom vertical.
+func DefaultAlignment() *Alignment {
+	return &Alignment{
+		Horizontal: "general",
+		Vertical:   "bottom",
+	}
+}

+ 339 - 0
vendor/github.com/tealeg/xlsx/templates.go

@@ -0,0 +1,339 @@
+// This file contains default templates for XML parts that we don't yet
+// populate based on content.
+
+package xlsx
+
+// TEMPLATE__RELS_DOT_RELS is the default _rels/.rels package-relationship
+// part, linking the package to the workbook and to the core and extended
+// document-property parts.
+const TEMPLATE__RELS_DOT_RELS = `<?xml version="1.0" encoding="UTF-8"?>
+<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
+  <Relationship Id="rId1" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument" Target="xl/workbook.xml"/>
+  <Relationship Id="rId2" Type="http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties" Target="docProps/core.xml"/>
+  <Relationship Id="rId3" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/extended-properties" Target="docProps/app.xml"/>
+</Relationships>`
+
+// TEMPLATE_DOCPROPS_APP is the default docProps/app.xml extended-properties
+// part, identifying the generating application as "Go XLSX".
+const TEMPLATE_DOCPROPS_APP = `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
+  <TotalTime>0</TotalTime>
+  <Application>Go XLSX</Application>
+</Properties>`
+
+// TEMPLATE_DOCPROPS_CORE is the default docProps/core.xml core-properties
+// part; it declares the standard namespaces but carries no property values.
+const TEMPLATE_DOCPROPS_CORE = `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cp:coreProperties xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/core-properties" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcmitype="http://purl.org/dc/dcmitype/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"></cp:coreProperties>`
+
+// TEMPLATE_XL_THEME_THEME is the default xl/theme/theme1.xml part: the
+// stock "Office" DrawingML theme (colour scheme, Cambria/Calibri font
+// scheme with per-script fallbacks, and format scheme) emitted verbatim.
+const TEMPLATE_XL_THEME_THEME = `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<a:theme xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" name="Office-Design">
+  <a:themeElements>
+    <a:clrScheme name="Office">
+      <a:dk1>
+        <a:sysClr val="windowText" lastClr="000000"/>
+      </a:dk1>
+      <a:lt1>
+        <a:sysClr val="window" lastClr="FFFFFF"/>
+      </a:lt1>
+      <a:dk2>
+        <a:srgbClr val="1F497D"/>
+      </a:dk2>
+      <a:lt2>
+        <a:srgbClr val="EEECE1"/>
+      </a:lt2>
+      <a:accent1>
+        <a:srgbClr val="4F81BD"/>
+      </a:accent1>
+      <a:accent2>
+        <a:srgbClr val="C0504D"/>
+      </a:accent2>
+      <a:accent3>
+        <a:srgbClr val="9BBB59"/>
+      </a:accent3>
+      <a:accent4>
+        <a:srgbClr val="8064A2"/>
+      </a:accent4>
+      <a:accent5>
+        <a:srgbClr val="4BACC6"/>
+      </a:accent5>
+      <a:accent6>
+        <a:srgbClr val="F79646"/>
+      </a:accent6>
+      <a:hlink>
+        <a:srgbClr val="0000FF"/>
+      </a:hlink>
+      <a:folHlink>
+        <a:srgbClr val="800080"/>
+      </a:folHlink>
+    </a:clrScheme>
+    <a:fontScheme name="Office">
+      <a:majorFont>
+        <a:latin typeface="Cambria"/>
+        <a:ea typeface=""/>
+        <a:cs typeface=""/>
+        <a:font script="Jpan" typeface="MS Pゴシック"/>
+        <a:font script="Hang" typeface="맑은 고딕"/>
+        <a:font script="Hans" typeface="宋体"/>
+        <a:font script="Hant" typeface="新細明體"/>
+        <a:font script="Arab" typeface="Times New Roman"/>
+        <a:font script="Hebr" typeface="Times New Roman"/>
+        <a:font script="Thai" typeface="Tahoma"/>
+        <a:font script="Ethi" typeface="Nyala"/>
+        <a:font script="Beng" typeface="Vrinda"/>
+        <a:font script="Gujr" typeface="Shruti"/>
+        <a:font script="Khmr" typeface="MoolBoran"/>
+        <a:font script="Knda" typeface="Tunga"/>
+        <a:font script="Guru" typeface="Raavi"/>
+        <a:font script="Cans" typeface="Euphemia"/>
+        <a:font script="Cher" typeface="Plantagenet Cherokee"/>
+        <a:font script="Yiii" typeface="Microsoft Yi Baiti"/>
+        <a:font script="Tibt" typeface="Microsoft Himalaya"/>
+        <a:font script="Thaa" typeface="MV Boli"/>
+        <a:font script="Deva" typeface="Mangal"/>
+        <a:font script="Telu" typeface="Gautami"/>
+        <a:font script="Taml" typeface="Latha"/>
+        <a:font script="Syrc" typeface="Estrangelo Edessa"/>
+        <a:font script="Orya" typeface="Kalinga"/>
+        <a:font script="Mlym" typeface="Kartika"/>
+        <a:font script="Laoo" typeface="DokChampa"/>
+        <a:font script="Sinh" typeface="Iskoola Pota"/>
+        <a:font script="Mong" typeface="Mongolian Baiti"/>
+        <a:font script="Viet" typeface="Times New Roman"/>
+        <a:font script="Uigh" typeface="Microsoft Uighur"/>
+        <a:font script="Geor" typeface="Sylfaen"/>
+      </a:majorFont>
+      <a:minorFont>
+        <a:latin typeface="Calibri"/>
+        <a:ea typeface=""/>
+        <a:cs typeface=""/>
+        <a:font script="Jpan" typeface="MS Pゴシック"/>
+        <a:font script="Hang" typeface="맑은 고딕"/>
+        <a:font script="Hans" typeface="宋体"/>
+        <a:font script="Hant" typeface="新細明體"/>
+        <a:font script="Arab" typeface="Arial"/>
+        <a:font script="Hebr" typeface="Arial"/>
+        <a:font script="Thai" typeface="Tahoma"/>
+        <a:font script="Ethi" typeface="Nyala"/>
+        <a:font script="Beng" typeface="Vrinda"/>
+        <a:font script="Gujr" typeface="Shruti"/>
+        <a:font script="Khmr" typeface="DaunPenh"/>
+        <a:font script="Knda" typeface="Tunga"/>
+        <a:font script="Guru" typeface="Raavi"/>
+        <a:font script="Cans" typeface="Euphemia"/>
+        <a:font script="Cher" typeface="Plantagenet Cherokee"/>
+        <a:font script="Yiii" typeface="Microsoft Yi Baiti"/>
+        <a:font script="Tibt" typeface="Microsoft Himalaya"/>
+        <a:font script="Thaa" typeface="MV Boli"/>
+        <a:font script="Deva" typeface="Mangal"/>
+        <a:font script="Telu" typeface="Gautami"/>
+        <a:font script="Taml" typeface="Latha"/>
+        <a:font script="Syrc" typeface="Estrangelo Edessa"/>
+        <a:font script="Orya" typeface="Kalinga"/>
+        <a:font script="Mlym" typeface="Kartika"/>
+        <a:font script="Laoo" typeface="DokChampa"/>
+        <a:font script="Sinh" typeface="Iskoola Pota"/>
+        <a:font script="Mong" typeface="Mongolian Baiti"/>
+        <a:font script="Viet" typeface="Arial"/>
+        <a:font script="Uigh" typeface="Microsoft Uighur"/>
+        <a:font script="Geor" typeface="Sylfaen"/>
+      </a:minorFont>
+    </a:fontScheme>
+    <a:fmtScheme name="Office">
+      <a:fillStyleLst>
+        <a:solidFill>
+          <a:schemeClr val="phClr"/>
+        </a:solidFill>
+        <a:gradFill rotWithShape="1">
+          <a:gsLst>
+            <a:gs pos="0">
+              <a:schemeClr val="phClr">
+                <a:tint val="50000"/>
+                <a:satMod val="300000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="35000">
+              <a:schemeClr val="phClr">
+                <a:tint val="37000"/>
+                <a:satMod val="300000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="100000">
+              <a:schemeClr val="phClr">
+                <a:tint val="15000"/>
+                <a:satMod val="350000"/>
+              </a:schemeClr>
+            </a:gs>
+          </a:gsLst>
+          <a:lin ang="16200000" scaled="1"/>
+        </a:gradFill>
+        <a:gradFill rotWithShape="1">
+          <a:gsLst>
+            <a:gs pos="0">
+              <a:schemeClr val="phClr">
+                <a:tint val="100000"/>
+                <a:shade val="100000"/>
+                <a:satMod val="130000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="100000">
+              <a:schemeClr val="phClr">
+                <a:tint val="50000"/>
+                <a:shade val="100000"/>
+                <a:satMod val="350000"/>
+              </a:schemeClr>
+            </a:gs>
+          </a:gsLst>
+          <a:lin ang="16200000" scaled="0"/>
+        </a:gradFill>
+      </a:fillStyleLst>
+      <a:lnStyleLst>
+        <a:ln w="9525" cap="flat" cmpd="sng" algn="ctr">
+          <a:solidFill>
+            <a:schemeClr val="phClr">
+              <a:shade val="95000"/>
+              <a:satMod val="105000"/>
+            </a:schemeClr>
+          </a:solidFill>
+          <a:prstDash val="solid"/>
+        </a:ln>
+        <a:ln w="25400" cap="flat" cmpd="sng" algn="ctr">
+          <a:solidFill>
+            <a:schemeClr val="phClr"/>
+          </a:solidFill>
+          <a:prstDash val="solid"/>
+        </a:ln>
+        <a:ln w="38100" cap="flat" cmpd="sng" algn="ctr">
+          <a:solidFill>
+            <a:schemeClr val="phClr"/>
+          </a:solidFill>
+          <a:prstDash val="solid"/>
+        </a:ln>
+      </a:lnStyleLst>
+      <a:effectStyleLst>
+        <a:effectStyle>
+          <a:effectLst>
+            <a:outerShdw blurRad="40000" dist="20000" dir="5400000" rotWithShape="0">
+              <a:srgbClr val="000000">
+                <a:alpha val="38000"/>
+              </a:srgbClr>
+            </a:outerShdw>
+          </a:effectLst>
+        </a:effectStyle>
+        <a:effectStyle>
+          <a:effectLst>
+            <a:outerShdw blurRad="40000" dist="23000" dir="5400000" rotWithShape="0">
+              <a:srgbClr val="000000">
+                <a:alpha val="35000"/>
+              </a:srgbClr>
+            </a:outerShdw>
+          </a:effectLst>
+        </a:effectStyle>
+        <a:effectStyle>
+          <a:effectLst>
+            <a:outerShdw blurRad="40000" dist="23000" dir="5400000" rotWithShape="0">
+              <a:srgbClr val="000000">
+                <a:alpha val="35000"/>
+              </a:srgbClr>
+            </a:outerShdw>
+          </a:effectLst>
+          <a:scene3d>
+            <a:camera prst="orthographicFront">
+              <a:rot lat="0" lon="0" rev="0"/>
+            </a:camera>
+            <a:lightRig rig="threePt" dir="t">
+              <a:rot lat="0" lon="0" rev="1200000"/>
+            </a:lightRig>
+          </a:scene3d>
+          <a:sp3d>
+            <a:bevelT w="63500" h="25400"/>
+          </a:sp3d>
+        </a:effectStyle>
+      </a:effectStyleLst>
+      <a:bgFillStyleLst>
+        <a:solidFill>
+          <a:schemeClr val="phClr"/>
+        </a:solidFill>
+        <a:gradFill rotWithShape="1">
+          <a:gsLst>
+            <a:gs pos="0">
+              <a:schemeClr val="phClr">
+                <a:tint val="40000"/>
+                <a:satMod val="350000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="40000">
+              <a:schemeClr val="phClr">
+                <a:tint val="45000"/>
+                <a:shade val="99000"/>
+                <a:satMod val="350000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="100000">
+              <a:schemeClr val="phClr">
+                <a:shade val="20000"/>
+                <a:satMod val="255000"/>
+              </a:schemeClr>
+            </a:gs>
+          </a:gsLst>
+          <a:path path="circle">
+            <a:fillToRect l="50000" t="-80000" r="50000" b="180000"/>
+          </a:path>
+        </a:gradFill>
+        <a:gradFill rotWithShape="1">
+          <a:gsLst>
+            <a:gs pos="0">
+              <a:schemeClr val="phClr">
+                <a:tint val="80000"/>
+                <a:satMod val="300000"/>
+              </a:schemeClr>
+            </a:gs>
+            <a:gs pos="100000">
+              <a:schemeClr val="phClr">
+                <a:shade val="30000"/>
+                <a:satMod val="200000"/>
+              </a:schemeClr>
+            </a:gs>
+          </a:gsLst>
+          <a:path path="circle">
+            <a:fillToRect l="50000" t="50000" r="50000" b="50000"/>
+          </a:path>
+        </a:gradFill>
+      </a:bgFillStyleLst>
+    </a:fmtScheme>
+  </a:themeElements>
+  <a:objectDefaults>
+    <a:spDef>
+      <a:spPr/>
+      <a:bodyPr/>
+      <a:lstStyle/>
+      <a:style>
+        <a:lnRef idx="1">
+          <a:schemeClr val="accent1"/>
+        </a:lnRef>
+        <a:fillRef idx="3">
+          <a:schemeClr val="accent1"/>
+        </a:fillRef>
+        <a:effectRef idx="2">
+          <a:schemeClr val="accent1"/>
+        </a:effectRef>
+        <a:fontRef idx="minor">
+          <a:schemeClr val="lt1"/>
+        </a:fontRef>
+      </a:style>
+    </a:spDef>
+    <a:lnDef>
+      <a:spPr/>
+      <a:bodyPr/>
+      <a:lstStyle/>
+      <a:style>
+        <a:lnRef idx="2">
+          <a:schemeClr val="accent1"/>
+        </a:lnRef>
+        <a:fillRef idx="0">
+          <a:schemeClr val="accent1"/>
+        </a:fillRef>
+        <a:effectRef idx="1">
+          <a:schemeClr val="accent1"/>
+        </a:effectRef>
+        <a:fontRef idx="minor">
+          <a:schemeClr val="tx1"/>
+        </a:fontRef>
+      </a:style>
+    </a:lnDef>
+  </a:objectDefaults>
+  <a:extraClrSchemeLst/>
+</a:theme>`

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác