
vendor: upgrade "gopkg.in/yaml.v2"

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
Gyuho Lee, 7 years ago
commit 8cedb48570

+ 2 - 1
Gopkg.lock

@@ -353,7 +353,8 @@
 [[projects]]
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"
 
 [solve-meta]
   analyzer-name = "dep"

+ 198 - 10
vendor/gopkg.in/yaml.v2/LICENSE

@@ -1,13 +1,201 @@
-Copyright 2011-2016 Canonical Ltd.
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
-    http://www.apache.org/licenses/LICENSE-2.0
+   1. Definitions.
 
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 13 - 0
vendor/gopkg.in/yaml.v2/NOTICE

@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 26 - 29
vendor/gopkg.in/yaml.v2/apic.go

@@ -2,7 +2,6 @@ package yaml
 
 import (
 	"io"
-	"os"
 )
 
 func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
 	return n, nil
 }
 
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
-	return parser.input_file.Read(buffer)
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
 }
 
 // Set a string input.
@@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
 }
 
 // Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
 	if parser.read_handler != nil {
 		panic("must set the input source only once")
 	}
-	parser.read_handler = yaml_file_read_handler
-	parser.input_file = file
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
 }
 
 // Set the source encoding.
@@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
 }
 
 // Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
 	*emitter = yaml_emitter_t{
 		buffer:     make([]byte, output_buffer_size),
 		raw_buffer: make([]byte, 0, output_raw_buffer_size),
 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
 		events:     make([]yaml_event_t, 0, initial_queue_size),
 	}
-	return true
 }
 
 // Destroy an emitter object.
@@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
 	return nil
 }
 
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	_, err := emitter.output_file.Write(buffer)
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
 	return err
 }
 
@@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
 }
 
 // Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
 	if emitter.write_handler != nil {
 		panic("must set the output target only once")
 	}
-	emitter.write_handler = yaml_file_write_handler
-	emitter.output_file = file
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
 }
 
 // Set the output encoding.
@@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
 //
 
 // Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
 	*event = yaml_event_t{
 		typ:      yaml_STREAM_START_EVENT,
 		encoding: encoding,
 	}
-	return true
 }
 
 // Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_STREAM_END_EVENT,
 	}
-	return true
 }
 
 // Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
-	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
 	*event = yaml_event_t{
 		typ:               yaml_DOCUMENT_START_EVENT,
 		version_directive: version_directive,
 		tag_directives:    tag_directives,
 		implicit:          implicit,
 	}
-	return true
 }
 
 // Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
 	*event = yaml_event_t{
 		typ:      yaml_DOCUMENT_END_EVENT,
 		implicit: implicit,
 	}
-	return true
 }
 
 ///*
@@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
 }
 
 // Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
 	*event = yaml_event_t{
 		typ:      yaml_MAPPING_START_EVENT,
 		anchor:   anchor,
@@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
 		implicit: implicit,
 		style:    yaml_style_t(style),
 	}
-	return true
 }
 
 // Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_MAPPING_END_EVENT,
 	}
-	return true
 }
 
 // Destroy an event object.
@@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) {
 //    } context
 //    tag_directive *yaml_tag_directive_t
 //
-//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
 //
 //    assert(document) // Non-NULL document object is expected.
 //
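
The apic.go changes above swap the file-specific read/write handlers for generic io.Reader/io.Writer handlers (yaml_parser_set_input_reader, yaml_emitter_set_output_writer) and drop the always-true bool returns from the event initializers. A minimal sketch of the same set-once handler indirection, using hypothetical names (miniParser, setInputReader) rather than the vendored identifiers:

package main

import (
	"fmt"
	"io"
	"strings"
)

// miniParser mimics the shape of the vendored parser: a read handler
// is installed exactly once and later used to refill the buffer.
type miniParser struct {
	readHandler func(buf []byte) (int, error)
	input       io.Reader
}

// setInputReader mirrors yaml_parser_set_input_reader: it wires a
// generic reader-backed handler and panics if a source was already set.
func (p *miniParser) setInputReader(r io.Reader) {
	if p.readHandler != nil {
		panic("must set the input source only once")
	}
	p.input = r
	p.readHandler = func(buf []byte) (int, error) { return p.input.Read(buf) }
}

func main() {
	p := &miniParser{}
	p.setInputReader(strings.NewReader("hello: world\n"))
	buf := make([]byte, 16)
	n, _ := p.readHandler(buf)
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}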

+ 169 - 76
vendor/gopkg.in/yaml.v2/decode.go

@@ -4,6 +4,7 @@ import (
 	"encoding"
 	"encoding/base64"
 	"fmt"
+	"io"
 	"math"
 	"reflect"
 	"strconv"
@@ -22,19 +23,22 @@ type node struct {
 	kind         int
 	line, column int
 	tag          string
-	value        string
-	implicit     bool
-	children     []*node
-	anchors      map[string]*node
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
 }
 
 // ----------------------------------------------------------------------------
 // Parser, produces a node tree out of a libyaml event stream.
 
 type parser struct {
-	parser yaml_parser_t
-	event  yaml_event_t
-	doc    *node
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
 }
 
 func newParser(b []byte) *parser {
@@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
 	if !yaml_parser_initialize(&p.parser) {
 		panic("failed to initialize YAML emitter")
 	}
-
 	if len(b) == 0 {
 		b = []byte{'\n'}
 	}
-
 	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
 
-	p.skip()
-	if p.event.typ != yaml_STREAM_START_EVENT {
-		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
 	}
-	p.skip()
+	yaml_parser_set_input_reader(&p.parser, r)
 	return &p
 }
 
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
 func (p *parser) destroy() {
 	if p.event.typ != yaml_NO_EVENT {
 		yaml_event_delete(&p.event)
@@ -64,16 +77,35 @@ func (p *parser) destroy() {
 	yaml_parser_delete(&p.parser)
 }
 
-func (p *parser) skip() {
-	if p.event.typ != yaml_NO_EVENT {
-		if p.event.typ == yaml_STREAM_END_EVENT {
-			failf("attempted to go past the end of stream; corrupted value?")
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
 		}
-		yaml_event_delete(&p.event)
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
 	}
 	if !yaml_parser_parse(&p.parser, &p.event) {
 		p.fail()
 	}
+	return p.event.typ
 }
 
 func (p *parser) fail() {
@@ -81,6 +113,10 @@ func (p *parser) fail() {
 	var line int
 	if p.parser.problem_mark.line != 0 {
 		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
 	} else if p.parser.context_mark.line != 0 {
 		line = p.parser.context_mark.line
 	}
@@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
 }
 
 func (p *parser) parse() *node {
-	switch p.event.typ {
+	p.init()
+	switch p.peek() {
 	case yaml_SCALAR_EVENT:
 		return p.scalar()
 	case yaml_ALIAS_EVENT:
@@ -118,7 +155,7 @@ func (p *parser) parse() *node {
 		// Happens when attempting to decode an empty buffer.
 		return nil
 	default:
-		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+		panic("attempted to parse unknown event: " + p.event.typ.String())
 	}
 }
 
@@ -134,19 +171,20 @@ func (p *parser) document() *node {
 	n := p.node(documentNode)
 	n.anchors = make(map[string]*node)
 	p.doc = n
-	p.skip()
+	p.expect(yaml_DOCUMENT_START_EVENT)
 	n.children = append(n.children, p.parse())
-	if p.event.typ != yaml_DOCUMENT_END_EVENT {
-		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
-	}
-	p.skip()
+	p.expect(yaml_DOCUMENT_END_EVENT)
 	return n
 }
 
 func (p *parser) alias() *node {
 	n := p.node(aliasNode)
 	n.value = string(p.event.anchor)
-	p.skip()
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
 	return n
 }
 
@@ -156,29 +194,29 @@ func (p *parser) scalar() *node {
 	n.tag = string(p.event.tag)
 	n.implicit = p.event.implicit
 	p.anchor(n, p.event.anchor)
-	p.skip()
+	p.expect(yaml_SCALAR_EVENT)
 	return n
 }
 
 func (p *parser) sequence() *node {
 	n := p.node(sequenceNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
 		n.children = append(n.children, p.parse())
 	}
-	p.skip()
+	p.expect(yaml_SEQUENCE_END_EVENT)
 	return n
 }
 
 func (p *parser) mapping() *node {
 	n := p.node(mappingNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_MAPPING_END_EVENT {
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
 		n.children = append(n.children, p.parse(), p.parse())
 	}
-	p.skip()
+	p.expect(yaml_MAPPING_END_EVENT)
 	return n
 }
 
@@ -187,9 +225,10 @@ func (p *parser) mapping() *node {
 
 type decoder struct {
 	doc     *node
-	aliases map[string]bool
+	aliases map[*node]bool
 	mapType reflect.Type
 	terrors []string
+	strict  bool
 }
 
 var (
@@ -197,11 +236,13 @@ var (
 	durationType   = reflect.TypeOf(time.Duration(0))
 	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
 	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
 )
 
-func newDecoder() *decoder {
-	d := &decoder{mapType: defaultMapType}
-	d.aliases = make(map[string]bool)
+func newDecoder(strict bool) *decoder {
+	d := &decoder{mapType: defaultMapType, strict: strict}
+	d.aliases = make(map[*node]bool)
 	return d
 }
 
@@ -250,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
 //
 // If n holds a null value, prepare returns before doing anything.
 func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
-	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
 		return out, false, false
 	}
 	again := true
@@ -307,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
 }
 
 func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
-	an, ok := d.doc.anchors[n.value]
-	if !ok {
-		failf("unknown anchor '%s' referenced", n.value)
-	}
-	if d.aliases[n.value] {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
 		failf("anchor '%s' value contains itself", n.value)
 	}
-	d.aliases[n.value] = true
-	good = d.unmarshal(an, out)
-	delete(d.aliases, n.value)
+	d.aliases[n] = true
+	good = d.unmarshal(n.alias, out)
+	delete(d.aliases, n)
 	return good
 }
 
@@ -328,7 +366,7 @@ func resetMap(out reflect.Value) {
 	}
 }
 
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
 	var tag string
 	var resolved interface{}
 	if n.tag == "" && !n.implicit {
@@ -352,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		}
 		return true
 	}
-	if s, ok := resolved.(string); ok && out.CanAddr() {
-		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
-			err := u.UnmarshalText([]byte(s))
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
 			if err != nil {
 				fail(err)
 			}
@@ -365,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 	case reflect.String:
 		if tag == yaml_BINARY_TAG {
 			out.SetString(resolved.(string))
-			good = true
-		} else if resolved != nil {
+			return true
+		}
+		if resolved != nil {
 			out.SetString(n.value)
-			good = true
+			return true
 		}
 	case reflect.Interface:
 		if resolved == nil {
 			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
 		} else {
 			out.Set(reflect.ValueOf(resolved))
 		}
-		good = true
+		return true
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		switch resolved := resolved.(type) {
 		case int:
 			if !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if !out.OverflowInt(resolved) {
 				out.SetInt(resolved)
-				good = true
+				return true
 			}
 		case uint64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case string:
 			if out.Type() == durationType {
 				d, err := time.ParseDuration(resolved)
 				if err == nil {
 					out.SetInt(int64(d))
-					good = true
+					return true
 				}
 			}
 		}
@@ -413,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		case int:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case uint64:
 			if !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		}
 	case reflect.Bool:
 		switch resolved := resolved.(type) {
 		case bool:
 			out.SetBool(resolved)
-			good = true
+			return true
 		}
 	case reflect.Float32, reflect.Float64:
 		switch resolved := resolved.(type) {
 		case int:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case int64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case uint64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case float64:
 			out.SetFloat(resolved)
-			good = true
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
 		}
 	case reflect.Ptr:
 		if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -458,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 			elem := reflect.New(out.Type().Elem())
 			elem.Elem().Set(reflect.ValueOf(resolved))
 			out.Set(elem)
-			good = true
+			return true
 		}
 	}
-	if !good {
-		d.terror(n, tag, out)
-	}
-	return good
+	d.terror(n, tag, out)
+	return false
 }
 
 func settableValueOf(i interface{}) reflect.Value {
@@ -481,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 	switch out.Kind() {
 	case reflect.Slice:
 		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
 	case reflect.Interface:
 		// No type hints. Will have to use a generic sequence.
 		iface = out
@@ -499,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 			j++
 		}
 	}
-	out.Set(out.Slice(0, j))
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
 	if iface.IsValid() {
 		iface.Set(out)
 	}
@@ -560,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 			}
 			e := reflect.New(et).Elem()
 			if d.unmarshal(n.children[i+1], e) {
-				out.SetMapIndex(k, e)
+				d.setMapIndex(n.children[i+1], out, k, e)
 			}
 		}
 	}
@@ -568,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 	return true
 }
 
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
 func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
 	outt := out.Type()
 	if outt.Elem() != mapItemType {
@@ -615,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 		elemType = inlineMap.Type().Elem()
 	}
 
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
 	for i := 0; i < l; i += 2 {
 		ni := n.children[i]
 		if isMerge(ni) {
@@ -625,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			continue
 		}
 		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
 			var field reflect.Value
 			if info.Inline == nil {
 				field = out.Field(info.Num)
@@ -638,7 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			}
 			value := reflect.New(elemType).Elem()
 			d.unmarshal(n.children[i+1], value)
-			inlineMap.SetMapIndex(name, value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
+		} else if d.strict {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
 		}
 	}
 	return true
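
The decode.go changes above thread a strict flag through the decoder: duplicate map keys, duplicate struct fields, and keys with no matching struct field are collected into d.terrors and surfaced as a TypeError. A sketch of the resulting behaviour through the public API (the Config type and input document are made up, and the exact error wording is approximate):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Config struct {
	Name string `yaml:"name"`
	Port int    `yaml:"port"`
}

func main() {
	// "address" has no corresponding field in Config.
	doc := []byte("name: etcd\nport: 2379\naddress: 0.0.0.0\n")

	var lax Config
	fmt.Println(yaml.Unmarshal(doc, &lax)) // <nil>: unknown keys are ignored

	var strict Config
	// Expected to fail with something like:
	//   yaml: unmarshal errors:
	//     line 3: field address not found in type main.Config
	fmt.Println(yaml.UnmarshalStrict(doc, &strict))
}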

+ 10 - 9
vendor/gopkg.in/yaml.v2/emitterc.go

@@ -2,6 +2,7 @@ package yaml
 
 import (
 	"bytes"
+	"fmt"
 )
 
 // Flush the buffer if needed.
@@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
 		return yaml_emitter_emit_mapping_start(emitter, event)
 	default:
 		return yaml_emitter_set_emitter_error(emitter,
-			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
 	}
 }
 
@@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
 	return true
 }
 
-// Write an achor.
+// Write an anchor.
 func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
 	if emitter.anchor_data.anchor == nil {
 		return true
@@ -994,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		break_space    = false
 		space_break    = false
 
-		preceeded_by_whitespace = false
-		followed_by_whitespace  = false
-		previous_space          = false
-		previous_break          = false
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
 	)
 
 	emitter.scalar_data.value = value
@@ -1016,7 +1017,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		flow_indicators = true
 	}
 
-	preceeded_by_whitespace = true
+	preceded_by_whitespace = true
 	for i, w := 0, 0; i < len(value); i += w {
 		w = width(value[i])
 		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@@ -1047,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 					block_indicators = true
 				}
 			case '#':
-				if preceeded_by_whitespace {
+				if preceded_by_whitespace {
 					flow_indicators = true
 					block_indicators = true
 				}
@@ -1088,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		}
 
 		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
-		preceeded_by_whitespace = is_blankz(value, i)
+		preceded_by_whitespace = is_blankz(value, i)
 	}
 
 	emitter.scalar_data.multiline = line_breaks

+ 96 - 40
vendor/gopkg.in/yaml.v2/encode.go

@@ -3,12 +3,14 @@ package yaml
 import (
 	"encoding"
 	"fmt"
+	"io"
 	"reflect"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"
 )
 
 type encoder struct {
@@ -16,25 +18,39 @@ type encoder struct {
 	event   yaml_event_t
 	out     []byte
 	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
 }
 
-func newEncoder() (e *encoder) {
-	e = &encoder{}
-	e.must(yaml_emitter_initialize(&e.emitter))
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
 	yaml_emitter_set_output_string(&e.emitter, &e.out)
 	yaml_emitter_set_unicode(&e.emitter, true)
-	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
-	e.emit()
-	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
-	e.emit()
 	return e
 }
 
-func (e *encoder) finish() {
-	e.must(yaml_document_end_event_initialize(&e.event, true))
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
 	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
 	e.emitter.open_ended = false
-	e.must(yaml_stream_end_event_initialize(&e.event))
+	yaml_stream_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -44,9 +60,7 @@ func (e *encoder) destroy() {
 
 func (e *encoder) emit() {
 	// This will internally delete the e.event value.
-	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
-		e.must(false)
-	}
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
 }
 
 func (e *encoder) must(ok bool) {
@@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) {
 	}
 }
 
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
 func (e *encoder) marshal(tag string, in reflect.Value) {
-	if !in.IsValid() {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
 		e.nilv()
 		return
 	}
 	iface := in.Interface()
-	if m, ok := iface.(Marshaler); ok {
+	switch m := iface.(type) {
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
 		v, err := m.MarshalYAML()
 		if err != nil {
 			fail(err)
@@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
 			return
 		}
 		in = reflect.ValueOf(v)
-	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+	case encoding.TextMarshaler:
 		text, err := m.MarshalText()
 		if err != nil {
 			fail(err)
 		}
 		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
 	}
 	switch in.Kind() {
 	case reflect.Interface:
-		if in.IsNil() {
-			e.nilv()
-		} else {
-			e.marshal(tag, in.Elem())
-		}
+		e.marshal(tag, in.Elem())
 	case reflect.Map:
 		e.mapv(tag, in)
 	case reflect.Ptr:
-		if in.IsNil() {
-			e.nilv()
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
 		} else {
 			e.marshal(tag, in.Elem())
 		}
 	case reflect.Struct:
-		e.structv(tag, in)
-	case reflect.Slice:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
 		if in.Type().Elem() == mapItemType {
 			e.itemsv(tag, in)
 		} else {
@@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) {
 		e.flow = false
 		style = yaml_FLOW_MAPPING_STYLE
 	}
-	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
 	e.emit()
 	f()
-	e.must(yaml_mapping_end_event_initialize(&e.event))
+	yaml_mapping_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
 func (e *encoder) stringv(tag string, in reflect.Value) {
 	var style yaml_scalar_style_t
 	s := in.String()
-	rtag, rs := resolve("", s)
-	if rtag == yaml_BINARY_TAG {
-		if tag == "" || tag == yaml_STR_TAG {
-			tag = rtag
-			s = rs.(string)
-		} else if tag == yaml_BINARY_TAG {
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
 			failf("explicitly tagged !!binary data must be base64-encoded")
-		} else {
+		}
+		if tag != "" {
 			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
 		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
 	}
-	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	} else if strings.Contains(s, "\n") {
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
 		style = yaml_LITERAL_SCALAR_STYLE
-	} else {
+	case canUsePlain:
 		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
 	}
 	e.emitScalar(s, "", tag, style)
 }
@@ -281,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
 	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
 }
 
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
 func (e *encoder) floatv(tag string, in reflect.Value) {
-	// FIXME: Handle 64 bits here.
-	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
 	switch s {
 	case "+Inf":
 		s = ".inf"
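
Two behavioural points from the encode.go changes above: floats are now formatted with the precision of the underlying value (issue #352) instead of always 32 bits, and time.Time values are emitted as RFC3339Nano timestamps rather than being routed through TextMarshaler. A sketch (the Event type is made up and the shown output is what the new code is expected to produce):

package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type Event struct {
	Ratio float64   `yaml:"ratio"`
	At    time.Time `yaml:"at"`
}

func main() {
	e := Event{
		Ratio: 1.0 / 3.0,
		At:    time.Date(2018, 3, 28, 12, 0, 0, 0, time.UTC),
	}
	out, err := yaml.Marshal(&e)
	if err != nil {
		panic(err)
	}
	// Expected output, roughly:
	//   ratio: 0.3333333333333333   (full float64 precision)
	//   at: 2018-03-28T12:00:00Z    (RFC3339Nano, plain scalar)
	fmt.Print(string(out))
}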

+ 19 - 1
vendor/gopkg.in/yaml.v2/readerc.go

@@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 		panic("read handler must be set")
 	}
 
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// for that to be the case, and there are tests 
+
 	// If the EOF flag is set and the raw buffer is empty, do nothing.
 	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
-		return true
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
 	}
 
 	// Return if the buffer contains enough characters.
@@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 			break
 		}
 	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is Go) panicking; or C) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
 	parser.buffer = parser.buffer[:buffer_len]
 	return true
 }

+ 65 - 15
vendor/gopkg.in/yaml.v2/resolve.go

@@ -6,7 +6,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-	"unicode/utf8"
+	"time"
 )
 
 type resolveMapItem struct {
@@ -75,7 +75,7 @@ func longTag(tag string) string {
 
 func resolvableTag(tag string) bool {
 	switch tag {
-	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
 		return true
 	}
 	return false
@@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 		switch tag {
 		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
 			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
 		}
 		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
 	}()
@@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 
 		case 'D', 'S':
 			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
 			plain := strings.Replace(in, "_", "", -1)
 			intv, err := strconv.ParseInt(plain, 0, 64)
 			if err == nil {
@@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 					return yaml_INT_TAG, uintv
 				}
 			} else if strings.HasPrefix(plain, "-0b") {
-				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
 				if err == nil {
-					if intv == int64(int(intv)) {
-						return yaml_INT_TAG, -int(intv)
+					if true || intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
 					} else {
-						return yaml_INT_TAG, -intv
+						return yaml_INT_TAG, intv
 					}
 				}
 			}
-			// XXX Handle timestamps here.
-
 		default:
 			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
 		}
 	}
-	if tag == yaml_BINARY_TAG {
-		return yaml_BINARY_TAG, in
-	}
-	if utf8.ValidString(in) {
-		return yaml_STR_TAG, in
-	}
-	return yaml_BINARY_TAG, encodeBase64(in)
+	return yaml_STR_TAG, in
 }
 
 // encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -206,3 +220,39 @@ func encodeBase64(s string) string {
 	}
 	return string(out[:k])
 }
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
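
With the resolve.go changes above, an unquoted scalar matching one of allowedTimestampFormats resolves to the !!timestamp tag, and the decode.go hunks earlier in this commit turn that into a time.Time when the target field has that type, while interface{} targets keep the original string for backward compatibility. A sketch (the Release type is made up; the printed values are what the new behaviour is expected to yield):

package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type Release struct {
	Date time.Time `yaml:"date"`
}

func main() {
	doc := []byte("date: 2018-03-28\n")

	var r Release
	if err := yaml.Unmarshal(doc, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Date.Format(time.RFC3339)) // 2018-03-28T00:00:00Z

	// Into a generic map the value should stay a string, per the
	// yaml_TIMESTAMP_TAG case in decode.go above.
	var m map[string]interface{}
	if err := yaml.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	fmt.Printf("%T %q\n", m["date"], m["date"])
}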

+ 12 - 26
vendor/gopkg.in/yaml.v2/scannerc.go

@@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
 	if directive {
 		context = "while parsing a %TAG directive"
 	}
-	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
 }
 
 func trace(args ...interface{}) func() {
@@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
 
 	required := parser.flow_level == 0 && parser.indent == parser.mark.column
 
-	// A simple key is required only when it is the first token in the current
-	// line.  Therefore it is always allowed.  But we add a check anyway.
-	if required && !parser.simple_key_allowed {
-		panic("should not happen")
-	}
-
 	//
 	// If the current position may start a simple key, save it.
 	//
@@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 	} else {
 		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
 		// directive, it's an error.  If it's a tag token, it must be a part of URI.
-		if directive && !(s[0] == '!' && s[1] == 0) {
+		if directive && string(s) != "!" {
 			yaml_parser_set_scanner_tag_error(parser, directive,
 				start_mark, "did not find expected '!'")
 			return false
@@ -1959,6 +1953,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
 	//size_t length = head ? strlen((char *)head) : 0
 	var s []byte
+	hasTag := len(head) > 0
 
 	// Copy the head if needed.
 	//
@@ -2000,10 +1995,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
 			return false
 		}
+		hasTag = true
 	}
 
-	// Check if the tag is non-empty.
-	if len(s) == 0 {
+	if !hasTag {
 		yaml_parser_set_scanner_tag_error(parser, directive,
 			start_mark, "did not find expected tag URI")
 		return false
@@ -2474,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 			}
 		}
 
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
 		// Check if we are at the end of the scalar.
 		if single {
 			if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2486,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 		}
 
 		// Consume blank characters.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 				// Consume a space or a tab character.
@@ -2591,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		// Consume non-blank characters.
 		for !is_blankz(parser.buffer, parser.buffer_pos) {
 
-			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
-			if parser.flow_level > 0 &&
-				parser.buffer[parser.buffer_pos] == ':' &&
-				!is_blankz(parser.buffer, parser.buffer_pos+1) {
-				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-					start_mark, "found unexpected ':'")
-				return false
-			}
-
 			// Check for indicators that may end a plain scalar.
 			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
 				(parser.flow_level > 0 &&
-					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+					(parser.buffer[parser.buffer_pos] == ',' ||
 						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
 						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
 						parser.buffer[parser.buffer_pos] == '}')) {
@@ -2655,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 
-				// Check for tab character that abuse indentation.
+				// Check for tab characters that abuse indentation.
 				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
 					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violate indentation")
+						start_mark, "found a tab character that violates indentation")
 					return false
 				}
 

+ 9 - 0
vendor/gopkg.in/yaml.v2/sorter.go

@@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
 		}
 		var ai, bi int
 		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
 		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
 			an = an*10 + int64(ar[ai]-'0')
 		}
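
The sorter.go hunk above tweaks how digit runs with leading zeros compare when the encoder orders map keys. The observable behaviour is numeric-aware key ordering in Marshal output; a sketch (keys are made up, and the shown order is the expected one):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	m := map[string]int{
		"node10": 3,
		"node2":  2,
		"node1":  1,
	}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	// Embedded numbers are compared numerically, so the keys are
	// expected to come out as node1, node2, node10 rather than the
	// lexicographic node1, node10, node2.
	fmt.Print(string(out))
}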

+ 1 - 64
vendor/gopkg.in/yaml.v2/writerc.go

@@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
 		return true
 	}
 
-	// If the output encoding is UTF-8, we don't need to recode the buffer.
-	if emitter.encoding == yaml_UTF8_ENCODING {
-		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
-			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
-		}
-		emitter.buffer_pos = 0
-		return true
-	}
-
-	// Recode the buffer into the raw buffer.
-	var low, high int
-	if emitter.encoding == yaml_UTF16LE_ENCODING {
-		low, high = 0, 1
-	} else {
-		high, low = 1, 0
-	}
-
-	pos := 0
-	for pos < emitter.buffer_pos {
-		// See the "reader.c" code for more details on UTF-8 encoding.  Note
-		// that we assume that the buffer contains a valid UTF-8 sequence.
-
-		// Read the next UTF-8 character.
-		octet := emitter.buffer[pos]
-
-		var w int
-		var value rune
-		switch {
-		case octet&0x80 == 0x00:
-			w, value = 1, rune(octet&0x7F)
-		case octet&0xE0 == 0xC0:
-			w, value = 2, rune(octet&0x1F)
-		case octet&0xF0 == 0xE0:
-			w, value = 3, rune(octet&0x0F)
-		case octet&0xF8 == 0xF0:
-			w, value = 4, rune(octet&0x07)
-		}
-		for k := 1; k < w; k++ {
-			octet = emitter.buffer[pos+k]
-			value = (value << 6) + (rune(octet) & 0x3F)
-		}
-		pos += w
-
-		// Write the character.
-		if value < 0x10000 {
-			var b [2]byte
-			b[high] = byte(value >> 8)
-			b[low] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
-		} else {
-			// Write the character using a surrogate pair (check "reader.c").
-			var b [4]byte
-			value -= 0x10000
-			b[high] = byte(0xD8 + (value >> 18))
-			b[low] = byte((value >> 10) & 0xFF)
-			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
-			b[low+2] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
-		}
-	}
-
-	// Write the raw buffer.
-	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
 		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
 	}
 	emitter.buffer_pos = 0
-	emitter.raw_buffer = emitter.raw_buffer[:0]
 	return true
 }

+ 128 - 8
vendor/gopkg.in/yaml.v2/yaml.go

@@ -9,6 +9,7 @@ package yaml
 import (
 	"errors"
 	"fmt"
+	"io"
 	"reflect"
 	"strings"
 	"sync"
@@ -77,8 +78,65 @@ type Marshaler interface {
 // supported tag options.
 //
 func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decorder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
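
Taken together, the new Decoder streams multiple YAML documents from an io.Reader until io.EOF. A minimal usage sketch (the documents and destination type are illustrative only):

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	r := strings.NewReader("a: 1\n---\na: 2\n")

	dec := yaml.NewDecoder(r)
	dec.SetStrict(true) // opt in to the UnmarshalStrict behaviour per document

	for {
		var doc map[string]int
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // parser returned nil: no more documents
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(doc["a"]) // 1, then 2
	}
}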
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 	defer handleErr(&err)
-	d := newDecoder()
+	d := newDecoder(strict)
 	p := newParser(in)
 	defer p.destroy()
 	node := p.parse()
@@ -99,8 +157,8 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 // of the generated document will reflect the structure of the value itself.
 // Maps and pointers (to struct, string, int, etc) are accepted as the in value.
 //
-// Struct fields are only unmarshalled if they are exported (have an upper case
-// first letter), and are unmarshalled using the field name lowercased as the
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
 // default key. Custom keys may be defined via the "yaml" name in the field
 // tag: the content preceding the first comma is used as the key, and the
 // following comma-separated options are used to tweak the marshalling process.
@@ -114,7 +172,10 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 //
 //     omitempty    Only include the field if it's not set to the zero
 //                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
 //
 //     flow         Marshal using a flow style (useful for structs,
 //                  sequences and maps).
@@ -129,7 +190,7 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 // For example:
 //
 //     type T struct {
-//         F int "a,omitempty"
+//         F int `yaml:"a,omitempty"`
 //         B int
 //     }
 //     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
@@ -139,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
 	defer handleErr(&err)
 	e := newEncoder()
 	defer e.destroy()
-	e.marshal("", reflect.ValueOf(in))
+	e.marshalDoc("", reflect.ValueOf(in))
 	e.finish()
 	out = e.out
 	return
 }
 
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
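
The corresponding Encoder streams multiple documents to an io.Writer, inserting the "---" separator before every document after the first. A short sketch (the values are illustrative):

package main

import (
	"bytes"
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer

	enc := yaml.NewEncoder(&buf)
	for _, v := range []map[string]int{{"a": 1}, {"a": 2}} {
		if err := enc.Encode(v); err != nil {
			log.Fatal(err)
		}
	}
	if err := enc.Close(); err != nil { // flush; no trailing "..." is written
		log.Fatal(err)
	}

	fmt.Print(buf.String())
	// a: 1
	// ---
	// a: 2
}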
+
 func handleErr(err *error) {
 	if v := recover(); v != nil {
 		if e, ok := v.(yamlError); ok {
@@ -200,6 +296,9 @@ type fieldInfo struct {
 	Num       int
 	OmitEmpty bool
 	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
 
 	// Inline holds the field index if the field is part of an inlined struct.
 	Inline []int
@@ -279,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 					} else {
 						finfo.Inline = append([]int{i}, finfo.Inline...)
 					}
+					finfo.Id = len(fieldsList)
 					fieldsMap[finfo.Key] = finfo
 					fieldsList = append(fieldsList, finfo)
 				}
@@ -300,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 			return nil, errors.New(msg)
 		}
 
+		info.Id = len(fieldsList)
 		fieldsList = append(fieldsList, info)
 		fieldsMap[info.Key] = info
 	}
 
-	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
 
 	fieldMapMutex.Lock()
 	structMap[st] = sinfo
@@ -312,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 	return sinfo, nil
 }
 
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
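
The IsZeroer hook ties into the omitempty change documented above: a field whose type implements IsZero is omitted when that method reports true. A hedged sketch (the Event struct is invented for illustration; time.Time is the notable implementation the comment mentions):

package main

import (
	"fmt"
	"log"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type Event struct {
	Name string    `yaml:"name"`
	At   time.Time `yaml:"at,omitempty"` // time.Time implements IsZero
}

func main() {
	out, err := yaml.Marshal(Event{Name: "start"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // only "name: start" — the zero time is omitted
}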
+
 func isZero(v reflect.Value) bool {
-	switch v.Kind() {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
 	case reflect.String:
 		return len(v.String()) == 0
 	case reflect.Interface, reflect.Ptr:

+ 27 - 5
vendor/gopkg.in/yaml.v2/yamlh.go

@@ -1,6 +1,7 @@
 package yaml
 
 import (
+	"fmt"
 	"io"
 )
 
@@ -239,6 +240,27 @@ const (
 	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
 )
 
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
 // The event structure.
 type yaml_event_t struct {
 
@@ -508,7 +530,7 @@ type yaml_parser_t struct {
 
 	problem string // Error description.
 
-	// The byte about which the problem occured.
+	// The byte about which the problem occurred.
 	problem_offset int
 	problem_value  int
 	problem_mark   yaml_mark_t
@@ -521,9 +543,9 @@ type yaml_parser_t struct {
 
 	read_handler yaml_read_handler_t // Read handler.
 
-	input_file io.Reader // File input data.
-	input      []byte    // String input data.
-	input_pos  int
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
 
 	eof bool // EOF flag
 
@@ -632,7 +654,7 @@ type yaml_emitter_t struct {
 	write_handler yaml_write_handler_t // Write handler.
 
 	output_buffer *[]byte   // String output data.
-	output_file   io.Writer // File output data.
+	output_writer io.Writer // File output data.
 
 	buffer     []byte // The working buffer.
 	buffer_pos int    // The current position of the buffer.