Quellcode durchsuchen

Added global functions optimized for constantly changing templates

Aliaksandr Valialkin vor 9 Jahren
Ursprung
Commit
d58c07cb2d

+ 117 - 22
template.go

@@ -9,10 +9,104 @@ package fasttemplate
 import (
 	"bytes"
 	"fmt"
+	"github.com/valyala/bytebufferpool"
 	"io"
-	"sync"
 )
 
+// ExecuteFunc calls f on each template tag (placeholder) occurrence.
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteFunc for frozen templates.
+func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int64, error) {
+	s := unsafeString2Bytes(template)
+	a := unsafeString2Bytes(startTag)
+	b := unsafeString2Bytes(endTag)
+
+	var nn int64
+	var ni int
+	var err error
+	for {
+		n := bytes.Index(s, a)
+		if n < 0 {
+			break
+		}
+		ni, err = w.Write(s[:n])
+		nn += int64(ni)
+		if err != nil {
+			return nn, err
+		}
+
+		s = s[n+len(a):]
+		n = bytes.Index(s, b)
+		if n < 0 {
+			// cannot find end tag - just write it to the output.
+			ni, _ = w.Write(a)
+			nn += int64(ni)
+			break
+		}
+
+		ni, err = f(w, unsafeBytes2String(s[:n]))
+		nn += int64(ni)
+		s = s[n+len(b):]
+	}
+	ni, err = w.Write(s)
+	nn += int64(ni)
+
+	return nn, err
+}
+
+// Execute substitutes template tags (placeholders) with the corresponding
+// values from the map m and writes the result to the given writer w.
+//
+// Substitution map m may contain values with the following types:
+//   * []byte - the fastest value type
+//   * string - convenient value type
+//   * TagFunc - flexible value type
+//
+// Returns the number of bytes written to w.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.Execute for frozen templates.
+func Execute(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) {
+	return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
+// ExecuteFuncString calls f on each template tag (placeholder) occurrence
+// and substitutes it with the data written to TagFunc's w.
+//
+// Returns the resulting string.
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteFuncString for frozen templates.
+func ExecuteFuncString(template, startTag, endTag string, f TagFunc) string {
+	bb := byteBufferPool.Get()
+	if _, err := ExecuteFunc(template, startTag, endTag, bb, f); err != nil {
+		panic(fmt.Sprintf("unexpected error: %s", err))
+	}
+	s := string(bb.B)
+	bb.Reset()
+	byteBufferPool.Put(bb)
+	return s
+}
+
// byteBufferPool provides reusable byte buffers for the package-level
// Execute*String helpers, minimizing allocations across calls.
var byteBufferPool bytebufferpool.Pool
+
+// ExecuteString substitutes template tags (placeholders) with the corresponding
+// values from the map m and returns the result.
+//
+// Substitution map m may contain values with the following types:
+//   * []byte - the fastest value type
+//   * string - convenient value type
+//   * TagFunc - flexible value type
+//
+// This function is optimized for constantly changing templates.
+// Use Template.ExecuteString for frozen templates.
+func ExecuteString(template, startTag, endTag string, m map[string]interface{}) string {
+	return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
+}
+
 // Template implements simple template engine, which can be used for fast
 // tags' (aka placeholders) substitution.
 type Template struct {
@@ -20,9 +114,9 @@ type Template struct {
 	startTag string
 	endTag   string
 
-	texts           [][]byte
-	tags            []string
-	bytesBufferPool sync.Pool
+	texts          [][]byte
+	tags           []string
+	byteBufferPool bytebufferpool.Pool
 }
 
 // New parses the given template using the given startTag and endTag
@@ -55,10 +149,6 @@ func NewTemplate(template, startTag, endTag string) (*Template, error) {
 	return &t, nil
 }
 
-func newBytesBuffer() interface{} {
-	return &bytes.Buffer{}
-}
-
 // TagFunc can be used as a substitution value in the map passed to Execute*.
 // Execute* functions pass tag (placeholder) name in 'tag' argument.
 //
@@ -76,7 +166,6 @@ type TagFunc func(w io.Writer, tag string) (int, error)
 func (t *Template) Reset(template, startTag, endTag string) error {
 	// Keep these vars in t, so GC won't collect them and won't break
 	// vars derived via unsafe*
-	t.bytesBufferPool.New = newBytesBuffer
 	t.template = template
 	t.startTag = startTag
 	t.endTag = endTag
@@ -130,6 +219,9 @@ func (t *Template) Reset(template, startTag, endTag string) error {
 // ExecuteFunc calls f on each template tag (placeholder) occurrence.
 //
 // Returns the number of bytes written to w.
+//
+// This function is optimized for frozen templates.
+// Use ExecuteFunc for constantly changing templates.
 func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) {
 	var nn int64
 
@@ -141,22 +233,20 @@ func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) {
 
 	for i := 0; i < n; i++ {
 		ni, err := w.Write(t.texts[i])
+		nn += int64(ni)
 		if err != nil {
 			return nn, err
 		}
-		nn += int64(ni)
 
-		if ni, err = f(w, t.tags[i]); err != nil {
+		ni, err = f(w, t.tags[i])
+		nn += int64(ni)
+		if err != nil {
 			return nn, err
 		}
-		nn += int64(ni)
 	}
 	ni, err := w.Write(t.texts[n])
-	if err != nil {
-		return nn, err
-	}
 	nn += int64(ni)
-	return nn, nil
+	return nn, err
 }
 
 // Execute substitutes template tags (placeholders) with the corresponding
@@ -172,18 +262,21 @@ func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error)
 	return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
 }
 
-// ExecuteFuncString call f on each template tag (placeholder) occurrence
+// ExecuteFuncString calls f on each template tag (placeholder) occurrence
 // and substitutes it with the data written to TagFunc's w.
 //
 // Returns the resulting string.
+//
+// This function is optimized for frozen templates.
+// Use ExecuteFuncString for constantly changing templates.
 func (t *Template) ExecuteFuncString(f TagFunc) string {
-	w := t.bytesBufferPool.Get().(*bytes.Buffer)
-	if _, err := t.ExecuteFunc(w, f); err != nil {
+	bb := t.byteBufferPool.Get()
+	if _, err := t.ExecuteFunc(bb, f); err != nil {
 		panic(fmt.Sprintf("unexpected error: %s", err))
 	}
-	s := string(w.Bytes())
-	w.Reset()
-	t.bytesBufferPool.Put(w)
+	s := string(bb.Bytes())
+	bb.Reset()
+	t.byteBufferPool.Put(bb)
 	return s
 }
 
@@ -195,6 +288,8 @@ func (t *Template) ExecuteFuncString(f TagFunc) string {
 //   * string - convenient value type
 //   * TagFunc - flexible value type
 //
+// This function is optimized for frozen templates.
+// Use ExecuteString for constantly changing templates.
 func (t *Template) ExecuteString(m map[string]interface{}) string {
 	return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) })
 }

+ 95 - 0
template_test.go

@@ -1,6 +1,7 @@
 package fasttemplate
 
 import (
+	"bytes"
 	"io"
 	"testing"
 )
@@ -212,6 +213,100 @@ func TestMixedValues(t *testing.T) {
 	}
 }
 
+func TestExecuteFunc(t *testing.T) {
+	testExecuteFunc(t, "", "")
+	testExecuteFunc(t, "a", "a")
+	testExecuteFunc(t, "abc", "abc")
+	testExecuteFunc(t, "{foo}", "xxxx")
+	testExecuteFunc(t, "a{foo}", "axxxx")
+	testExecuteFunc(t, "{foo}a", "xxxxa")
+	testExecuteFunc(t, "a{foo}bc", "axxxxbc")
+	testExecuteFunc(t, "{foo}{foo}", "xxxxxxxx")
+	testExecuteFunc(t, "{foo}bar{foo}", "xxxxbarxxxx")
+
+	// unclosed tag
+	testExecuteFunc(t, "{unclosed", "{unclosed")
+	testExecuteFunc(t, "{{unclosed", "{{unclosed")
+	testExecuteFunc(t, "{un{closed", "{un{closed")
+
+	// test unknown tag
+	testExecuteFunc(t, "{unknown}", "zz")
+	testExecuteFunc(t, "{foo}q{unexpected}{missing}bar{foo}", "xxxxqzzzzbarxxxx")
+}
+
+func testExecuteFunc(t *testing.T, template, expectedOutput string) {
+	var bb bytes.Buffer
+	ExecuteFunc(template, "{", "}", &bb, func(w io.Writer, tag string) (int, error) {
+		if tag == "foo" {
+			return w.Write([]byte("xxxx"))
+		}
+		return w.Write([]byte("zz"))
+	})
+
+	output := string(bb.Bytes())
+	if output != expectedOutput {
+		t.Fatalf("unexpected output for template=%q: %q. Expected %q", template, output, expectedOutput)
+	}
+}
+
+func TestExecute(t *testing.T) {
+	testExecute(t, "", "")
+	testExecute(t, "a", "a")
+	testExecute(t, "abc", "abc")
+	testExecute(t, "{foo}", "xxxx")
+	testExecute(t, "a{foo}", "axxxx")
+	testExecute(t, "{foo}a", "xxxxa")
+	testExecute(t, "a{foo}bc", "axxxxbc")
+	testExecute(t, "{foo}{foo}", "xxxxxxxx")
+	testExecute(t, "{foo}bar{foo}", "xxxxbarxxxx")
+
+	// unclosed tag
+	testExecute(t, "{unclosed", "{unclosed")
+	testExecute(t, "{{unclosed", "{{unclosed")
+	testExecute(t, "{un{closed", "{un{closed")
+
+	// test unknown tag
+	testExecute(t, "{unknown}", "")
+	testExecute(t, "{foo}q{unexpected}{missing}bar{foo}", "xxxxqbarxxxx")
+}
+
+func testExecute(t *testing.T, template, expectedOutput string) {
+	var bb bytes.Buffer
+	Execute(template, "{", "}", &bb, map[string]interface{}{"foo": "xxxx"})
+	output := string(bb.Bytes())
+	if output != expectedOutput {
+		t.Fatalf("unexpected output for template=%q: %q. Expected %q", template, output, expectedOutput)
+	}
+}
+
+func TestExecuteString(t *testing.T) {
+	testExecuteString(t, "", "")
+	testExecuteString(t, "a", "a")
+	testExecuteString(t, "abc", "abc")
+	testExecuteString(t, "{foo}", "xxxx")
+	testExecuteString(t, "a{foo}", "axxxx")
+	testExecuteString(t, "{foo}a", "xxxxa")
+	testExecuteString(t, "a{foo}bc", "axxxxbc")
+	testExecuteString(t, "{foo}{foo}", "xxxxxxxx")
+	testExecuteString(t, "{foo}bar{foo}", "xxxxbarxxxx")
+
+	// unclosed tag
+	testExecuteString(t, "{unclosed", "{unclosed")
+	testExecuteString(t, "{{unclosed", "{{unclosed")
+	testExecuteString(t, "{un{closed", "{un{closed")
+
+	// test unknown tag
+	testExecuteString(t, "{unknown}", "")
+	testExecuteString(t, "{foo}q{unexpected}{missing}bar{foo}", "xxxxqbarxxxx")
+}
+
// testExecuteString renders template through the global ExecuteString helper
// using "{"/"}" delimiters and the substitution map {"foo": "xxxx"}, then
// compares the result with expectedOutput.
func testExecuteString(t *testing.T, template, expectedOutput string) {
	output := ExecuteString(template, "{", "}", map[string]interface{}{"foo": "xxxx"})
	if output != expectedOutput {
		t.Fatalf("unexpected output for template=%q: %q. Expected %q", template, output, expectedOutput)
	}
}
+
 func expectPanic(t *testing.T, f func()) {
 	defer func() {
 		if r := recover(); r == nil {

+ 28 - 10
template_timing_test.go

@@ -119,15 +119,11 @@ func BenchmarkFastTemplateExecuteFunc(b *testing.B) {
 		b.Fatalf("error in template: %s", err)
 	}
 
-	f := func(w io.Writer, tag string) (int, error) {
-		return w.Write(m[tag].([]byte))
-	}
-
 	b.ResetTimer()
 	b.RunParallel(func(pb *testing.PB) {
 		var w bytes.Buffer
 		for pb.Next() {
-			if _, err := t.ExecuteFunc(&w, f); err != nil {
+			if _, err := t.ExecuteFunc(&w, testTagFunc); err != nil {
 				b.Fatalf("unexpected error: %s", err)
 			}
 			x := w.Bytes()
@@ -167,14 +163,10 @@ func BenchmarkFastTemplateExecuteFuncString(b *testing.B) {
 		b.Fatalf("error in template: %s", err)
 	}
 
-	f := func(w io.Writer, tag string) (int, error) {
-		return w.Write(m[tag].([]byte))
-	}
-
 	b.ResetTimer()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			x := t.ExecuteFuncString(f)
+			x := t.ExecuteFuncString(testTagFunc)
 			if x != result {
 				b.Fatalf("unexpected result\n%q\nExpected\n%q\n", x, result)
 			}
@@ -246,3 +238,29 @@ func BenchmarkTemplateReset(b *testing.B) {
 		}
 	})
 }
+
// BenchmarkTemplateResetExecuteFunc measures the frozen-template API when the
// template is re-parsed via Reset before every execution, for comparison with
// the global ExecuteFunc which scans the template on the fly.
// Errors from Reset/ExecuteFunc are ignored here; the benchmark assumes the
// shared `source` template is well-formed.
func BenchmarkTemplateResetExecuteFunc(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		t := New(source, "{{", "}}")
		var w bytes.Buffer
		for pb.Next() {
			t.Reset(source, "{{", "}}")
			t.ExecuteFunc(&w, testTagFunc)
			w.Reset()
		}
	})
}
+
// BenchmarkExecuteFunc measures the global ExecuteFunc, which re-scans the
// template string on every call (the path intended for constantly changing
// templates). The returned values are ignored: bytes.Buffer writes never fail.
func BenchmarkExecuteFunc(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var bb bytes.Buffer
		for pb.Next() {
			ExecuteFunc(source, "{{", "}}", &bb, testTagFunc)
			bb.Reset()
		}
	})
}
+
// testTagFunc resolves tags against the package-level substitution map m
// (defined elsewhere in this file); it assumes every value in m is a []byte
// and panics otherwise.
func testTagFunc(w io.Writer, tag string) (int, error) {
	return w.Write(m[tag].([]byte))
}

+ 15 - 0
vendor/github.com/valyala/bytebufferpool/.travis.yml

@@ -0,0 +1,15 @@
+language: go
+
+go:
+  - 1.6
+
+script:
+  # build test for supported platforms
+  - GOOS=linux go build
+  - GOOS=darwin go build
+  - GOOS=freebsd go build
+  - GOOS=windows go build
+  - GOARCH=386 go build
+
+  # run tests on a standard platform
+  - go test -v ./...

+ 22 - 0
vendor/github.com/valyala/bytebufferpool/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 21 - 0
vendor/github.com/valyala/bytebufferpool/README.md

@@ -0,0 +1,21 @@
+[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool)
+[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool)
+[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool)
+
+# bytebufferpool
+
+An implementation of a pool of byte buffers with anti-memory-waste protection.
+
+The pool may waste limited amount of memory due to fragmentation.
+This amount equals to the maximum total size of the byte buffers
+in concurrent use.
+
+# Benchmark results
+Currently bytebufferpool is the fastest and most effective buffer pool written in Go.
+
+You can find results [here](https://omgnull.github.io/go-benchmark/buffer/).
+
+# bytebufferpool users
+
+* [fasthttp](https://github.com/valyala/fasthttp)
+* [quicktemplate](https://github.com/valyala/quicktemplate)

+ 111 - 0
vendor/github.com/valyala/bytebufferpool/bytebuffer.go

@@ -0,0 +1,111 @@
+package bytebufferpool
+
+import "io"
+
// ByteBuffer is an append-friendly byte buffer that helps to minimize
// memory allocations.
//
// Code that appends data to a []byte slice can operate directly on the
// exported B field. Use Get for obtaining an empty, pooled instance.
type ByteBuffer struct {

	// B holds the accumulated bytes; append to it directly if desired.
	B []byte
}

// Len reports the number of bytes currently stored in the buffer.
func (b *ByteBuffer) Len() int {
	return len(b.B)
}

// ReadFrom implements io.ReaderFrom: it appends the entire contents of r
// to the buffer and returns the number of bytes appended.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
	buf := b.B
	start := int64(len(buf))
	capacity := int64(cap(buf))
	total := start
	if capacity == 0 {
		// No backing storage yet - start with a small allocation.
		capacity = 64
		buf = make([]byte, capacity)
	} else {
		buf = buf[:capacity]
	}
	for {
		if total == capacity {
			// Storage exhausted - double it before the next read.
			capacity *= 2
			grown := make([]byte, capacity)
			copy(grown, buf)
			buf = grown
		}
		read, err := r.Read(buf[total:])
		total += int64(read)
		if err != nil {
			b.B = buf[:total]
			total -= start
			if err == io.EOF {
				// EOF terminates the copy normally and is not reported.
				return total, nil
			}
			return total, err
		}
	}
}

// WriteTo implements io.WriterTo by writing the buffered bytes to w.
func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
	written, err := w.Write(b.B)
	return int64(written), err
}

// Bytes returns the accumulated bytes, i.e. b.B.
//
// Provided for bytes.Buffer compatibility.
func (b *ByteBuffer) Bytes() []byte {
	return b.B
}

// Write implements io.Writer by appending p to the buffer.
func (b *ByteBuffer) Write(p []byte) (int, error) {
	b.B = append(b.B, p...)
	return len(p), nil
}

// WriteByte appends the single byte c to the buffer.
//
// Provided for bytes.Buffer compatibility; the returned error is always nil.
func (b *ByteBuffer) WriteByte(c byte) error {
	b.B = append(b.B, c)
	return nil
}

// WriteString appends s to the buffer.
func (b *ByteBuffer) WriteString(s string) (int, error) {
	b.B = append(b.B, s...)
	return len(s), nil
}

// Set replaces the buffer contents with a copy of p.
func (b *ByteBuffer) Set(p []byte) {
	b.B = append(b.B[:0], p...)
}

// SetString replaces the buffer contents with a copy of s.
func (b *ByteBuffer) SetString(s string) {
	b.B = append(b.B[:0], s...)
}

// String returns the buffer contents as a string.
func (b *ByteBuffer) String() string {
	return string(b.B)
}

// Reset truncates the buffer to zero length while keeping its capacity.
func (b *ByteBuffer) Reset() {
	b.B = b.B[:0]
}

+ 21 - 0
vendor/github.com/valyala/bytebufferpool/bytebuffer_example_test.go

@@ -0,0 +1,21 @@
+package bytebufferpool_test
+
+import (
+	"fmt"
+
+	"github.com/valyala/bytebufferpool"
+)
+
// ExampleByteBuffer demonstrates the append-style API: the Write* methods and
// direct appends to the exported B field are interchangeable.
// NOTE(review): this example has no "// Output:" comment, so `go test` only
// compiles it and never verifies its output - confirm whether that is intended.
func ExampleByteBuffer() {
	bb := bytebufferpool.Get()

	bb.WriteString("first line\n")
	bb.Write([]byte("second line\n"))
	bb.B = append(bb.B, "third line\n"...)

	fmt.Printf("bytebuffer contents=%q", bb.B)

	// It is safe to release byte buffer now, since it is
	// no longer used.
	bytebufferpool.Put(bb)
}

+ 138 - 0
vendor/github.com/valyala/bytebufferpool/bytebuffer_test.go

@@ -0,0 +1,138 @@
+package bytebufferpool
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"testing"
+	"time"
+)
+
// TestByteBufferReadFrom feeds the same payload through ReadFrom repeatedly,
// verifying the returned byte count after each call and that every previously
// appended chunk survives the buffer's internal growth.
func TestByteBufferReadFrom(t *testing.T) {
	prefix := "foobar"
	expectedS := "asadfsdafsadfasdfisdsdfa"
	prefixLen := int64(len(prefix))
	expectedN := int64(len(expectedS))

	var bb ByteBuffer
	bb.WriteString(prefix)

	// Exercise ReadFrom explicitly through the io.ReaderFrom interface.
	rf := (io.ReaderFrom)(&bb)
	for i := 0; i < 20; i++ {
		r := bytes.NewBufferString(expectedS)
		n, err := rf.ReadFrom(r)
		if n != expectedN {
			t.Fatalf("unexpected n=%d. Expecting %d. iteration %d", n, expectedN, i)
		}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		bbLen := int64(bb.Len())
		expectedLen := prefixLen + int64(i+1)*expectedN
		if bbLen != expectedLen {
			t.Fatalf("unexpected byteBuffer length: %d. Expecting %d", bbLen, expectedLen)
		}
		// Every chunk appended by earlier iterations must still be intact.
		for j := 0; j < i; j++ {
			start := prefixLen + int64(j)*expectedN
			b := bb.B[start : start+expectedN]
			if string(b) != expectedS {
				t.Fatalf("unexpected byteBuffer contents: %q. Expecting %q", b, expectedS)
			}
		}
	}
}
+
// TestByteBufferWriteTo writes the buffer into a bytes.Buffer several times
// via the io.WriterTo interface, checking count and contents each round;
// WriteTo does not consume the buffer, so every call yields the same data.
func TestByteBufferWriteTo(t *testing.T) {
	expectedS := "foobarbaz"
	var bb ByteBuffer
	bb.WriteString(expectedS[:3])
	bb.WriteString(expectedS[3:])

	wt := (io.WriterTo)(&bb)
	var w bytes.Buffer
	for i := 0; i < 10; i++ {
		n, err := wt.WriteTo(&w)
		if n != int64(len(expectedS)) {
			t.Fatalf("unexpected n returned from WriteTo: %d. Expecting %d", n, len(expectedS))
		}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		s := string(w.Bytes())
		if s != expectedS {
			t.Fatalf("unexpected string written %q. Expecting %q", s, expectedS)
		}
		w.Reset()
	}
}
+
// TestByteBufferGetPutSerial exercises the default pool from a single goroutine.
func TestByteBufferGetPutSerial(t *testing.T) {
	testByteBufferGetPut(t)
}

// TestByteBufferGetPutConcurrent runs the Get/Put round-trip from several
// goroutines to shake out data races in the pool.
//
// NOTE(review): testByteBufferGetPut calls t.Fatalf from these spawned
// goroutines; the testing package requires FailNow/Fatalf to be called from
// the test goroutine. Consider t.Errorf + return - confirm before changing.
func TestByteBufferGetPutConcurrent(t *testing.T) {
	concurrency := 10
	ch := make(chan struct{}, concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			testByteBufferGetPut(t)
			ch <- struct{}{}
		}()
	}

	// Wait for every worker, bounding the whole test by per-worker timeouts.
	for i := 0; i < concurrency; i++ {
		select {
		case <-ch:
		case <-time.After(time.Second):
			t.Fatalf("timeout!")
		}
	}
}
+
// testByteBufferGetPut builds the string "num <i>" inside a pooled buffer,
// verifies the contents, and returns the buffer to the default pool.
func testByteBufferGetPut(t *testing.T) {
	for i := 0; i < 10; i++ {
		expectedS := fmt.Sprintf("num %d", i)
		b := Get()
		b.B = append(b.B, "num "...)
		b.B = append(b.B, fmt.Sprintf("%d", i)...)
		if string(b.B) != expectedS {
			t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS)
		}
		Put(b)
	}
}
+
// testByteBufferGetString round-trips pooled buffers through SetString/String
// and verifies the contents before returning each buffer to the default pool.
func testByteBufferGetString(t *testing.T) {
	for i := 0; i < 10; i++ {
		expectedS := fmt.Sprintf("num %d", i)
		b := Get()
		b.SetString(expectedS)
		if b.String() != expectedS {
			t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS)
		}
		Put(b)
	}
}

// TestByteBufferGetStringSerial exercises the SetString/String round-trip
// from a single goroutine.
func TestByteBufferGetStringSerial(t *testing.T) {
	testByteBufferGetString(t)
}

// TestByteBufferGetStringConcurrent runs the SetString/String round-trip from
// several goroutines to shake out data races in the pool.
//
// NOTE(review): testByteBufferGetString calls t.Fatalf from these spawned
// goroutines; the testing package requires FailNow/Fatalf to be called from
// the test goroutine. Consider t.Errorf + return - confirm before changing.
func TestByteBufferGetStringConcurrent(t *testing.T) {
	concurrency := 10
	ch := make(chan struct{}, concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			testByteBufferGetString(t)
			ch <- struct{}{}
		}()
	}

	// Wait for every worker, bounding the whole test by per-worker timeouts.
	for i := 0; i < concurrency; i++ {
		select {
		case <-ch:
		case <-time.After(time.Second):
			t.Fatalf("timeout!")
		}
	}
}

+ 32 - 0
vendor/github.com/valyala/bytebufferpool/bytebuffer_timing_test.go

@@ -0,0 +1,32 @@
+package bytebufferpool
+
+import (
+	"bytes"
+	"testing"
+)
+
// BenchmarkByteBufferWrite measures repeated Write calls on ByteBuffer,
// reusing the buffer's capacity across iterations via Reset.
func BenchmarkByteBufferWrite(b *testing.B) {
	s := []byte("foobarbaz")
	b.RunParallel(func(pb *testing.PB) {
		var buf ByteBuffer
		for pb.Next() {
			for i := 0; i < 100; i++ {
				buf.Write(s)
			}
			buf.Reset()
		}
	})
}

// BenchmarkBytesBufferWrite is the stdlib bytes.Buffer baseline for
// BenchmarkByteBufferWrite, performing the identical write pattern.
func BenchmarkBytesBufferWrite(b *testing.B) {
	s := []byte("foobarbaz")
	b.RunParallel(func(pb *testing.PB) {
		var buf bytes.Buffer
		for pb.Next() {
			for i := 0; i < 100; i++ {
				buf.Write(s)
			}
			buf.Reset()
		}
	})
}

+ 7 - 0
vendor/github.com/valyala/bytebufferpool/doc.go

@@ -0,0 +1,7 @@
+// Package bytebufferpool implements a pool of byte buffers
+// with anti-fragmentation protection.
+//
+// The pool may waste limited amount of memory due to fragmentation.
+// This amount equals to the maximum total size of the byte buffers
+// in concurrent use.
+package bytebufferpool

+ 151 - 0
vendor/github.com/valyala/bytebufferpool/pool.go

@@ -0,0 +1,151 @@
+package bytebufferpool
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+)
+
const (
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20 // number of size classes tracked in Pool.calls

	minSize = 1 << minBitSize // smallest size class (64 bytes)
	maxSize = 1 << (minBitSize + steps - 1) // largest size class

	// calibrateCallsThreshold is the number of Put calls recorded for a
	// single size class before the pool re-derives its size limits.
	calibrateCallsThreshold = 42000
	// maxPercentile is the fraction of recorded calls that the calibrated
	// maxSize must cover; buffers above maxSize are not pooled.
	maxPercentile = 0.95
)
+
// Pool represents byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help reducing
// memory waste.
type Pool struct {
	// calls counts Put calls per size class; feeds calibration.
	calls [steps]uint64
	// calibrating is a CAS guard (0 or 1) ensuring only one calibration
	// runs at a time.
	calibrating uint64

	// defaultSize is the capacity given to freshly allocated buffers.
	defaultSize uint64
	// maxSize is the largest buffer capacity the pool retains;
	// 0 means "not calibrated yet - keep everything".
	maxSize uint64

	pool sync.Pool
}
+
// defaultPool backs the package-level Get and Put helpers.
var defaultPool Pool

// Get returns an empty byte buffer from the pool.
//
// Got byte buffer may be returned to the pool via Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }
+
+// Get returns new byte buffer with zero length.
+//
+// The byte buffer may be returned to the pool via Put after the use
+// in order to minimize GC overhead.
+func (p *Pool) Get() *ByteBuffer {
+	v := p.pool.Get()
+	if v != nil {
+		return v.(*ByteBuffer)
+	}
+	return &ByteBuffer{
+		B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
+	}
+}
+
// Put returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }

// Put releases byte buffer obtained via Get to the pool.
//
// The buffer mustn't be accessed after returning to the pool.
func (p *Pool) Put(b *ByteBuffer) {
	idx := index(len(b.B))

	// Record this buffer's size class; once any class accumulates enough
	// samples, re-derive defaultSize/maxSize from the observed workload.
	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
		p.calibrate()
	}

	// Drop oversized buffers instead of pooling them so a few huge buffers
	// cannot pin memory; maxSize == 0 means calibration has not run yet.
	maxSize := int(atomic.LoadUint64(&p.maxSize))
	if maxSize == 0 || cap(b.B) <= maxSize {
		b.Reset()
		p.pool.Put(b)
	}
}
+
// calibrate recomputes defaultSize and maxSize from the per-size-class call
// counters accumulated since the previous calibration.
func (p *Pool) calibrate() {
	// Allow only one calibration at a time; concurrent callers bail out.
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	// Snapshot and reset the counters, opening the next sampling window.
	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	// Order size classes by descending call count (see callSizes.Less).
	sort.Sort(a)

	// The most frequently observed size becomes the default allocation size.
	defaultSize := a[0].size
	maxSize := defaultSize

	// Grow maxSize until the retained classes cover maxPercentile of all
	// recorded calls; rarer, larger buffers will be dropped by Put.
	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	// Release the calibration guard last so readers see the new limits.
	atomic.StoreUint64(&p.calibrating, 0)
}
+
// callSize pairs a buffer size class with the number of Put calls recorded
// for it during the current calibration window.
type callSize struct {
	calls uint64
	size  uint64
}

// callSizes implements sort.Interface, ordering entries by descending call
// count so the most frequently used size class comes first.
type callSizes []callSize

// Len returns the number of recorded size classes.
func (ci callSizes) Len() int { return len(ci) }

// Less reports whether entry i recorded more calls than entry j,
// yielding a descending sort by call count.
func (ci callSizes) Less(i, j int) bool { return ci[i].calls > ci[j].calls }

// Swap exchanges entries i and j.
func (ci callSizes) Swap(i, j int) { ci[i], ci[j] = ci[j], ci[i] }
+
+func index(n int) int {
+	n--
+	n >>= minBitSize
+	idx := 0
+	for n > 0 {
+		n >>= 1
+		idx++
+	}
+	if idx >= steps {
+		idx = steps - 1
+	}
+	return idx
+}

+ 94 - 0
vendor/github.com/valyala/bytebufferpool/pool_test.go

@@ -0,0 +1,94 @@
+package bytebufferpool
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+)
+
// TestIndex checks the size-class mapping at the boundaries of the first two
// classes and at the top cap (everything >= maxSize lands in steps-1).
func TestIndex(t *testing.T) {
	testIndex(t, 0, 0)
	testIndex(t, 1, 0)

	testIndex(t, minSize-1, 0)
	testIndex(t, minSize, 0)
	testIndex(t, minSize+1, 1)

	testIndex(t, 2*minSize-1, 1)
	testIndex(t, 2*minSize, 1)
	testIndex(t, 2*minSize+1, 2)

	testIndex(t, maxSize-1, steps-1)
	testIndex(t, maxSize, steps-1)
	testIndex(t, maxSize+1, steps-1)
}

// testIndex asserts that index(n) returns expectedIdx.
func testIndex(t *testing.T, n, expectedIdx int) {
	idx := index(n)
	if idx != expectedIdx {
		t.Fatalf("unexpected idx for n=%d: %d. Expecting %d", n, idx, expectedIdx)
	}
}
+
// TestPoolCalibrate pushes enough Put traffic through the default pool to
// trigger calibrate at least once (steps*calibrateCallsThreshold calls),
// mixing a dominant buffer size (1004) with occasional random sizes.
func TestPoolCalibrate(t *testing.T) {
	for i := 0; i < steps*calibrateCallsThreshold; i++ {
		n := 1004
		if i%15 == 0 {
			n = rand.Intn(15234)
		}
		testGetPut(t, n)
	}
}
+
// TestPoolVariousSizesSerial exercises Get/Put around every size-class
// boundary from a single goroutine.
func TestPoolVariousSizesSerial(t *testing.T) {
	testPoolVariousSizes(t)
}

// TestPoolVariousSizesConcurrent runs the same size sweep from several
// goroutines to shake out data races in the pool.
//
// NOTE(review): testGetPut (reached via testPoolVariousSizes) calls t.Fatalf
// from these spawned goroutines; the testing package requires Fatalf to be
// called from the test goroutine. Consider t.Errorf + return - confirm first.
func TestPoolVariousSizesConcurrent(t *testing.T) {
	concurrency := 5
	ch := make(chan struct{})
	for i := 0; i < concurrency; i++ {
		go func() {
			testPoolVariousSizes(t)
			ch <- struct{}{}
		}()
	}
	// Wait for every worker, bounding the test by per-worker timeouts.
	for i := 0; i < concurrency; i++ {
		select {
		case <-ch:
		case <-time.After(3 * time.Second):
			t.Fatalf("timeout")
		}
	}
}

// testPoolVariousSizes gets/puts buffers at, just below and just above each
// power-of-two size-class boundary.
func testPoolVariousSizes(t *testing.T) {
	for i := 0; i < steps+1; i++ {
		n := (1 << uint32(i))

		testGetPut(t, n)
		testGetPut(t, n+1)
		testGetPut(t, n-1)

		for j := 0; j < 10; j++ {
			testGetPut(t, j+n)
		}
	}
}
+
// testGetPut acquires a buffer from the default pool, verifies it arrives
// empty, grows it to roughly n bytes and returns it to the pool.
func testGetPut(t *testing.T, n int) {
	bb := Get()
	if len(bb.B) > 0 {
		t.Fatalf("non-empty byte buffer returned from acquire")
	}
	bb.B = allocNBytes(bb.B, n)
	Put(bb)
}
+
// allocNBytes grows dst so that roughly n bytes are in use, reusing the
// existing capacity when it already suffices.
//
// Note: when 0 < cap(dst) < n the resulting length is len(dst)+n-cap(dst),
// not exactly n; callers here only need "about n" bytes.
func allocNBytes(dst []byte, n int) []byte {
	missing := n - cap(dst)
	if missing <= 0 {
		return dst[:n]
	}
	return append(dst, make([]byte, missing)...)
}