
Support JSON/text-based codecs and move benchmarks to their own repo.

Changes:
- Now supports text encodings such as JSON (not just binary ones).
  This involves support for separators and delimiters for maps and slices.
  It also adds support for the encoding.Text(M|Unm)arshaler interfaces.
- Comprehensive, performant JSON support (a usage sketch follows this list).
  This gives performance equivalent to the standard library, but allows decoding
  numbers as int or uint (if the number is clearly not a decimal/float).
- Some minor refactoring (changed internal interface function names, etc.)
- Fix RPC support for JSON by adding rpcEncodeTerminate, which appends
  a space after encoding a value. This matters because numbers do not have a clear delimiter.
- Clean up error messages.
- Update copyright notices.
- Separate benchmarks into a different repository.
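A minimal, hedged sketch of what the new JSON support looks like from the public API. The JsonHandle name and the import path are taken from this commit's tests and the old benchmark output; the snippet itself is illustrative, not lifted from the repository:

    // Hypothetical usage sketch: JsonHandle and the import path come from
    // this commit's tests and old benchmark output; the rest is illustrative.
    package main

    import (
        "fmt"

        "ugorji.net/codec"
    )

    func main() {
        var jh codec.JsonHandle // zero value assumed usable, like the other handles

        in := map[string]interface{}{"name": "codec", "count": uint64(3)}

        // Encode to a []byte with the same NewEncoderBytes API the binary handles use.
        var b []byte
        if err := codec.NewEncoderBytes(&b, &jh).Encode(in); err != nil {
            panic(err)
        }
        fmt.Printf("encoded: %s\n", b)

        // Decode into a nil interface{}. Per the notes above, integral JSON numbers
        // may come back as int64/uint64 rather than float64.
        var out interface{}
        if err := codec.NewDecoderBytes(b, &jh).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("decoded: %#v\n", out)
    }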
Ugorji Nwoke 11 years ago
parent
commit
5605f87eda
20 changed files with 1731 additions and 1092 deletions
  1. codec/0doc.go (+13 -27)
  2. codec/README.md (+12 -58)
  3. codec/bench_test.go (+0 -337)
  4. codec/binc.go (+23 -55)
  5. codec/cbor.go (+23 -47)
  6. codec/cbor_test.go (+1 -1)
  7. codec/codecs_test.go (+52 -131)
  8. codec/decode.go (+218 -71)
  9. codec/encode.go (+139 -87)
  10. codec/ext_dep_test.go (+3 -52)
  11. codec/fast-path.go (+365 -64)
  12. codec/gen-fast-path.go (+36 -11)
  13. codec/helper.go (+45 -24)
  14. codec/json.go (+573 -0)
  15. codec/msgpack.go (+29 -49)
  16. codec/rpc.go (+19 -2)
  17. codec/simple.go (+27 -38)
  18. codec/time.go (+1 -1)
  19. codec/values_test.go (+125 -0)
  20. codec/z_helper_test.go (+27 -37)

+ 13 - 27
codec/0doc.go

@@ -1,15 +1,16 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 /*
-High Performance, Feature-Rich Idiomatic Go codec/encoding library for binc, msgpack and cbor.
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for binc, msgpack, cbor, json.
 
 Supported Serialization formats are:
 
   - msgpack: [https://github.com/msgpack/msgpack]
   - binc:    [http://github.com/ugorji/binc]
   - cbor:    [http://cbor.io] [http://tools.ietf.org/html/rfc7049]
-  - simple:  
+  - simple: 
+  - json:    [http://json.org] [http://tools.ietf.org/html/rfc7159] 
 
 To install:
 
@@ -25,31 +26,24 @@ Rich Feature Set includes:
     Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
     Achieved by extreme care on allocations, recursions, bypassing reflection, zero-copy, etc.
   - Multiple conversions:
-    Package co-erces types where appropriate e.g. decode an int in the stream into a float, etc
+    Package coerces types where appropriate e.g. decode an int in the stream into a float, etc
   - Corner Cases: Overflows, nil maps/slices, nil values in streams are handled correctly
   - Standard field renaming via tags
-  - Encoding from any value
+  - Encoding from any value and decoding into pointer to any value
     (struct, slice, map, primitives, pointers, interface{}, etc)
-  - Decoding into pointer to any value
-    (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
   - Supports extension functions to handle the encode/decode of custom types
-  - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
   - Schema-less decoding
     (decode into a pointer to a nil interface{} as opposed to a typed value).
     Includes Options to configure what specific map or slice type to use
     when decoding an encoded list or map into a nil interface{}
   - Provides a RPC Server and Client Codec for net/rpc communication protocol.
-  - Fast Paths for some container types:
-    For some container types, we circumvent reflection and its associated overhead
-    and allocation costs, and encode/decode directly. These types are:
-	    Slice of all builtin types and interface{},
-	    map of all builtin types and interface{} to string, interface{}, int, int64, uint64
-	    symetrical maps of all builtin types and interface{}
-  - Msgpack Specific:
-      - Options to resolve ambiguities in handling raw bytes (as string or []byte)
-        during schema-less decoding (decoding into a nil interface{})
-      - RPC Server/Client Codec for msgpack-rpc protocol defined at:
-        https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+  - Fast Paths for common maps and slices of built-in types (numbers, string, bool).
+    Reflection (and its associated overhead) is bypassed.
+  - Handle unique idiosynchracies of codecs e.g. 
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved 
+    - For messagepack, provide rpc server/client codec to support  msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
 
 Extension Support
 
@@ -123,14 +117,6 @@ Typical usage model:
     //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
     client := rpc.NewClientWithCodec(rpcCodec)
 
-Representative Benchmark Results
-
-Run the benchmark suite using:
-   go test -bi -bench=. -benchmem
-
-To run full benchmark suite (including against vmsgpack and bson),
-see notes in ext_dep_test.go
-
 */
 package codec
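
The updated docs above mention support for the encoding.(Binary|Text)(M|Unm)arshaler interfaces. A hedged, self-contained sketch of how a text-marshaling type might round-trip through the json handle; the Temp type and its "21.5C" text form are invented for illustration, and only the interfaces and the JsonHandle/Encoder/Decoder API come from the package:

    // Illustration of the encoding.Text(M|Unm)arshaler support: Temp and its
    // text form are invented here; the codec API usage mirrors the tests.
    package main

    import (
        "fmt"
        "strconv"
        "strings"

        "ugorji.net/codec"
    )

    // Temp marshals itself as a text token like "21.5C".
    type Temp float64

    func (t Temp) MarshalText() ([]byte, error) {
        return []byte(strconv.FormatFloat(float64(t), 'f', 1, 64) + "C"), nil
    }

    func (t *Temp) UnmarshalText(b []byte) error {
        f, err := strconv.ParseFloat(strings.TrimSuffix(string(b), "C"), 64)
        if err != nil {
            return err
        }
        *t = Temp(f)
        return nil
    }

    func main() {
        var jh codec.JsonHandle
        var b []byte
        // With a text-based handle, the encoder is expected to call MarshalText
        // and emit the result as a JSON string.
        if err := codec.NewEncoderBytes(&b, &jh).Encode(Temp(21.5)); err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", b) // expected: "21.5C"

        var t2 Temp
        if err := codec.NewDecoderBytes(b, &jh).Decode(&t2); err != nil {
            panic(err)
        }
        fmt.Println(float64(t2)) // 21.5
    }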
 

+ 12 - 58
codec/README.md

@@ -1,13 +1,14 @@
 # Codec
 
-High-Performance, Feature-Rich Idiomatic Go codec/encoding library for binc, msgpack and cbor.
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for binc, msgpack, cbor, json.
 
 Supported Serialization formats are:
 
   - msgpack: [https://github.com/msgpack/msgpack]
   - binc:    [http://github.com/ugorji/binc]
-  - cbor:    [http://cbor.io]
+  - cbor:    [http://cbor.io] [http://tools.ietf.org/html/rfc7049]
   - simple: 
+  - json:    [http://json.org] [http://tools.ietf.org/html/rfc7159] 
 
 To install:
 
@@ -25,31 +26,24 @@ Rich Feature Set includes:
     Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
     Achieved by extreme care on allocations, recursions, bypassing reflection, zero-copy, etc.
   - Multiple conversions:
-    Package co-erces types where appropriate e.g. decode an int in the stream into a float, etc
+    Package coerces types where appropriate e.g. decode an int in the stream into a float, etc
   - Corner Cases: Overflows, nil maps/slices, nil values in streams are handled correctly
   - Standard field renaming via tags
-  - Encoding from any value
+  - Encoding from any value and decoding into pointer to any value
     (struct, slice, map, primitives, pointers, interface{}, etc)
-  - Decoding into pointer to any value
-    (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
   - Supports extension functions to handle the encode/decode of custom types
-  - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
   - Schema-less decoding
     (decode into a pointer to a nil interface{} as opposed to a typed value).
     Includes Options to configure what specific map or slice type to use
     when decoding an encoded list or map into a nil interface{}
   - Provides a RPC Server and Client Codec for net/rpc communication protocol.
-  - Fast Paths for some container types:
-    For some container types, we circumvent reflection and its associated overhead
-    and allocation costs, and encode/decode directly. These types are:
-	    Slice of all builtin types and interface{},
-	    map of all builtin types and interface{} to string, interface{}, int, int64, uint64
-	    symetrical maps of all builtin types and interface{}
-  - Msgpack Specific:
-      - Options to resolve ambiguities in handling raw bytes (as string or []byte)
-        during schema-less decoding (decoding into a nil interface{})
-      - RPC Server/Client Codec for msgpack-rpc protocol defined at:
-        https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+  - Fast Paths for common maps and slices of built-in types (numbers, string, bool).
+    Reflection (and its associated overhead) is bypassed.
+  - Handle unique idiosynchracies of codecs e.g. 
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved 
+    - For messagepack, provide rpc server/client codec to support  msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
 
 ## Extension Support
 
@@ -123,43 +117,3 @@ Typical usage model:
     //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
     client := rpc.NewClientWithCodec(rpcCodec)
 
-## Representative Benchmark Results
-
-A sample run of benchmark using "go test -bi -bench=. -benchmem":
-
-    /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT)
-    
-    ..............................................
-    BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT
-    To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=."
-    Benchmark: 
-    	Struct recursive Depth:             1
-    	ApproxDeepSize Of benchmark Struct: 4694 bytes
-    Benchmark One-Pass Run:
-    	 v-msgpack: len: 1600 bytes
-    	      bson: len: 3025 bytes
-    	   msgpack: len: 1560 bytes
-    	      binc: len: 1187 bytes
-    	       gob: len: 1972 bytes
-    	      json: len: 2538 bytes
-    ..............................................
-    PASS
-    Benchmark__Msgpack____Encode	   50000	     54359 ns/op	   14953 B/op	      83 allocs/op
-    Benchmark__Msgpack____Decode	   10000	    106531 ns/op	   14990 B/op	     410 allocs/op
-    Benchmark__Binc_NoSym_Encode	   50000	     53956 ns/op	   14966 B/op	      83 allocs/op
-    Benchmark__Binc_NoSym_Decode	   10000	    103751 ns/op	   14529 B/op	     386 allocs/op
-    Benchmark__Binc_Sym___Encode	   50000	     65961 ns/op	   17130 B/op	      88 allocs/op
-    Benchmark__Binc_Sym___Decode	   10000	    106310 ns/op	   15857 B/op	     287 allocs/op
-    Benchmark__Gob________Encode	   10000	    135944 ns/op	   21189 B/op	     237 allocs/op
-    Benchmark__Gob________Decode	    5000	    405390 ns/op	   83460 B/op	    1841 allocs/op
-    Benchmark__Json_______Encode	   20000	     79412 ns/op	   13874 B/op	     102 allocs/op
-    Benchmark__Json_______Decode	   10000	    247979 ns/op	   14202 B/op	     493 allocs/op
-    Benchmark__Bson_______Encode	   10000	    121762 ns/op	   27814 B/op	     514 allocs/op
-    Benchmark__Bson_______Decode	   10000	    162126 ns/op	   16514 B/op	     789 allocs/op
-    Benchmark__VMsgpack___Encode	   50000	     69155 ns/op	   12370 B/op	     344 allocs/op
-    Benchmark__VMsgpack___Decode	   10000	    151609 ns/op	   20307 B/op	     571 allocs/op
-    ok  	ugorji.net/codec	30.827s
-
-To run full benchmark suite (including against vmsgpack and bson), 
-see notes in ext\_dep\_test.go
-

+ 0 - 337
codec/bench_test.go

@@ -1,337 +0,0 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a BSD-style license found in the LICENSE file.
-
-package codec
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"reflect"
-	"runtime"
-	"testing"
-	"time"
-)
-
-// Sample way to run:
-// go test -bi -bv -bd=1 -benchmem -bench=.
-
-var (
-	_       = fmt.Printf
-	benchTs *TestStruc
-
-	approxSize int
-
-	benchDoInitBench     bool
-	benchVerify          bool
-	benchUnscientificRes bool = false
-	//depth of 0 maps to ~400bytes json-encoded string, 1 maps to ~1400 bytes, etc
-	//For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable.
-	benchDepth     int
-	benchInitDebug bool
-	benchCheckers  []benchChecker
-)
-
-type benchEncFn func(interface{}) ([]byte, error)
-type benchDecFn func([]byte, interface{}) error
-type benchIntfFn func() interface{}
-
-type benchChecker struct {
-	name     string
-	encodefn benchEncFn
-	decodefn benchDecFn
-}
-
-func benchInitFlags() {
-	flag.BoolVar(&benchInitDebug, "bg", false, "Bench Debug")
-	flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth")
-	flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init")
-	flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark")
-	flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark")
-}
-
-func benchInit() {
-	benchTs = newTestStruc(benchDepth, true)
-	approxSize = approxDataSize(reflect.ValueOf(benchTs))
-	bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1)
-	if bytesLen < approxSize {
-		bytesLen = approxSize
-	}
-
-	benchCheckers = append(benchCheckers,
-		benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn},
-		benchChecker{"binc-nosym", fnBincNoSymEncodeFn, fnBincNoSymDecodeFn},
-		benchChecker{"binc-sym", fnBincSymEncodeFn, fnBincSymDecodeFn},
-		benchChecker{"simple", fnSimpleEncodeFn, fnSimpleDecodeFn},
-		benchChecker{"cbor", fnCborEncodeFn, fnCborDecodeFn},
-		benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn},
-		benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn},
-	)
-	if benchDoInitBench {
-		runBenchInit()
-	}
-}
-
-func runBenchInit() {
-	logT(nil, "..............................................")
-	logT(nil, "BENCHMARK INIT: %v", time.Now())
-	logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, Simple, Cbor, JSON, GOB, etc), "+
-		"use: \"go test -bench=.\"")
-	logT(nil, "Benchmark: ")
-	logT(nil, "\tStruct recursive Depth:             %d", benchDepth)
-	if approxSize > 0 {
-		logT(nil, "\tApproxDeepSize Of benchmark Struct: %d bytes", approxSize)
-	}
-	if benchUnscientificRes {
-		logT(nil, "Benchmark One-Pass Run (with Unscientific Encode/Decode times): ")
-	} else {
-		logT(nil, "Benchmark One-Pass Run:")
-	}
-	for _, bc := range benchCheckers {
-		doBenchCheck(bc.name, bc.encodefn, bc.decodefn)
-	}
-	logT(nil, "..............................................")
-	if benchInitDebug {
-		logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs)
-	}
-}
-
-func fnBenchNewTs() interface{} {
-	return new(TestStruc)
-}
-
-func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) {
-	runtime.GC()
-	tnow := time.Now()
-	buf, err := encfn(benchTs)
-	if err != nil {
-		logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err)
-	}
-	encDur := time.Now().Sub(tnow)
-	encLen := len(buf)
-	runtime.GC()
-	if !benchUnscientificRes {
-		logT(nil, "\t%10s: len: %d bytes\n", name, encLen)
-		return
-	}
-	tnow = time.Now()
-	if err = decfn(buf, new(TestStruc)); err != nil {
-		logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err)
-	}
-	decDur := time.Now().Sub(tnow)
-	logT(nil, "\t%10s: len: %d bytes, encode: %v, decode: %v\n", name, encLen, encDur, decDur)
-}
-
-func fnBenchmarkEncode(b *testing.B, encName string, ts interface{}, encfn benchEncFn) {
-	runtime.GC()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, err := encfn(ts)
-		if err != nil {
-			logT(b, "Error encoding benchTs: %s: %v", encName, err)
-			b.FailNow()
-		}
-	}
-}
-
-func fnBenchmarkDecode(b *testing.B, encName string, ts interface{},
-	encfn benchEncFn, decfn benchDecFn, newfn benchIntfFn,
-) {
-	buf, err := encfn(ts)
-	if err != nil {
-		logT(b, "Error encoding benchTs: %s: %v", encName, err)
-		b.FailNow()
-	}
-	runtime.GC()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		ts = newfn()
-		if err = decfn(buf, ts); err != nil {
-			logT(b, "Error decoding into new TestStruc: %s: %v", encName, err)
-			b.FailNow()
-		}
-		if benchVerify {
-			if vts, vok := ts.(*TestStruc); vok {
-				verifyTsTree(b, vts)
-			}
-		}
-	}
-}
-
-func verifyTsTree(b *testing.B, ts *TestStruc) {
-	var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc
-	ts0 = ts
-
-	if benchDepth > 0 {
-		ts1m, ts1s = verifyCheckAndGet(b, ts0)
-	}
-
-	if benchDepth > 1 {
-		ts2m, ts2s = verifyCheckAndGet(b, ts1m)
-	}
-	for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} {
-		if tsx != nil {
-			verifyOneOne(b, tsx)
-		}
-	}
-}
-
-func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) {
-	// if len(ts1m.Ms) <= 2 {
-	// 	logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms))
-	// 	b.FailNow()
-	// }
-	if len(ts0.Its) == 0 {
-		logT(b, "Error: ts0.Islice len should be > 0. Got: %v", len(ts0.Its))
-		b.FailNow()
-	}
-	ts1m = ts0.Mtsptr["0"]
-	ts1s = ts0.Its[0]
-	if ts1m == nil || ts1s == nil {
-		logT(b, "Error: At benchDepth 1, No *TestStruc found")
-		b.FailNow()
-	}
-	return
-}
-
-func verifyOneOne(b *testing.B, ts *TestStruc) {
-	if ts.I64slice[2] != int64(3) {
-		logT(b, "Error: Decode failed by checking values")
-		b.FailNow()
-	}
-}
-
-func fnMsgpackEncodeFn(ts interface{}) (bs []byte, err error) {
-	err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts)
-	return
-}
-
-func fnMsgpackDecodeFn(buf []byte, ts interface{}) error {
-	return NewDecoderBytes(buf, testMsgpackH).Decode(ts)
-}
-
-func fnBincEncodeFn(ts interface{}, sym AsSymbolFlag) (bs []byte, err error) {
-	tSym := testBincH.AsSymbols
-	testBincH.AsSymbols = sym
-	err = NewEncoderBytes(&bs, testBincH).Encode(ts)
-	testBincH.AsSymbols = tSym
-	return
-}
-
-func fnBincDecodeFn(buf []byte, ts interface{}, sym AsSymbolFlag) (err error) {
-	tSym := testBincH.AsSymbols
-	testBincH.AsSymbols = sym
-	err = NewDecoderBytes(buf, testBincH).Decode(ts)
-	testBincH.AsSymbols = tSym
-	return
-}
-
-func fnBincNoSymEncodeFn(ts interface{}) (bs []byte, err error) {
-	return fnBincEncodeFn(ts, AsSymbolNone)
-}
-
-func fnBincNoSymDecodeFn(buf []byte, ts interface{}) error {
-	return fnBincDecodeFn(buf, ts, AsSymbolNone)
-}
-
-func fnBincSymEncodeFn(ts interface{}) (bs []byte, err error) {
-	return fnBincEncodeFn(ts, AsSymbolAll)
-}
-
-func fnBincSymDecodeFn(buf []byte, ts interface{}) error {
-	return fnBincDecodeFn(buf, ts, AsSymbolAll)
-}
-
-func fnSimpleEncodeFn(ts interface{}) (bs []byte, err error) {
-	err = NewEncoderBytes(&bs, testSimpleH).Encode(ts)
-	return
-}
-
-func fnSimpleDecodeFn(buf []byte, ts interface{}) error {
-	return NewDecoderBytes(buf, testSimpleH).Decode(ts)
-}
-
-func fnCborEncodeFn(ts interface{}) (bs []byte, err error) {
-	err = NewEncoderBytes(&bs, testCborH).Encode(ts)
-	return
-}
-
-func fnCborDecodeFn(buf []byte, ts interface{}) error {
-	return NewDecoderBytes(buf, testCborH).Decode(ts)
-}
-
-func fnGobEncodeFn(ts interface{}) ([]byte, error) {
-	bbuf := new(bytes.Buffer)
-	err := gob.NewEncoder(bbuf).Encode(ts)
-	return bbuf.Bytes(), err
-}
-
-func fnGobDecodeFn(buf []byte, ts interface{}) error {
-	return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts)
-}
-
-func fnJsonEncodeFn(ts interface{}) ([]byte, error) {
-	return json.Marshal(ts)
-}
-
-func fnJsonDecodeFn(buf []byte, ts interface{}) error {
-	return json.Unmarshal(buf, ts)
-}
-
-func Benchmark__Msgpack____Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "msgpack", benchTs, fnMsgpackEncodeFn)
-}
-
-func Benchmark__Msgpack____Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "msgpack", benchTs, fnMsgpackEncodeFn, fnMsgpackDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Binc_NoSym_Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "binc", benchTs, fnBincNoSymEncodeFn)
-}
-
-func Benchmark__Binc_NoSym_Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "binc", benchTs, fnBincNoSymEncodeFn, fnBincNoSymDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Binc_Sym___Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "binc", benchTs, fnBincSymEncodeFn)
-}
-
-func Benchmark__Binc_Sym___Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "binc", benchTs, fnBincSymEncodeFn, fnBincSymDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Simple_____Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "simple", benchTs, fnSimpleEncodeFn)
-}
-
-func Benchmark__Simple_____Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "simple", benchTs, fnSimpleEncodeFn, fnSimpleDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Cbor_______Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "cbor", benchTs, fnCborEncodeFn)
-}
-
-func Benchmark__Cbor_______Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "cbor", benchTs, fnCborEncodeFn, fnCborDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Gob________Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "gob", benchTs, fnGobEncodeFn)
-}
-
-func Benchmark__Gob________Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "gob", benchTs, fnGobEncodeFn, fnGobDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__Json_______Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "json", benchTs, fnJsonEncodeFn)
-}
-
-func Benchmark__Json_______Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "json", benchTs, fnJsonEncodeFn, fnJsonDecodeFn, fnBenchNewTs)
-}

+ 23 - 55
codec/binc.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -65,6 +65,8 @@ type bincEncDriver struct {
 	m map[string]uint16 // symbols
 	s uint32            // symbols sequencer
 	b [8]byte
+	encNoMapArrayEnd
+	encNoMapArraySeparator
 }
 
 func (e *bincEncDriver) isBuiltinType(rt uintptr) bool {
@@ -193,11 +195,11 @@ func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
 	e.w.writen1(xtag)
 }
 
-func (e *bincEncDriver) encodeArrayPreamble(length int) {
+func (e *bincEncDriver) encodeArrayStart(length int) {
 	e.encLen(bincVdArray<<4, uint64(length))
 }
 
-func (e *bincEncDriver) encodeMapPreamble(length int) {
+func (e *bincEncDriver) encodeMapStart(length int) {
 	e.encLen(bincVdMap<<4, uint64(length))
 }
 
@@ -328,6 +330,8 @@ type bincDecDriver struct {
 	vd     byte
 	vs     byte
 	noStreamingCodec
+	decNoMapArrayEnd
+	decNoMapArraySeparator
 	b [8]byte
 	m map[uint32]string // symbols (use uint32 as key, as map optimizes for it)
 }
@@ -343,57 +347,21 @@ func (d *bincDecDriver) initReadNext() {
 	d.bdType = valueTypeUnset
 }
 
-func (d *bincDecDriver) currentEncodedType() valueType {
-	if d.bdType == valueTypeUnset {
-		switch d.vd {
-		case bincVdSpecial:
-			switch d.vs {
-			case bincSpNil:
-				d.bdType = valueTypeNil
-			case bincSpFalse, bincSpTrue:
-				d.bdType = valueTypeBool
-			case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat:
-				d.bdType = valueTypeFloat
-			case bincSpZero:
-				if d.h.SignedInteger {
-					d.bdType = valueTypeInt
-				} else {
-					d.bdType = valueTypeUint
-				}
-			case bincSpNegOne:
-				d.bdType = valueTypeInt
-			default:
-				decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs)
-			}
-		case bincVdSmallInt, bincVdPosInt:
-			if d.h.SignedInteger {
-				d.bdType = valueTypeInt
-			} else {
-				d.bdType = valueTypeUint
-			}
-		case bincVdNegInt:
-			d.bdType = valueTypeInt
-		case bincVdFloat:
-			d.bdType = valueTypeFloat
-		case bincVdString:
-			d.bdType = valueTypeString
-		case bincVdSymbol:
-			d.bdType = valueTypeSymbol
-		case bincVdByteArray:
-			d.bdType = valueTypeBytes
-		case bincVdTimestamp:
-			d.bdType = valueTypeTimestamp
-		case bincVdCustomExt:
-			d.bdType = valueTypeExt
-		case bincVdArray:
-			d.bdType = valueTypeArray
-		case bincVdMap:
-			d.bdType = valueTypeMap
-		default:
-			decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd)
-		}
+func (d *bincDecDriver) isContainerType(vt valueType) bool {
+	switch vt {
+	case valueTypeNil:
+		return d.vd == bincVdSpecial && d.vs == bincSpNil
+	case valueTypeBytes:
+		return d.vd == bincVdByteArray
+	case valueTypeString:
+		return d.vd == bincVdString
+	case valueTypeArray:
+		return d.vd == bincVdArray
+	case valueTypeMap:
+		return d.vd == bincVdMap
 	}
-	return d.bdType
+	decErr("isContainerType: unsupported parameter: %v", vt)
+	panic("unreachable")
 }
 
 func (d *bincDecDriver) tryDecodeAsNil() bool {
@@ -572,7 +540,7 @@ func (d *bincDecDriver) decodeBool() (b bool) {
 	return
 }
 
-func (d *bincDecDriver) readMapLen() (length int) {
+func (d *bincDecDriver) readMapStart() (length int) {
 	if d.vd != bincVdMap {
 		decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
 	}
@@ -581,7 +549,7 @@ func (d *bincDecDriver) readMapLen() (length int) {
 	return
 }
 
-func (d *bincDecDriver) readArrayLen() (length int) {
+func (d *bincDecDriver) readArrayStart() (length int) {
 	if d.vd != bincVdArray {
 		decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
 	}

+ 23 - 47
codec/cbor.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -63,6 +63,8 @@ type cborEncDriver struct {
 	w encWriter
 	h *CborHandle
 	noBuiltInTypes
+	encNoMapArrayEnd
+	encNoMapArraySeparator
 }
 
 func (e *cborEncDriver) encodeNil() {
@@ -141,11 +143,11 @@ func (e *cborEncDriver) encodeRawExt(re *RawExt, en *Encoder) {
 	}
 }
 
-func (e *cborEncDriver) encodeArrayPreamble(length int) {
+func (e *cborEncDriver) encodeArrayStart(length int) {
 	e.encLen(cborBaseArray, length)
 }
 
-func (e *cborEncDriver) encodeMapPreamble(length int) {
+func (e *cborEncDriver) encodeMapStart(length int) {
 	e.encLen(cborBaseMap, length)
 }
 
@@ -172,6 +174,8 @@ type cborDecDriver struct {
 	bdType valueType
 	bd     byte
 	noBuiltInTypes
+	decNoMapArrayEnd
+	decNoMapArraySeparator
 }
 
 func (d *cborDecDriver) initReadNext() {
@@ -183,49 +187,21 @@ func (d *cborDecDriver) initReadNext() {
 	d.bdType = valueTypeUnset
 }
 
-func (d *cborDecDriver) currentEncodedType() valueType {
-	if d.bdType == valueTypeUnset {
-		switch d.bd {
-		case cborBdNil:
-			d.bdType = valueTypeNil
-		case cborBdFalse, cborBdTrue:
-			d.bdType = valueTypeBool
-		case cborBdFloat16, cborBdFloat32, cborBdFloat64:
-			d.bdType = valueTypeFloat
-		case cborBdIndefiniteBytes:
-			d.bdType = valueTypeBytes
-		case cborBdIndefiniteString:
-			d.bdType = valueTypeString
-		case cborBdIndefiniteArray:
-			d.bdType = valueTypeArray
-		case cborBdIndefiniteMap:
-			d.bdType = valueTypeMap
-		default:
-			switch {
-			case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
-				if d.h.SignedInteger {
-					d.bdType = valueTypeInt
-				} else {
-					d.bdType = valueTypeUint
-				}
-			case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
-				d.bdType = valueTypeInt
-			case d.bd >= cborBaseBytes && d.bd < cborBaseString:
-				d.bdType = valueTypeBytes
-			case d.bd >= cborBaseString && d.bd < cborBaseArray:
-				d.bdType = valueTypeString
-			case d.bd >= cborBaseArray && d.bd < cborBaseMap:
-				d.bdType = valueTypeArray
-			case d.bd >= cborBaseMap && d.bd < cborBaseTag:
-				d.bdType = valueTypeMap
-			case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
-				d.bdType = valueTypeExt
-			default:
-				decErr("currentEncodedType: Unrecognized d.bd: 0x%x", d.bd)
-			}
-		}
+func (d *cborDecDriver) isContainerType(vt valueType) bool {
+	switch vt {
+	case valueTypeNil:
+		return d.bd == cborBdNil
+	case valueTypeBytes:
+		return d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString)
+	case valueTypeString:
+		return d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray)
+	case valueTypeArray:
+		return d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap)
+	case valueTypeMap:
+		return d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag)
 	}
-	return d.bdType
+	decErr("isContainerType: unsupported parameter: %v", vt)
+	panic("unreachable")
 }
 
 func (d *cborDecDriver) tryDecodeAsNil() bool {
@@ -335,7 +311,7 @@ func (d *cborDecDriver) decodeBool() (b bool) {
 	return
 }
 
-func (d *cborDecDriver) readMapLen() (length int) {
+func (d *cborDecDriver) readMapStart() (length int) {
 	d.bdRead = false
 	if d.bd == cborBdIndefiniteMap {
 		return -1
@@ -343,7 +319,7 @@ func (d *cborDecDriver) readMapLen() (length int) {
 	return d.decLen()
 }
 
-func (d *cborDecDriver) readArrayLen() (length int) {
+func (d *cborDecDriver) readArrayStart() (length int) {
 	d.bdRead = false
 	if d.bd == cborBdIndefiniteArray {
 		return -1

+ 1 - 1
codec/cbor_test.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec

+ 52 - 131
codec/codecs_test.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -77,11 +77,7 @@ var (
 	tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different)
 	tablePythonVerify  []interface{} // for verifying for python, since Python sometimes
 	// will encode a float32 as float64, or large int as uint
-	testRpcInt   = new(TestRpcInt)
-	testMsgpackH = &MsgpackHandle{}
-	testBincH    = &BincHandle{}
-	testSimpleH  = &SimpleHandle{}
-	testCborH    = &CborHandle{}
+	testRpcInt = new(TestRpcInt)
 )
 
 func testInitFlags() {
@@ -92,52 +88,6 @@ func testInitFlags() {
 	flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option")
 }
 
-type AnonInTestStruc struct {
-	AS        string
-	AI64      int64
-	AI16      int16
-	AUi64     uint64
-	ASslice   []string
-	AI64slice []int64
-}
-
-type TestStruc struct {
-	S    string
-	I64  int64
-	I16  int16
-	Ui64 uint64
-	Ui8  uint8
-	B    bool
-	By   byte
-
-	Sslice    []string
-	I64slice  []int64
-	I16slice  []int16
-	Ui64slice []uint64
-	Ui8slice  []uint8
-	Bslice    []bool
-	Byslice   []byte
-
-	Islice    []interface{}
-	Iptrslice []*int64
-
-	AnonInTestStruc
-
-	//M map[interface{}]interface{}  `json:"-",bson:"-"`
-	Ms    map[string]interface{}
-	Msi64 map[string]int64
-
-	Nintf      interface{} //don't set this, so we can test for nil
-	T          time.Time
-	Nmap       map[string]bool //don't set this, so we can test for nil
-	Nslice     []byte          //don't set this, so we can test for nil
-	Nint64     *int64          //don't set this, so we can test for nil
-	Mtsptr     map[string]*TestStruc
-	Mts        map[string]TestStruc
-	Its        []*TestStruc
-	Nteststruc *TestStruc
-}
-
 type TestABC struct {
 	A, B, C string
 }
@@ -158,14 +108,14 @@ func (r *TestRpcInt) Echo123(args []string, res *string) error {
 	return nil
 }
 
-type testCborTimeExt struct{}
+type testUnixNanoTimeExt struct{}
 
-func (x testCborTimeExt) WriteExt(reflect.Value) []byte { panic("unsupported") }
-func (x testCborTimeExt) ReadExt(reflect.Value, []byte) { panic("unsupported") }
-func (x testCborTimeExt) ConvertExt(rv reflect.Value) interface{} {
+func (x testUnixNanoTimeExt) WriteExt(reflect.Value) []byte { panic("unsupported") }
+func (x testUnixNanoTimeExt) ReadExt(reflect.Value, []byte) { panic("unsupported") }
+func (x testUnixNanoTimeExt) ConvertExt(rv reflect.Value) interface{} {
 	return rv.Interface().(time.Time).UTC().UnixNano()
 }
-func (x testCborTimeExt) UpdateExt(rv reflect.Value, v interface{}) {
+func (x testUnixNanoTimeExt) UpdateExt(rv reflect.Value, v interface{}) {
 	var tt time.Time
 	switch v2 := v.(type) {
 	case int64:
@@ -299,6 +249,7 @@ func testInit() {
 		fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0)
 	}
 
+	testJsonH.StructToArray = testStructToArray
 	testCborH.StructToArray = testStructToArray
 	testSimpleH.StructToArray = testStructToArray
 	testBincH.StructToArray = testStructToArray
@@ -327,7 +278,8 @@ func testInit() {
 	// add extensions for msgpack, simple for time.Time, so we can encode/decode same way.
 	testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt)
 	testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt)
-	testCborH.SetExt(timeTyp, 1, &testCborTimeExt{})
+	testCborH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{})
+	testJsonH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{})
 
 	primitives := []interface{}{
 		int8(-8),
@@ -477,72 +429,6 @@ func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name s
 	return
 }
 
-func newTestStruc(depth int, bench bool) (ts *TestStruc) {
-	var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
-
-	ts = &TestStruc{
-		S:    "some string",
-		I64:  math.MaxInt64 * 2 / 3, // 64,
-		I16:  16,
-		Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it
-		Ui8:  160,
-		B:    true,
-		By:   5,
-
-		Sslice:    []string{"one", "two", "three"},
-		I64slice:  []int64{1, 2, 3},
-		I16slice:  []int16{4, 5, 6},
-		Ui64slice: []uint64{137, 138, 139},
-		Ui8slice:  []uint8{210, 211, 212},
-		Bslice:    []bool{true, false, true, false},
-		Byslice:   []byte{13, 14, 15},
-
-		Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)},
-
-		Ms: map[string]interface{}{
-			"true":     "true",
-			"int64(9)": false,
-		},
-		Msi64: map[string]int64{
-			"one": 1,
-			"two": 2,
-		},
-		T: timeToCompare1,
-		AnonInTestStruc: AnonInTestStruc{
-			AS:        "A-String",
-			AI64:      64,
-			AI16:      16,
-			AUi64:     64,
-			ASslice:   []string{"Aone", "Atwo", "Athree"},
-			AI64slice: []int64{1, 2, 3},
-		},
-	}
-	//For benchmarks, some things will not work.
-	if !bench {
-		//json and bson require string keys in maps
-		//ts.M = map[interface{}]interface{}{
-		//	true: "true",
-		//	int8(9): false,
-		//}
-		//gob cannot encode nil in element in array (encodeArray: nil element)
-		ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
-		// ts.Iptrslice = nil
-	}
-	if depth > 0 {
-		depth--
-		if ts.Mtsptr == nil {
-			ts.Mtsptr = make(map[string]*TestStruc)
-		}
-		if ts.Mts == nil {
-			ts.Mts = make(map[string]TestStruc)
-		}
-		ts.Mtsptr["0"] = newTestStruc(depth, bench)
-		ts.Mts["0"] = *(ts.Mtsptr["0"])
-		ts.Its = append(ts.Its, ts.Mtsptr["0"])
-	}
-	return
-}
-
 // doTestCodecTableOne allows us test for different variations based on arguments passed.
 func doTestCodecTableOne(t *testing.T, testNil bool, h Handle,
 	vs []interface{}, vsVerify []interface{}) {
@@ -556,8 +442,12 @@ func doTestCodecTableOne(t *testing.T, testNil bool, h Handle,
 		if err != nil {
 			continue
 		}
-		logT(t, "         Encoded bytes: len: %v, %v\n", len(b0), b0)
-
+		if h.isBinaryEncoding() {
+			logT(t, "         Encoded bytes: len: %v, %v\n", len(b0), b0)
+		} else {
+			logT(t, "         Encoded string: len: %v, %v\n", len(string(b0)), string(b0))
+			// println("########### encoded string: " + string(b0))
+		}
 		var v1 interface{}
 
 		if testNil {
@@ -606,6 +496,8 @@ func testCodecTableOne(t *testing.T, h Handle) {
 	// func TestMsgpackAllExperimental(t *testing.T) {
 	// dopts := testDecOpts(nil, nil, false, true, true),
 
+	idxTime, numPrim, numMap := 19, 23, 4
+	//println("#################")
 	switch v := h.(type) {
 	case *MsgpackHandle:
 		var oldWriteExt, oldRawToString bool
@@ -613,15 +505,20 @@ func testCodecTableOne(t *testing.T, h Handle) {
 		oldRawToString, v.RawToString = v.RawToString, true
 		doTestCodecTableOne(t, false, h, table, tableVerify)
 		v.WriteExt, v.RawToString = oldWriteExt, oldRawToString
+	case *JsonHandle:
+		//skip []interface{} containing time.Time, as it encodes as a number, but cannot decode back to time.Time.
+		//As there is no real support for extension tags in json, this must be skipped.
+		doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim])
+		doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
 	default:
 		doTestCodecTableOne(t, false, h, table, tableVerify)
 	}
+	//println("%%%%%%%%%%%%%%%%%")
 	// func TestMsgpackAll(t *testing.T) {
-	idxTime, numPrim, numMap := 19, 23, 4
 
-	//skip []interface{} containing time.Time
-	doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim])
-	doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
+	// //skip []interface{} containing time.Time
+	// doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim])
+	// doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
 	// func TestMsgpackNilStringMap(t *testing.T) {
 	var oldMapType reflect.Type
 	v := h.getBasicHandle()
@@ -664,7 +561,11 @@ func testCodecMiscOne(t *testing.T, h Handle) {
 		logT(t, "------- Size must be > 40. Size: %d", len(b))
 		t.FailNow()
 	}
-	logT(t, "------- b: %v", b)
+	if h.isBinaryEncoding() {
+		logT(t, "------- b: %v", b)
+	} else {
+		logT(t, "------- b: %s", b)
+	}
 	ts2 := new(TestStruc)
 	err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct")
 	if ts2.I64 != math.MaxInt64*2/3 {
@@ -781,6 +682,7 @@ func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs ti
 	// opts.RawToString = false
 	serverExitChan := make(chan bool, 1)
 	var serverExitFlag uint64 = 0
+	//println("111111111111111")
 	serverFn := func() {
 		for {
 			conn1, err1 := ln.Accept()
@@ -803,6 +705,7 @@ func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs ti
 	clientFn := func(cc rpc.ClientCodec) {
 		cl := rpc.NewClientWithCodec(cc)
 		defer cl.Close()
+		//	defer func() { println("##### client closing"); cl.Close() }()
 		var up, sq, mult int
 		var rstr string
 		// log("Calling client")
@@ -1040,6 +943,20 @@ func TestCborCodecsEmbeddedPointer(t *testing.T) {
 	testCodecEmbeddedPointer(t, testCborH)
 }
 
+func TestJsonCodecsTable(t *testing.T) {
+	testCodecTableOne(t, testJsonH)
+}
+
+func TestJsonCodecsMisc(t *testing.T) {
+	testCodecMiscOne(t, testJsonH)
+}
+
+func TestJsonCodecsEmbeddedPointer(t *testing.T) {
+	testCodecEmbeddedPointer(t, testJsonH)
+}
+
+// ----- RPC -----
+
 func TestBincRpcGo(t *testing.T) {
 	doTestRpcOne(t, GoRpc, testBincH, true, 0)
 }
@@ -1056,6 +973,10 @@ func TestCborRpcGo(t *testing.T) {
 	doTestRpcOne(t, GoRpc, testCborH, true, 0)
 }
 
+func TestJsonRpcGo(t *testing.T) {
+	doTestRpcOne(t, GoRpc, testJsonH, true, 0)
+}
+
 func TestMsgpackRpcSpec(t *testing.T) {
 	doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0)
 }
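
The new TestJsonRpcGo test above pairs GoRpc with the json handle. A hedged sketch of the matching client-side wiring, following the "Typical usage model" from the package docs; the address and arguments are placeholders, GoRpc.ClientCodec is assumed to mirror the MsgpackSpecRpc form shown in the docs, and TestRpcInt.Echo123 is the service from codecs_test.go:

    // Hypothetical client-side wiring for net/rpc over the json handle.
    package main

    import (
        "fmt"
        "net"
        "net/rpc"

        "ugorji.net/codec"
    )

    func main() {
        var jh codec.JsonHandle

        conn, err := net.Dial("tcp", "127.0.0.1:5555") // placeholder address
        if err != nil {
            panic(err)
        }
        // The trailing space written by rpcEncodeTerminate (per the commit message)
        // keeps back-to-back JSON numbers on the wire unambiguous.
        rpcCodec := codec.GoRpc.ClientCodec(conn, &jh)
        client := rpc.NewClientWithCodec(rpcCodec)
        defer client.Close()

        var res string
        if err := client.Call("TestRpcInt.Echo123", []string{"a", "b"}, &res); err != nil {
            panic(err)
        }
        fmt.Println(res)
    }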

+ 218 - 71
codec/decode.go

@@ -1,9 +1,10 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
 
 import (
+	"encoding"
 	"io"
 	"reflect"
 	// "runtime/debug"
@@ -22,6 +23,7 @@ var fastpathsDec = make(map[uintptr]func(*decFnInfo, reflect.Value))
 // decReader abstracts the reading source, allowing implementations that can
 // read from an io.Reader or directly off a byte slice with zero-copying.
 type decReader interface {
+	unreadn1()
 	readn(n int) []byte
 	readb([]byte)
 	readn1() uint8
@@ -35,7 +37,8 @@ type decDriver interface {
 	// this will call initReadNext implicitly, and then check if the next token is a break.
 	checkBreak() bool
 	tryDecodeAsNil() bool
-	currentEncodedType() valueType
+	// check if a container type: vt is one of: Bytes, String, Nil, Slice or Map
+	isContainerType(vt valueType) bool
 	isBuiltinType(rt uintptr) bool
 	decodeBuiltin(rt uintptr, v interface{})
 	//decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
@@ -51,10 +54,31 @@ type decDriver interface {
 	// decodeExt will decode into a *RawExt or into an extension.
 	decodeExt(rv reflect.Value, xtag uint64, ext Ext, d *Decoder) (realxtag uint64)
 	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
-	readMapLen() int
-	readArrayLen() int
+	readMapStart() int
+	readArrayStart() int
+	readMapEnd()
+	readArrayEnd()
+	readArrayEntrySeparator()
+	readMapEntrySeparator()
+	readMapKVSeparator()
 }
 
+// decDrivers may implement this interface to bypass allocation
+type decDriverStringAsBytes interface {
+	decStringAsBytes(bs []byte) []byte
+}
+
+type decNoMapArrayEnd struct{}
+
+func (_ decNoMapArrayEnd) readMapEnd()   {}
+func (_ decNoMapArrayEnd) readArrayEnd() {}
+
+type decNoMapArraySeparator struct{}
+
+func (_ decNoMapArraySeparator) readArrayEntrySeparator() {}
+func (_ decNoMapArraySeparator) readMapEntrySeparator()   {}
+func (_ decNoMapArraySeparator) readMapKVSeparator()      {}
+
 type DecodeOptions struct {
 	// An instance of MapType is used during schema-less decoding of a map in the stream.
 	// If nil, we use map[interface{}]interface{}
@@ -74,8 +98,10 @@ type DecodeOptions struct {
 // ioDecReader is a decReader that reads off an io.Reader
 type ioDecReader struct {
 	r  io.Reader
-	br io.ByteReader
+	br io.ByteScanner
 	x  [8]byte //temp byte array re-used internally for efficiency
+	l  byte    // last byte
+	ls uint8   // last byte status: 0: unset, 1: read, 2: unread
 }
 
 func (z *ioDecReader) readn(n int) (bs []byte) {
@@ -97,14 +123,37 @@ func (z *ioDecReader) readb(bs []byte) {
 
 func (z *ioDecReader) readn1() uint8 {
 	if z.br != nil {
-		b, err := z.br.ReadByte()
-		if err != nil {
+		if b, err := z.br.ReadByte(); err == nil {
+			return b
+		} else {
 			panic(err)
 		}
-		return b
 	}
-	z.readb(z.x[:1])
-	return z.x[0]
+	if z.ls == 2 {
+		z.ls = 0
+	} else {
+		z.readb(z.x[:1])
+		z.l = z.x[0]
+		z.ls = 1
+	}
+	return z.l
+}
+
+func (z *ioDecReader) unreadn1() {
+	if z.br != nil {
+		if err := z.br.UnreadByte(); err != nil {
+			panic(err)
+		}
+		return
+	}
+	if z.ls == 2 {
+		decErr("cannot unread when last byte has been unread")
+	}
+	if z.ls == 1 {
+		z.ls = 2
+		return
+	}
+	decErr("cannot unread when no byte has been read")
 }
 
 func (z *ioDecReader) readUint16() uint16 {
@@ -136,7 +185,7 @@ func (z *bytesDecReader) consume(n int) (oldcursor int) {
 		panic(io.EOF)
 	}
 	if n > z.a {
-		decErr("Trying to read %v bytes. Only %v available", n, z.a)
+		decErr("cannot read %v bytes, when only %v available", n, z.a)
 	}
 	// z.checkAvailable(n)
 	oldcursor = z.c
@@ -145,6 +194,15 @@ func (z *bytesDecReader) consume(n int) (oldcursor int) {
 	return
 }
 
+func (z *bytesDecReader) unreadn1() {
+	if z.c == 0 || len(z.b) == 0 {
+		decErr("cannot unread last byte read")
+	}
+	z.c--
+	z.a++
+	return
+}
+
 func (z *bytesDecReader) readn(n int) (bs []byte) {
 	if n <= 0 {
 		return
@@ -214,20 +272,20 @@ func (f *decFnInfo) ext(rv reflect.Value) {
 	f.dd.decodeExt(rv, f.xfTag, f.xfFn, f.d)
 }
 
-func (f *decFnInfo) binaryMarshal(rv reflect.Value) {
-	var bm binaryUnmarshaler
-	if f.ti.unmIndir == -1 {
-		bm = rv.Addr().Interface().(binaryUnmarshaler)
-	} else if f.ti.unmIndir == 0 {
-		bm = rv.Interface().(binaryUnmarshaler)
+func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) {
+	var bm encoding.BinaryUnmarshaler
+	if f.ti.bunmIndir == -1 {
+		bm = rv.Addr().Interface().(encoding.BinaryUnmarshaler)
+	} else if f.ti.bunmIndir == 0 {
+		bm = rv.Interface().(encoding.BinaryUnmarshaler)
 	} else {
-		for j, k := int8(0), f.ti.unmIndir; j < k; j++ {
+		for j, k := int8(0), f.ti.bunmIndir; j < k; j++ {
 			if rv.IsNil() {
 				rv.Set(reflect.New(rv.Type().Elem()))
 			}
 			rv = rv.Elem()
 		}
-		bm = rv.Interface().(binaryUnmarshaler)
+		bm = rv.Interface().(encoding.BinaryUnmarshaler)
 	}
 	xbs, _ := f.dd.decodeBytes(nil)
 	if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
@@ -235,8 +293,34 @@ func (f *decFnInfo) binaryMarshal(rv reflect.Value) {
 	}
 }
 
+func (f *decFnInfo) textUnmarshal(rv reflect.Value) {
+	var tm encoding.TextUnmarshaler
+	if f.ti.tunmIndir == -1 {
+		tm = rv.Addr().Interface().(encoding.TextUnmarshaler)
+	} else if f.ti.tunmIndir == 0 {
+		tm = rv.Interface().(encoding.TextUnmarshaler)
+	} else {
+		for j, k := int8(0), f.ti.tunmIndir; j < k; j++ {
+			if rv.IsNil() {
+				rv.Set(reflect.New(rv.Type().Elem()))
+			}
+			rv = rv.Elem()
+		}
+		tm = rv.Interface().(encoding.TextUnmarshaler)
+	}
+	var fnerr error
+	if sb, sbok := f.dd.(decDriverStringAsBytes); sbok {
+		fnerr = tm.UnmarshalText(sb.decStringAsBytes(f.d.b[:0]))
+	} else {
+		fnerr = tm.UnmarshalText([]byte(f.dd.decodeString()))
+	}
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
 func (f *decFnInfo) kErr(rv reflect.Value) {
-	decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc)
+	decErr("no decoding function defined for kind %v", rv.Kind())
 }
 
 func (f *decFnInfo) kString(rv reflect.Value) {
@@ -319,7 +403,7 @@ func (f *decFnInfo) kInterface(rv reflect.Value) {
 	// Cannot decode into nil interface with methods (e.g. error, io.Reader, etc)
 	// if non-nil value in stream.
 	if num := f.ti.rt.NumMethod(); num > 0 {
-		decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)",
+		decErr("cannot decode non-nil codec value into nil %v (%v methods)",
 			f.ti.rt, num)
 	}
 	var rvn reflect.Value
@@ -376,17 +460,29 @@ func (f *decFnInfo) kInterface(rv reflect.Value) {
 
 func (f *decFnInfo) kStruct(rv reflect.Value) {
 	fti := f.ti
-	if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap {
-		containerLen := f.dd.readMapLen()
+	if f.dd.isContainerType(valueTypeMap) {
+		containerLen := f.dd.readMapStart()
 		if containerLen == 0 {
+			f.dd.readMapEnd()
 			return
 		}
 		tisfi := fti.sfi
-		for j := 0; j < containerLen; j++ {
+		for j := 0; ; j++ {
+			if containerLen >= 0 {
+				if j >= containerLen {
+					break
+				}
+			} else if f.dd.checkBreak() {
+				break
+			}
+			if j > 0 {
+				f.dd.readMapEntrySeparator()
+			}
 			// var rvkencname string
 			// ddecode(&rvkencname)
 			f.dd.initReadNext()
 			rvkencname := f.dd.decodeString()
+			f.dd.readMapKVSeparator()
 			// rvksi := ti.getForEncName(rvkencname)
 			if k := fti.indexForEncName(rvkencname); k > -1 {
 				sfik := tisfi[k]
@@ -398,7 +494,7 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
 				// f.d.decodeValue(ti.field(k, rv))
 			} else {
 				if f.d.h.ErrorIfNoField {
-					decErr("No matching struct field found when decoding stream map with key: %v",
+					decErr("no matching struct field found when decoding stream map with key %v",
 						rvkencname)
 				} else {
 					var nilintf0 interface{}
@@ -406,15 +502,24 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
 				}
 			}
 		}
-	} else if currEncodedType == valueTypeArray {
-		containerLen := f.dd.readArrayLen()
+		f.dd.readMapEnd()
+	} else if f.dd.isContainerType(valueTypeArray) {
+		containerLen := f.dd.readArrayStart()
 		if containerLen == 0 {
+			f.dd.readArrayEnd()
 			return
 		}
 		for j, si := range fti.sfip {
-			if j == containerLen {
+			if containerLen >= 0 {
+				if j == containerLen {
+					break
+				}
+			} else if f.dd.checkBreak() {
 				break
 			}
+			if j > 0 {
+				f.dd.readArrayEntrySeparator()
+			}
 			if si.i != -1 {
 				f.d.decodeValue(rv.Field(int(si.i)), decFn{})
 			} else {
@@ -425,29 +530,36 @@ func (f *decFnInfo) kStruct(rv reflect.Value) {
 			// read remaining values and throw away
 			for j := len(fti.sfip); j < containerLen; j++ {
 				var nilintf0 interface{}
+				if j > 0 {
+					f.dd.readArrayEntrySeparator()
+				}
 				f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem(), decFn{})
 			}
 		}
+		f.dd.readArrayEnd()
 	} else {
-		decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)",
-			currEncodedType)
+		decErr("only encoded map or array can be decoded into a struct")
 	}
 }
 
 func (f *decFnInfo) kSlice(rv reflect.Value) {
-	// A slice can be set from a map or array in stream.
-	currEncodedType := f.dd.currentEncodedType()
-	switch currEncodedType {
-	case valueTypeBytes, valueTypeString:
+	// A slice can be set from a map or array in stream. This way, the order can be kept (as order is lost with map).
+	if f.dd.isContainerType(valueTypeBytes) || f.dd.isContainerType(valueTypeString) {
 		if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
-			if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 {
-				rv.SetBytes(bs2)
+			rvbs := rv.Bytes()
+			if bs2, changed2 := f.dd.decodeBytes(rvbs); changed2 {
+				if rv.CanSet() {
+					rv.SetBytes(bs2)
+				} else {
+					copy(rvbs, bs2)
+				}
 			}
 			return
 		}
 	}
 
-	containerLen, containerLenS := decContLens(f.dd, currEncodedType)
+	slh := decSliceHelper{dd: f.dd}
+	containerLenS := slh.start()
 
 	// an array can never return a nil slice. so no need to check f.array here.
 	if rv.IsNil() {
@@ -458,8 +570,9 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
 		}
 	}
 
-	if containerLen == 0 {
+	if containerLenS == 0 {
 		rv.SetLen(0)
+		f.dd.readArrayEnd()
 		return
 	}
 
@@ -501,8 +614,12 @@ func (f *decFnInfo) kSlice(rv reflect.Value) {
 			rv = reflect.Append(rv, reflect.Zero(rtelem0))
 			rvChanged = true
 		}
+		if j > 0 {
+			slh.sep(j)
+		}
 		f.d.decodeValue(rv.Index(j), fn)
 	}
+	slh.end()
 	if rvChanged {
 		rv0.Set(rv)
 	}
@@ -514,13 +631,14 @@ func (f *decFnInfo) kArray(rv reflect.Value) {
 }
 
 func (f *decFnInfo) kMap(rv reflect.Value) {
-	containerLen := f.dd.readMapLen()
+	containerLen := f.dd.readMapStart()
 
 	if rv.IsNil() {
 		rv.Set(reflect.MakeMap(f.ti.rt))
 	}
 
 	if containerLen == 0 {
+		f.dd.readMapEnd()
 		return
 	}
 
@@ -543,6 +661,9 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
 		} else if f.dd.checkBreak() {
 			break
 		}
+		if j > 0 {
+			f.dd.readMapEntrySeparator()
+		}
 		rvk := reflect.New(ktype).Elem()
 		f.d.decodeValue(rvk, keyFn)
 
@@ -557,20 +678,23 @@ func (f *decFnInfo) kMap(rv reflect.Value) {
 		if !rvv.IsValid() {
 			rvv = reflect.New(vtype).Elem()
 		}
-
+		f.dd.readMapKVSeparator()
 		f.d.decodeValue(rvv, valFn)
 		rv.SetMapIndex(rvk, rvv)
 	}
+	f.dd.readMapEnd()
 }
 
 // A Decoder reads and decodes an object from an input stream in the codec format.
 type Decoder struct {
-	r decReader
-	d decDriver
-	h *BasicHandle
-	f map[uintptr]decFn
-	x []uintptr
-	s []decFn
+	r  decReader
+	d  decDriver
+	h  *BasicHandle
+	hh Handle
+	f  map[uintptr]decFn
+	x  []uintptr
+	s  []decFn
+	b  [32]byte
 }
 
 // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
@@ -581,8 +705,8 @@ func NewDecoder(r io.Reader, h Handle) *Decoder {
 	z := ioDecReader{
 		r: r,
 	}
-	z.br, _ = r.(io.ByteReader)
-	return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+	z.br, _ = r.(io.ByteScanner)
+	return &Decoder{r: &z, hh: h, h: h.getBasicHandle(), d: h.newDecDriver(&z)}
 }
 
 // NewDecoderBytes returns a Decoder which efficiently decodes directly
@@ -592,7 +716,7 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder {
 		b: in,
 		a: len(in),
 	}
-	return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+	return &Decoder{r: &z, hh: h, h: h.getBasicHandle(), d: h.newDecDriver(&z)}
 }
 
 // Decode decodes the stream from reader and stores the result in the
@@ -662,7 +786,7 @@ func (d *Decoder) decode(iv interface{}) {
 
 	switch v := iv.(type) {
 	case nil:
-		decErr("Cannot decode into nil.")
+		decErr("cannot decode into nil.")
 
 	case reflect.Value:
 		d.chkPtrValue(v)
@@ -783,8 +907,10 @@ func (d *Decoder) getDecFn(rt reflect.Type) (fn decFn) {
 		} else if xfFn := d.h.getExt(rtid); xfFn != nil {
 			fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
 			fn.f = (*decFnInfo).ext
-		} else if supportBinaryMarshal && fi.ti.unm {
-			fn.f = (*decFnInfo).binaryMarshal
+		} else if supportMarshalInterfaces && d.hh.isBinaryEncoding() && fi.ti.bunm {
+			fn.f = (*decFnInfo).binaryUnmarshal
+		} else if supportMarshalInterfaces && !d.hh.isBinaryEncoding() && fi.ti.tunm {
+			fn.f = (*decFnInfo).textUnmarshal
 		} else {
 			rk := rt.Kind()
 			if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice) {
@@ -875,13 +1001,13 @@ func (d *Decoder) chkPtrValue(rv reflect.Value) {
 		return
 	}
 	if !rv.IsValid() {
-		decErr("Cannot decode into a zero (ie invalid) reflect.Value")
+		decErr("cannot decode into a zero (ie invalid) reflect.Value")
 	}
 	if !rv.CanInterface() {
-		decErr("Cannot decode into a value without an interface: %v", rv)
+		decErr("cannot decode into a value without an interface: %v", rv)
 	}
 	rvi := rv.Interface()
-	decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v",
+	decErr("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v",
 		rv.Kind(), rvi, rvi)
 }
 
@@ -903,23 +1029,44 @@ func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) {
 
 // --------------------------------------------------
 
-func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) {
-	// currEncodedType = dd.currentEncodedType()
-	// if currEncodedType == valueTypeInvalid {
-	// 	currEncodedType = dd.currentEncodedType()
-	// }
-	switch currEncodedType {
-	case valueTypeArray:
-		containerLen = dd.readArrayLen()
-		containerLenS = containerLen
-	case valueTypeMap:
-		containerLen = dd.readMapLen()
-		containerLenS = containerLen * 2
-	default:
-		decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)",
-			currEncodedType)
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+	dd decDriver
+	ct valueType
+}
+
+func (x *decSliceHelper) start() (sliceLen int) {
+	if x.dd.isContainerType(valueTypeArray) {
+		x.ct = valueTypeArray
+		return x.dd.readArrayStart()
+	}
+	if x.dd.isContainerType(valueTypeMap) {
+		x.ct = valueTypeMap
+		return x.dd.readMapStart() * 2
+	}
+	decErr("only encoded map or array can be decoded into a slice")
+	panic("unreachable")
+}
+
+func (x *decSliceHelper) sep(index int) {
+	if x.ct == valueTypeArray {
+		x.dd.readArrayEntrySeparator()
+	} else {
+		if index%2 == 0 {
+			x.dd.readMapEntrySeparator()
+		} else {
+			x.dd.readMapKVSeparator()
+		}
+	}
+}
+
+func (x *decSliceHelper) end() {
+	if x.ct == valueTypeArray {
+		x.dd.readArrayEnd()
+	} else {
+		x.dd.readMapEnd()
 	}
-	return
 }
 
 func decErr(format string, params ...interface{}) {
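
The decode.go diff above widens the decDriver interface with end/separator hooks that only text formats need; binary drivers (binc, cbor, msgpack, simple) satisfy them by embedding the no-op decNoMapArrayEnd and decNoMapArraySeparator structs. A self-contained illustration of that embedding pattern, using stand-in types rather than the package's unexported ones:

    // Stand-in types showing the pattern: a binary-style driver picks up
    // no-op separator/end methods for free via embedding.
    package main

    import "fmt"

    type driver interface {
        readMapStart() int
        readMapEnd()
        readMapEntrySeparator()
    }

    // noOpSeparators plays the role of decNoMapArrayEnd/decNoMapArraySeparator.
    type noOpSeparators struct{}

    func (noOpSeparators) readMapEnd()            {}
    func (noOpSeparators) readMapEntrySeparator() {}

    // binaryishDriver only implements what a length-prefixed binary format needs;
    // the separator/end hooks come from the embedded no-ops.
    type binaryishDriver struct {
        noOpSeparators
        n int
    }

    func (d *binaryishDriver) readMapStart() int { return d.n }

    func main() {
        var d driver = &binaryishDriver{n: 3}
        fmt.Println(d.readMapStart()) // 3
        d.readMapEntrySeparator()     // no-op, as for binc/cbor/msgpack
        d.readMapEnd()                // no-op
    }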

+ 139 - 87
codec/encode.go

@@ -1,9 +1,10 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
 
 import (
+	"encoding"
 	"io"
 	"reflect"
 )
@@ -65,8 +66,13 @@ type encDriver interface {
 	// encodeExtPreamble(xtag byte, length int)
 	encodeRawExt(re *RawExt, e *Encoder)
 	encodeExt(rv reflect.Value, xtag uint64, ext Ext, e *Encoder)
-	encodeArrayPreamble(length int)
-	encodeMapPreamble(length int)
+	encodeArrayStart(length int)
+	encodeArrayEnd()
+	encodeArrayEntrySeparator()
+	encodeMapStart(length int)
+	encodeMapEnd()
+	encodeMapEntrySeparator()
+	encodeMapKVSeparator()
 	encodeString(c charEncoding, v string)
 	encodeSymbol(v string)
 	encodeStringBytes(c charEncoding, v []byte)
@@ -75,6 +81,17 @@ type encDriver interface {
 	//encStringRunes(c charEncoding, v []rune)
 }
 
+type encNoMapArrayEnd struct{}
+
+func (_ encNoMapArrayEnd) encodeMapEnd()   {}
+func (_ encNoMapArrayEnd) encodeArrayEnd() {}
+
+type encNoMapArraySeparator struct{}
+
+func (_ encNoMapArraySeparator) encodeArrayEntrySeparator() {}
+func (_ encNoMapArraySeparator) encodeMapEntrySeparator()   {}
+func (_ encNoMapArraySeparator) encodeMapKVSeparator()      {}
+
 type ioEncWriterWriter interface {
 	WriteByte(c byte) error
 	WriteString(s string) (n int, err error)
@@ -164,7 +181,7 @@ func (z *ioEncWriter) writeb(bs []byte) {
 		panic(err)
 	}
 	if n != len(bs) {
-		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
+		encErr("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
 	}
 }
 
@@ -174,7 +191,7 @@ func (z *ioEncWriter) writestr(s string) {
 		panic(err)
 	}
 	if n != len(s) {
-		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)
+		encErr("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)
 	}
 }
 
@@ -279,6 +296,7 @@ type encFnInfo struct {
 	ee    encDriver
 	xfFn  Ext
 	xfTag uint64
+	array bool
 }
 
 func (f *encFnInfo) builtin(rv reflect.Value) {
@@ -294,22 +312,22 @@ func (f *encFnInfo) ext(rv reflect.Value) {
 }
 
 func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
-	var bm binaryMarshaler
-	if f.ti.mIndir == 0 {
-		bm = rv.Interface().(binaryMarshaler)
-	} else if f.ti.mIndir == -1 {
-		bm = rv.Addr().Interface().(binaryMarshaler)
+	var bm encoding.BinaryMarshaler
+	if f.ti.bmIndir == 0 {
+		bm = rv.Interface().(encoding.BinaryMarshaler)
+	} else if f.ti.bmIndir == -1 {
+		bm = rv.Addr().Interface().(encoding.BinaryMarshaler)
 	} else {
-		for j, k := int8(0), f.ti.mIndir; j < k; j++ {
+		for j, k := int8(0), f.ti.bmIndir; j < k; j++ {
 			if rv.IsNil() {
 				f.ee.encodeNil()
 				return
 			}
 			rv = rv.Elem()
 		}
-		bm = rv.Interface().(binaryMarshaler)
+		bm = rv.Interface().(encoding.BinaryMarshaler)
 	}
-	// debugf(">>>> binaryMarshaler: %T", rv.Interface())
+	// debugf(">>>> encoding.BinaryMarshaler: %T", rv.Interface())
 	bs, fnerr := bm.MarshalBinary()
 	if fnerr != nil {
 		panic(fnerr)
@@ -321,6 +339,34 @@ func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
 	}
 }
 
+func (f *encFnInfo) textMarshal(rv reflect.Value) {
+	var tm encoding.TextMarshaler
+	if f.ti.tmIndir == 0 {
+		tm = rv.Interface().(encoding.TextMarshaler)
+	} else if f.ti.tmIndir == -1 {
+		tm = rv.Addr().Interface().(encoding.TextMarshaler)
+	} else {
+		for j, k := int8(0), f.ti.tmIndir; j < k; j++ {
+			if rv.IsNil() {
+				f.ee.encodeNil()
+				return
+			}
+			rv = rv.Elem()
+		}
+		tm = rv.Interface().(encoding.TextMarshaler)
+	}
+	// debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
+	bs, fnerr := tm.MarshalText()
+	if fnerr != nil {
+		panic(fnerr)
+	}
+	if bs == nil {
+		f.ee.encodeNil()
+	} else {
+		f.ee.encodeStringBytes(c_UTF8, bs)
+	}
+}
+
 func (f *encFnInfo) kBool(rv reflect.Value) {
 	f.ee.encodeBool(rv.Bool())
 }
@@ -350,92 +396,80 @@ func (f *encFnInfo) kInvalid(rv reflect.Value) {
 }
 
 func (f *encFnInfo) kErr(rv reflect.Value) {
-	encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv)
+	encErr("unsupported kind %s, for %#v", rv.Kind(), rv)
 }
 
 func (f *encFnInfo) kSlice(rv reflect.Value) {
-	if rv.IsNil() {
-		f.ee.encodeNil()
-		return
-	}
-
-	// If in this method, then there was no extension function defined.
-	// So it's okay to treat as []byte.
-	if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
-		f.ee.encodeStringBytes(c_RAW, rv.Bytes())
-		return
-	}
+	// array may be non-addressable, so we have to manage with care (don't call rv.Bytes, rv.Slice, etc).
+	// E.g. type S struct{ B [2]byte }; Encode(S{}) will bomb on "panic: slice of unaddressable array".
 
-	l := rv.Len()
-	if f.ti.mbs {
-		if l%2 == 1 {
-			encErr("mapBySlice: invalid length (must be divisible by 2): %v", l)
+	if !f.array {
+		if rv.IsNil() {
+			f.ee.encodeNil()
+			return
+		}
+		// If in this method, then there was no extension function defined.
+		// So it's okay to treat as []byte.
+		if f.ti.rtid == uint8SliceTypId {
+			f.ee.encodeStringBytes(c_RAW, rv.Bytes())
+			return
 		}
-		f.ee.encodeMapPreamble(l / 2)
-	} else {
-		f.ee.encodeArrayPreamble(l)
-	}
-	if l == 0 {
-		return
 	}
-
 	rtelem := f.ti.rt.Elem()
-	for rtelem.Kind() == reflect.Ptr {
-		rtelem = rtelem.Elem()
-	}
-	fn := f.e.getEncFn(rtelem)
-	for j := 0; j < l; j++ {
-		// TODO: Consider perf implication of encoding odd index values as symbols if type is string
-		f.e.encodeValue(rv.Index(j), fn)
-	}
-}
-
-func (f *encFnInfo) kArray(rv reflect.Value) {
-	// We cannot share kSlice method, because the array may be non-addressable.
-	// E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array".
-	// So we have to duplicate the functionality here.
-	// f.e.encodeValue(rv.Slice(0, rv.Len()))
-	// f.kSlice(rv.Slice(0, rv.Len()))
-
 	l := rv.Len()
-	// Handle an array of bytes specially (in line with what is done for slices)
-	rtelem := f.ti.rt.Elem()
 	if rtelem.Kind() == reflect.Uint8 {
-		if l == 0 {
-			f.ee.encodeStringBytes(c_RAW, nil)
-			return
-		}
-		var bs []byte
-		if rv.CanAddr() {
-			bs = rv.Slice(0, l).Bytes()
-		} else {
-			bs = make([]byte, l)
-			for i := 0; i < l; i++ {
-				bs[i] = byte(rv.Index(i).Uint())
+		if f.array {
+			// if l == 0 { f.ee.encodeStringBytes(c_RAW, nil) } else
+			if rv.CanAddr() {
+				f.ee.encodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
+			} else {
+				bs := make([]byte, l)
+				for i := 0; i < l; i++ {
+					bs[i] = byte(rv.Index(i).Uint())
+				}
+				f.ee.encodeStringBytes(c_RAW, bs)
 			}
+		} else {
+			f.ee.encodeStringBytes(c_RAW, rv.Bytes())
 		}
-		f.ee.encodeStringBytes(c_RAW, bs)
 		return
 	}
 
 	if f.ti.mbs {
 		if l%2 == 1 {
-			encErr("mapBySlice: invalid length (must be divisible by 2): %v", l)
+			encErr("mapBySlice requires even slice length, but got %v", l)
 		}
-		f.ee.encodeMapPreamble(l / 2)
+		f.ee.encodeMapStart(l / 2)
 	} else {
-		f.ee.encodeArrayPreamble(l)
+		f.ee.encodeArrayStart(l)
 	}
-	if l == 0 {
-		return
-	}
-	for rtelem.Kind() == reflect.Ptr {
-		rtelem = rtelem.Elem()
+
+	if l > 0 {
+		for rtelem.Kind() == reflect.Ptr {
+			rtelem = rtelem.Elem()
+		}
+		fn := f.e.getEncFn(rtelem)
+		for j := 0; j < l; j++ {
+			// TODO: Consider perf implication of encoding odd index values as symbols if type is string
+			if j > 0 {
+				if f.ti.mbs {
+					if j%2 == 0 {
+						f.ee.encodeMapEntrySeparator()
+					} else {
+						f.ee.encodeMapKVSeparator()
+					}
+				} else {
+					f.ee.encodeArrayEntrySeparator()
+				}
+			}
+			f.e.encodeValue(rv.Index(j), fn)
+		}
 	}
-	fn := f.e.getEncFn(rtelem)
-	for j := 0; j < l; j++ {
-		// TODO: Consider perf implication of encoding odd index values as symbols if type is string
-		f.e.encodeValue(rv.Index(j), fn)
+
+	if f.ti.mbs {
+		f.ee.encodeMapEnd()
+	} else {
+		f.ee.encodeArrayEnd()
 	}
 }
 
@@ -475,22 +509,31 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
 	// debugf(">>>> kStruct: newlen: %v", newlen)
 	if toMap {
 		ee := f.ee //don't dereference everytime
-		ee.encodeMapPreamble(newlen)
+		ee.encodeMapStart(newlen)
 		// asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
 		asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
 		for j := 0; j < newlen; j++ {
+			if j > 0 {
+				ee.encodeMapEntrySeparator()
+			}
 			if asSymbols {
 				ee.encodeSymbol(encnames[j])
 			} else {
 				ee.encodeString(c_UTF8, encnames[j])
 			}
+			ee.encodeMapKVSeparator()
 			e.encodeValue(rvals[j], encFn{})
 		}
+		ee.encodeMapEnd()
 	} else {
-		f.ee.encodeArrayPreamble(newlen)
+		f.ee.encodeArrayStart(newlen)
 		for j := 0; j < newlen; j++ {
+			if j > 0 {
+				f.ee.encodeArrayEntrySeparator()
+			}
 			e.encodeValue(rvals[j], encFn{})
 		}
+		f.ee.encodeArrayEnd()
 	}
 }
 
@@ -518,8 +561,9 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
 	}
 
 	l := rv.Len()
-	f.ee.encodeMapPreamble(l)
+	f.ee.encodeMapStart(l)
 	if l == 0 {
+		f.ee.encodeMapEnd()
 		return
 	}
 	var asSymbols bool
@@ -545,19 +589,24 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
 	valFn = f.e.getEncFn(rtval)
 	mks := rv.MapKeys()
 	// for j, lmks := 0, len(mks); j < lmks; j++ {
+	ee := f.ee // don't dereference every time
 	for j := range mks {
+		if j > 0 {
+			ee.encodeMapEntrySeparator()
+		}
 		if keyTypeIsString {
 			if asSymbols {
-				f.ee.encodeSymbol(mks[j].String())
+				ee.encodeSymbol(mks[j].String())
 			} else {
-				f.ee.encodeString(c_UTF8, mks[j].String())
+				ee.encodeString(c_UTF8, mks[j].String())
 			}
 		} else {
 			f.e.encodeValue(mks[j], keyFn)
 		}
+		ee.encodeMapKVSeparator()
 		f.e.encodeValue(rv.MapIndex(mks[j]), valFn)
 	}
-
+	ee.encodeMapEnd()
 }
 
 // --------------------------------------------------
@@ -821,8 +870,10 @@ func (e *Encoder) getEncFn(rt reflect.Type) (fn encFn) {
 		} else if xfFn := e.h.getExt(rtid); xfFn != nil {
 			fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
 			fn.f = (*encFnInfo).ext
-		} else if supportBinaryMarshal && fi.ti.m {
+		} else if supportMarshalInterfaces && e.hh.isBinaryEncoding() && fi.ti.bm {
 			fn.f = (*encFnInfo).binaryMarshal
+		} else if supportMarshalInterfaces && !e.hh.isBinaryEncoding() && fi.ti.tm {
+			fn.f = (*encFnInfo).textMarshal
 		} else {
 			rk := rt.Kind()
 			if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice) {
@@ -863,7 +914,8 @@ func (e *Encoder) getEncFn(rt reflect.Type) (fn encFn) {
 				case reflect.Slice:
 					fn.f = (*encFnInfo).kSlice
 				case reflect.Array:
-					fn.f = (*encFnInfo).kArray
+					fi.array = true
+					fn.f = (*encFnInfo).kSlice
 				case reflect.Struct:
 					fn.f = (*encFnInfo).kStruct
 					// case reflect.Ptr:
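
With getEncFn above, text handles route types implementing encoding.TextMarshaler through MarshalText, while binary handles keep using the BinaryMarshaler path. A minimal sketch (not part of this commit), assuming the usual import path github.com/ugorji/go/codec:

    package main

    import (
    	"fmt"
    	"net"

    	"github.com/ugorji/go/codec"
    )

    func main() {
    	// net.IP implements encoding.TextMarshaler, so a text handle like
    	// JsonHandle encodes it via MarshalText (a quoted "10.0.0.1").
    	// A binary handle such as MsgpackHandle would instead write the
    	// underlying byte slice, since net.IP has no MarshalBinary method.
    	ip := net.ParseIP("10.0.0.1")
    	var out []byte
    	if err := codec.NewEncoderBytes(&out, new(codec.JsonHandle)).Encode(ip); err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // "10.0.0.1"
    }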

+ 3 - 52
codec/ext_dep_test.go

@@ -5,63 +5,14 @@
 
 package codec
 
-// This file includes benchmarks which have dependencies on 3rdparty
-// packages (bson and vmihailenco/msgpack) which must be installed locally.
-//
-// To run the benchmarks including these 3rdparty packages, first
-//   - Uncomment first line in this file (put // // in front of it)
-//   - Get those packages:
-//       go get github.com/vmihailenco/msgpack
-//       go get labix.org/v2/mgo/bson
-//   - Run:
-//       go test -bi -bench=.
+// These tests verify the msgpack and cbor implementations against their
+// corresponding python libraries.
+// If you have those libraries installed, you can re-enable the tests by removing the //+build ignore line.
 
 import (
 	"testing"
-
-	vmsgpack "github.com/vmihailenco/msgpack"
-	"labix.org/v2/mgo/bson"
 )
 
-func init() {
-	benchCheckers = append(benchCheckers,
-		benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn},
-		benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn},
-	)
-}
-
-func fnVMsgpackEncodeFn(ts interface{}) ([]byte, error) {
-	return vmsgpack.Marshal(ts)
-}
-
-func fnVMsgpackDecodeFn(buf []byte, ts interface{}) error {
-	return vmsgpack.Unmarshal(buf, ts)
-}
-
-func fnBsonEncodeFn(ts interface{}) ([]byte, error) {
-	return bson.Marshal(ts)
-}
-
-func fnBsonDecodeFn(buf []byte, ts interface{}) error {
-	return bson.Unmarshal(buf, ts)
-}
-
-func Benchmark__Bson_______Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "bson", benchTs, fnBsonEncodeFn)
-}
-
-func Benchmark__Bson_______Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "bson", benchTs, fnBsonEncodeFn, fnBsonDecodeFn, fnBenchNewTs)
-}
-
-func Benchmark__VMsgpack___Encode(b *testing.B) {
-	fnBenchmarkEncode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn)
-}
-
-func Benchmark__VMsgpack___Decode(b *testing.B) {
-	fnBenchmarkDecode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn, fnVMsgpackDecodeFn, fnBenchNewTs)
-}
-
 func TestMsgpackPythonGenStreams(t *testing.T) {
 	doTestPythonGenStreams(t, "msgpack", testMsgpackH)
 }

File diff view limited because it is too large
+ 365 - 64
codec/fast-path.go


+ 36 - 11
codec/gen-fast-path.go

@@ -1,6 +1,6 @@
 //+build ignore
 
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package main
@@ -13,8 +13,12 @@ import (
 	"text/template"
 )
 
+// The generated fast-path code uses maps keyed by a type's uintptr id to find its function.
+// Experiments show that maps scale better than linear search
+// once there are more than 32 entries.
+
 const tmplstr = `
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 // ************************************************************
@@ -81,10 +85,14 @@ func (f *encFnInfo) {{ .MethodName true }}(rv reflect.Value) {
 		f.ee.encodeNil()
 		return
 	}
-	f.ee.encodeArrayPreamble(len(v))
-	for _, v2 := range v {
+	f.ee.encodeArrayStart(len(v))
+	for j, v2 := range v {
+		if j > 0 {
+			f.ee.encodeArrayEntrySeparator()
+		}
 		{{ encmd .Elem "v2"}}
 	}
+	f.ee.encodeArrayEnd()
 }
 
 {{end}}{{end}}
@@ -97,16 +105,23 @@ func (f *encFnInfo) {{ .MethodName true }}(rv reflect.Value) {
 		f.ee.encodeNil()
 		return
 	}
-	f.ee.encodeMapPreamble(len(v))
+	f.ee.encodeMapStart(len(v))
 	{{if eq .MapKey "string"}}asSymbols := f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0{{end}}
+	j := 0
 	for k2, v2 := range v {
+		if j > 0 {
+			f.ee.encodeMapEntrySeparator()
+		}
 		{{if eq .MapKey "string"}}if asSymbols {
 			f.ee.encodeSymbol(k2)
 		} else {
 			f.ee.encodeString(c_UTF8, k2)
 		}{{else}}{{ encmd .MapKey "k2"}}{{end}}
+		f.ee.encodeMapKVSeparator()
 		{{ encmd .Elem "v2"}}
+		j++
 	}
+	f.ee.encodeMapEnd()
 }
 
 {{end}}{{end}}
@@ -125,8 +140,7 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 	} else {
 		v = rv.Interface().([]{{ .Elem }})
 	}
-	vtype := f.dd.currentEncodedType()
-	if vtype == valueTypeNil {
+	if f.dd.isContainerType(valueTypeNil) {
 		if xaddr {
 			v = nil
 			*vp = v
@@ -134,7 +148,8 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 		return
 	}
 
-	_, containerLenS := decContLens(f.dd, vtype)
+	slh := decSliceHelper{dd: f.dd}
+	containerLenS := slh.start()
 	if containerLenS == 0 {
 		if v == nil {
 			v = []{{ .Elem }}{}
@@ -142,6 +157,7 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 			v = v[:0]
 		}
 		*vp = v
+		f.dd.readArrayEnd()
 		return
 	}
 	if v == nil {
@@ -172,11 +188,15 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 		if j >= len(v) {
 			v = append(v, {{ zerocmd .Elem }})
 		}
+		if j > 0 {
+			slh.sep(j)
+		}
 		{{ if eq .Elem "interface{}" }}f.d.decode(&v[j])
 		{{ else }}f.dd.initReadNext()
 		v[j] = {{ decmd .Elem }}
 		{{ end }}
 	}
+	slh.end()
 	if xaddr {
 		*vp = v
 	}
@@ -197,8 +217,7 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 	} else {
 		v = rv.Interface().(map[{{ .MapKey }}]{{ .Elem }})
 	}
-	vtype := f.dd.currentEncodedType()
-	if vtype == valueTypeNil {
+	if f.dd.isContainerType(valueTypeNil) {
 		if xaddr {
 			v = nil
 			*vp = v
@@ -206,12 +225,13 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 		return
 	}
 
-	containerLen := f.dd.readMapLen()
+	containerLen := f.dd.readMapStart()
 	if containerLen == 0 {
 		if v == nil {
 			v = map[{{ .MapKey }}]{{ .Elem }}{}
 			*vp = v
 		}
+		f.dd.readMapEnd()
 		return
 	}
 	if xaddr && v == nil {
@@ -231,6 +251,9 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 		} else if f.dd.checkBreak() {
 			break
 		}
+		if j > 0 {
+			f.dd.readMapEntrySeparator()
+		}
 		{{ if eq .MapKey "interface{}" }}var mk interface{}
 		f.d.decode(&mk)
 		// special case if a byte array.
@@ -240,6 +263,7 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 		{{ else }}f.dd.initReadNext()
 		mk := {{ decmd .MapKey }}
 		{{ end }}
+		f.dd.readMapKVSeparator()
         mv := v[mk]
 		{{ if eq .Elem "interface{}" }}f.d.decode(&mv)
 		{{ else }}f.dd.initReadNext()
@@ -249,6 +273,7 @@ func (f *decFnInfo) {{ .MethodName false }}(rv reflect.Value) {
 			v[mk] = mv
 		}
 	}
+	f.dd.readMapEnd()
 }
 
 {{end}}{{end}}
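
The note at the top of this template refers to how the generated fast-path.go dispatches: each specialized encode/decode function is keyed by the uintptr identity of its reflect.Type, so dispatch is a map lookup rather than a linear scan once the table grows. A rough standalone sketch of that idea (the names here are illustrative, not the package's internals):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    // fastpath maps a type's uintptr id to a specialized function,
    // mirroring how fast-path.go bypasses generic reflection for
    // common slice and map types.
    var fastpath = map[uintptr]func(v interface{}){
    	reflect.ValueOf(reflect.TypeOf([]int(nil))).Pointer(): func(v interface{}) {
    		fmt.Println("fast path []int:", v)
    	},
    }

    func encode(v interface{}) {
    	if fn, ok := fastpath[reflect.ValueOf(reflect.TypeOf(v)).Pointer()]; ok {
    		fn(v)
    		return
    	}
    	fmt.Println("generic reflection fallback:", v)
    }

    func main() {
    	encode([]int{1, 2, 3})             // hits the fast path
    	encode(map[string]bool{"x": true}) // falls back
    }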

+ 45 - 24
codec/helper.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -6,6 +6,7 @@ package codec
 // Contains code shared by both encode and decode.
 
 import (
+	"encoding"
 	"encoding/binary"
 	"fmt"
 	"math"
@@ -21,11 +22,9 @@ import (
 const (
 	structTagName = "codec"
 
-	// Support
-	//    encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error)
-	//    encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error
+	// Support encoding.(Binary|Text)(Unm|M)arshaler.
 	// This constant flag will enable or disable it.
-	supportBinaryMarshal = true
+	supportMarshalInterfaces = true
 
 	// Each Encoder or Decoder uses a cache of functions based on conditionals,
 	// so that the conditionals are not run every time.
@@ -33,6 +32,9 @@ const (
 	// Either a map or a slice is used to keep track of the functions.
 	// The map is more natural, but has a higher cost than a slice/array.
 	// This flag (useMapForCodecCache) controls which is used.
+	//
+	// From benchmarks, slices with linear search perform better with < 32 entries.
+	// In practice, we have typically seen at most about 24 entries in the cache.
 	useMapForCodecCache = false
 
 	// for debugging, set this to false, to catch panic traces.
@@ -93,9 +95,13 @@ var (
 	rawExtTyp     = reflect.TypeOf(RawExt{})
 	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
 
-	mapBySliceTyp        = reflect.TypeOf((*MapBySlice)(nil)).Elem()
-	binaryMarshalerTyp   = reflect.TypeOf((*binaryMarshaler)(nil)).Elem()
-	binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem()
+	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
 
 	uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
 	rawExtTypId     = reflect.ValueOf(rawExtTyp).Pointer()
@@ -111,16 +117,13 @@ var (
 	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
 )
 
-type binaryUnmarshaler interface {
-	UnmarshalBinary(data []byte) error
-}
-
-type binaryMarshaler interface {
-	MarshalBinary() (data []byte, err error)
-}
-
 // MapBySlice represents a slice which should be encoded as a map in the stream.
 // The slice contains a sequence of key-value pairs.
+// This allows a map to be stored in the stream in a specific order.
+//
+// Support for MapBySlice means that:
+//   - A slice type which implements MapBySlice will be encoded as a map
+//   - A slice can be decoded from a map in the stream
 type MapBySlice interface {
 	MapBySlice()
 }
@@ -138,6 +141,10 @@ func (x *BasicHandle) getBasicHandle() *BasicHandle {
 	return x
 }
 
+func (x *BasicHandle) isBinaryEncoding() bool {
+	return true
+}
+
 // Handle is the interface for a specific encoding format.
 //
 // Typically, a Handle is pre-configured before first time use,
@@ -147,6 +154,7 @@ type Handle interface {
 	getBasicHandle() *BasicHandle
 	newEncDriver(w encWriter) encDriver
 	newDecDriver(r decReader) decDriver
+	isBinaryEncoding() bool
 }
 
 // RawExt represents raw unprocessed extension data.
@@ -303,7 +311,7 @@ type structFieldInfo struct {
 
 func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
 	if fname == "" {
-		panic("parseStructFieldInfo: No Field Name")
+		panic("no field name passed to parseStructFieldInfo")
 	}
 	si := structFieldInfo{
 		encName: fname,
@@ -349,6 +357,7 @@ func (p sfiSortedByEncName) Swap(i, j int) {
 //   - If base is a built in type, en/decode base value
 //   - If base is registered as an extension, en/decode base value
 //   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
 //   - Else decode appropriately based on the reflect.Kind
 type typeInfo struct {
 	sfi  []*structFieldInfo // sorted. Used when enc/dec struct to map.
@@ -365,11 +374,17 @@ type typeInfo struct {
 
 	mbs bool // base type (T or *T) is a MapBySlice
 
-	m        bool // base type (T or *T) is a binaryMarshaler
-	unm      bool // base type (T or *T) is a binaryUnmarshaler
-	mIndir   int8 // number of indirections to get to binaryMarshaler type
-	unmIndir int8 // number of indirections to get to binaryUnmarshaler type
-	toArray  bool // whether this (struct) type should be encoded as an array
+	bm        bool // base type (T or *T) is a binaryMarshaler
+	bunm      bool // base type (T or *T) is a binaryUnmarshaler
+	bmIndir   int8 // number of indirections to get to binaryMarshaler type
+	bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
+
+	tm        bool // base type (T or *T) is a textMarshaler
+	tunm      bool // base type (T or *T) is a textUnmarshaler
+	tmIndir   int8 // number of indirections to get to textMarshaler type
+	tunmIndir int8 // number of indirections to get to textUnmarshaler type
+
+	toArray bool // whether this (struct) type should be encoded as an array
 }
 
 func (ti *typeInfo) indexForEncName(name string) int {
@@ -420,10 +435,16 @@ func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 
 	var indir int8
 	if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
-		ti.m, ti.mIndir = true, indir
+		ti.bm, ti.bmIndir = true, indir
 	}
 	if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
-		ti.unm, ti.unmIndir = true, indir
+		ti.bunm, ti.bunmIndir = true, indir
+	}
+	if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
+		ti.tm, ti.tmIndir = true, indir
+	}
+	if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
+		ti.tunm, ti.tunmIndir = true, indir
 	}
 	if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
 		ti.mbs = true
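
As the MapBySlice documentation above notes, a slice type implementing the interface is written to the stream as a map. A minimal sketch of that behaviour (not part of this commit), assuming the usual import path github.com/ugorji/go/codec:

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    // kvSlice holds key, value, key, value, ... and opts into map
    // encoding by implementing codec.MapBySlice.
    type kvSlice []interface{}

    func (kvSlice) MapBySlice() {}

    func main() {
    	var out []byte
    	h := new(codec.JsonHandle)
    	if err := codec.NewEncoderBytes(&out, h).Encode(kvSlice{"a", 1, "b", 2}); err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // expect {"a":1,"b":2}
    }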

+ 573 - 0
codec/json.go

@@ -0,0 +1,573 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// This json support uses base64 encoding for bytes, because you cannot
+// store and read arbitrary byte sequences in a json string (json strings hold only unicode text).
+//
+// This library supports UTF-8 only, for both encoding and decoding.
+//
+// Note:
+//   - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+//   - encode does not beautify: there is no whitespace in the encoded output.
+//   - rpc calls which take single integer arguments or write single numeric arguments will not
+//     work well, as it may not be possible to know when a number ends (unlike strings, which have a terminating char).
+//     Luckily, rpc support in this package mitigates that via the rpcEncodeTerminator interface.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+//--------------------------------
+
+var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
+
+type jsonEncDriver struct {
+	w  encWriter
+	h  *JsonHandle
+	b  [64]byte // scratch
+	bs []byte   // scratch
+	noBuiltInTypes
+}
+
+func (e *jsonEncDriver) encodeNil() {
+	e.w.writeb(jsonLiterals[9:]) // null
+}
+
+func (e *jsonEncDriver) encodeBool(b bool) {
+	if b {
+		e.w.writeb(jsonLiterals[:4]) // true
+	} else {
+		e.w.writeb(jsonLiterals[4:9]) // false
+	}
+}
+
+func (e *jsonEncDriver) encodeFloat32(f float32) {
+	e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
+}
+
+func (e *jsonEncDriver) encodeFloat64(f float64) {
+	// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
+	e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
+}
+
+func (e *jsonEncDriver) encodeInt(v int64) {
+	e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) encodeUint(v uint64) {
+	e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) encodeExt(rv reflect.Value, xtag uint64, ext Ext, en *Encoder) {
+	if v := ext.ConvertExt(rv); v == nil {
+		e.encodeNil()
+	} else {
+		en.encode(v)
+	}
+}
+
+func (e *jsonEncDriver) encodeRawExt(re *RawExt, en *Encoder) {
+	// only encodes re.Value (never re.Data)
+	if re.Value == nil {
+		e.encodeNil()
+	} else {
+		en.encode(re.Value)
+	}
+}
+
+func (e *jsonEncDriver) encodeArrayStart(length int) {
+	e.w.writen1('[')
+}
+
+func (e *jsonEncDriver) encodeArrayEntrySeparator() {
+	e.w.writen1(',')
+}
+
+func (e *jsonEncDriver) encodeArrayEnd() {
+	e.w.writen1(']')
+}
+
+func (e *jsonEncDriver) encodeMapStart(length int) {
+	e.w.writen1('{')
+}
+
+func (e *jsonEncDriver) encodeMapEntrySeparator() {
+	e.w.writen1(',')
+}
+
+func (e *jsonEncDriver) encodeMapKVSeparator() {
+	e.w.writen1(':')
+}
+
+func (e *jsonEncDriver) encodeMapEnd() {
+	e.w.writen1('}')
+}
+
+func (e *jsonEncDriver) encodeString(c charEncoding, v string) {
+	// e.w.writestr(strconv.Quote(v))
+	e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) encodeSymbol(v string) {
+	e.encodeString(c_UTF8, v)
+}
+
+func (e *jsonEncDriver) encodeStringBytes(c charEncoding, v []byte) {
+	if c == c_RAW {
+		slen := base64.StdEncoding.EncodedLen(len(v))
+		if e.bs == nil {
+			e.bs = e.b[:]
+		}
+		if cap(e.bs) >= slen {
+			e.bs = e.bs[:slen]
+		} else {
+			e.bs = make([]byte, slen)
+		}
+		base64.StdEncoding.Encode(e.bs, v)
+		e.w.writen1('"')
+		e.w.writeb(e.bs)
+		e.w.writen1('"')
+	} else {
+		e.encodeString(c, string(v))
+	}
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+	// adapted from std pkg encoding/json
+	const hex = "0123456789abcdef"
+	w := e.w
+	w.writen1('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				w.writen2('\\', b)
+			case '\n':
+				w.writen2('\\', 'n')
+			case '\r':
+				w.writen2('\\', 'r')
+			case '\b':
+				w.writen2('\\', 'b')
+			case '\f':
+				w.writen2('\\', 'f')
+			case '\t':
+				w.writen2('\\', 't')
+			default:
+				w.writestr(`\u00`)
+				w.writen1(hex[b>>4])
+				w.writen1(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			w.writestr(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+		// Both technically valid JSON, but bomb on JSONP, so fix here.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				w.writestr(s[start:i])
+			}
+			w.writestr(`\u202`)
+			w.writen1(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		w.writestr(s[start:])
+	}
+	w.writen1('"')
+}
+
+//--------------------------------
+
+type jsonDecDriver struct {
+	h  *JsonHandle
+	r  decReader
+	ct valueType // container type. one of unset, array or map.
+	b  [64]byte  // scratch
+	b2 [8]byte   // scratch
+	noBuiltInTypes
+}
+
+// This will skip whitespace characters and return the next byte to read.
+// The returned byte determines what kind of value follows.
+func (d *jsonDecDriver) skipWhitespace(unread bool) (b byte) {
+	for b = d.r.readn1(); b == ' ' || b == '\t' || b == '\r' || b == '\n'; b = d.r.readn1() {
+	}
+	if unread {
+		d.r.unreadn1()
+	}
+	return b
+}
+
+func (d *jsonDecDriver) initReadNext() {
+	d.skipWhitespace(true)
+	d.ct = valueTypeUnset
+}
+
+func (d *jsonDecDriver) checkBreak() bool {
+	b := d.skipWhitespace(true)
+	return b == '}' || b == ']'
+}
+
+func (d *jsonDecDriver) readStr(s []byte) {
+	bs := d.b[:len(s)]
+	d.r.readb(bs)
+	if !bytes.Equal(bs, s) {
+		decErr("json: expecting null: got %s", bs)
+	}
+}
+
+func (d *jsonDecDriver) tryDecodeAsNil() bool {
+	b := d.skipWhitespace(true)
+	if b == 'n' {
+		d.readStr(jsonLiterals[9:]) // null
+		d.ct = valueTypeNil
+		return true
+	}
+	return false
+}
+
+func (d *jsonDecDriver) decodeBool() bool {
+	b := d.skipWhitespace(false)
+	if b == 'f' {
+		d.readStr(jsonLiterals[5:9]) // alse
+		return false
+	}
+	if b == 't' {
+		d.readStr(jsonLiterals[1:4]) // rue
+		return true
+	}
+	decErr("json: decode bool: got first char %c", b)
+	panic("unreachable")
+}
+
+func (d *jsonDecDriver) readMapStart() int {
+	d.expectChar('{')
+	d.ct = valueTypeMap
+	return -1
+}
+
+func (d *jsonDecDriver) readArrayStart() int {
+	d.expectChar('[')
+	d.ct = valueTypeArray
+	return -1
+}
+func (d *jsonDecDriver) readMapEnd() {
+	d.expectChar('}')
+}
+func (d *jsonDecDriver) readArrayEnd() {
+	d.expectChar(']')
+}
+func (d *jsonDecDriver) readArrayEntrySeparator() {
+	d.expectChar(',')
+}
+func (d *jsonDecDriver) readMapEntrySeparator() {
+	d.expectChar(',')
+}
+func (d *jsonDecDriver) readMapKVSeparator() {
+	d.expectChar(':')
+}
+func (d *jsonDecDriver) expectChar(c uint8) {
+	b := d.skipWhitespace(false)
+	if b != c {
+		decErr("json: expect char %c but got char %c", c, b)
+	}
+}
+
+func (d *jsonDecDriver) isContainerType(vt valueType) bool {
+	// check container type by checking the first char
+	if d.ct == valueTypeUnset {
+		b := d.skipWhitespace(true)
+		switch b {
+		case '{':
+			d.ct = valueTypeMap
+		case '[':
+			d.ct = valueTypeArray
+		case 'n':
+			d.ct = valueTypeNil
+		case '"':
+			d.ct = valueTypeString
+		}
+	}
+	switch vt {
+	case valueTypeNil, valueTypeBytes, valueTypeString, valueTypeArray, valueTypeMap:
+		return d.ct == vt
+	}
+	decErr("isContainerType: unsupported parameter: %v", vt)
+	panic("unreachable")
+}
+
+func (d *jsonDecDriver) decNum() (i int64, f float64, isFloat bool) {
+	// If it has a . or an e|E, decode as a float; else decode as an int.
+	b := d.skipWhitespace(false)
+	if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
+		decErr("json: decNum: got first char %c", b)
+	}
+	var eof bool
+	bs := d.b[:0]
+	for {
+		if b == '.' || b == 'e' || b == 'E' {
+			isFloat = true
+		} else if b == '+' || b == '-' || (b >= '0' && b <= '9') {
+		} else {
+			d.r.unreadn1()
+			break
+		}
+		bs = append(bs, b)
+		b, eof = d.readn1eof()
+		if eof {
+			break
+		}
+	}
+	var err error
+	if isFloat {
+		f, err = strconv.ParseFloat(string(bs), 64)
+	} else {
+		i, err = strconv.ParseInt(string(bs), 10, 64)
+	}
+	if err != nil {
+		decErr("decNum: %v", err)
+	}
+	return
+}
+
+func (d *jsonDecDriver) decodeInt(bitsize uint8) (i int64) {
+	i, xf, xisFloat := d.decNum()
+	if xisFloat {
+		i = int64(xf)
+	}
+	checkOverflow(0, i, bitsize)
+	return
+}
+
+func (d *jsonDecDriver) decodeUint(bitsize uint8) (ui uint64) {
+	xi, xf, xisFloat := d.decNum()
+	if (xisFloat && xf < 0) || (!xisFloat && xi < 0) {
+		decErr("received negative number decoding number into uint: %v, %v", xi, xf)
+	}
+	if xisFloat {
+		ui = uint64(xf)
+	} else {
+		ui = uint64(xi)
+	}
+	checkOverflow(ui, 0, bitsize)
+	return
+}
+
+func (d *jsonDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
+	xi, f, xisFloat := d.decNum()
+	if !xisFloat {
+		f = float64(xi)
+	}
+	checkOverflowFloat32(f, chkOverflow32)
+	return
+}
+
+func (d *jsonDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
+	s := d.decStringAsBytes(nil)
+	slen := base64.StdEncoding.DecodedLen(len(s))
+	if len(bs) < slen {
+		changed = true
+		bsOut = make([]byte, slen)
+		bs = bsOut
+	} else if len(bs) > slen {
+		changed = true
+		bsOut = bs[:slen]
+		bs = bsOut
+	}
+	base64.StdEncoding.Decode(bs, s)
+	return
+}
+
+func (d *jsonDecDriver) decodeExt(rv reflect.Value, xtag uint64, ext Ext, de *Decoder) (realxtag uint64) {
+	if ext == nil {
+		re := rv.Interface().(*RawExt)
+		re.Tag = xtag
+		de.decode(&re.Value)
+	} else {
+		var v interface{}
+		de.decode(&v)
+		ext.UpdateExt(rv, v)
+	}
+	return
+}
+
+func (d *jsonDecDriver) decodeString() (s string) {
+	return string(d.decStringAsBytes(nil))
+}
+
+func (d *jsonDecDriver) decStringAsBytes(v []byte) []byte {
+	d.expectChar('"')
+	if v == nil {
+		v = d.b[:0]
+	}
+	for {
+		c := d.r.readn1()
+		if c == '"' {
+			break
+		} else if c == '\\' {
+			c = d.r.readn1()
+			switch c {
+			case '"', '\\', '/', '\'':
+				v = append(v, c)
+			case 'b':
+				v = append(v, '\b')
+			case 'f':
+				v = append(v, '\f')
+			case 'n':
+				v = append(v, '\n')
+			case 'r':
+				v = append(v, '\r')
+			case 't':
+				v = append(v, '\t')
+			case 'u':
+				rr := d.jsonU4(false)
+				fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
+				if utf16.IsSurrogate(rr) {
+					rr = utf16.DecodeRune(rr, d.jsonU4(true))
+				}
+				w2 := utf8.EncodeRune(d.b2[:], rr)
+				v = append(v, d.b2[:w2]...)
+			default:
+				decErr("json: unsupported escaped value: %c", c)
+			}
+		} else {
+			v = append(v, c)
+		}
+	}
+	return v
+}
+
+func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
+	if checkSlashU && !(d.r.readn1() == '\\' && d.r.readn1() == 'u') {
+		decErr(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
+	}
+	d.r.readb(d.b2[:4])
+	ui4, err4 := strconv.ParseUint(string(d.b2[:4]), 16, 64)
+	if err4 != nil {
+		decErr("json: unquoteStr: %v", err4)
+	}
+	return rune(ui4)
+}
+
+func (d *jsonDecDriver) decodeNaked(de *Decoder) (v interface{}, vt valueType, decodeFurther bool) {
+	n := d.skipWhitespace(true)
+	switch n {
+	case 'n':
+		d.readStr(jsonLiterals[9:]) // null
+		vt = valueTypeNil
+	case 'f':
+		d.readStr(jsonLiterals[4:9]) // false
+		vt = valueTypeBool
+		v = false
+	case 't':
+		d.readStr(jsonLiterals[:4]) // true
+		vt = valueTypeBool
+		v = true
+	case '{':
+		vt = valueTypeMap
+		decodeFurther = true
+	case '[':
+		vt = valueTypeArray
+		decodeFurther = true
+	case '"':
+		vt = valueTypeString
+		v = d.decodeString()
+	default:
+		xi, xf, xisFloat := d.decNum()
+		if xisFloat {
+			vt = valueTypeFloat
+			v = xf
+		} else if xi < 0 || d.h.SignedInteger {
+			vt = valueTypeInt
+			v = xi
+		} else {
+			vt = valueTypeUint
+			v = uint64(xi)
+		}
+	}
+	return
+}
+
+func (d *jsonDecDriver) readn1eof() (v uint8, eof bool) {
+	defer func() {
+		if x := recover(); x != nil {
+			if x != io.EOF {
+				panic(x)
+			}
+			eof = true
+		}
+	}()
+	v = d.r.readn1()
+	return
+}
+
+//----------------------
+
+// JsonHandle is a handle for the JSON encoding format.
+//
+// Json is comprehensively supported:
+//    - decodes numbers into interface{} as int64, uint64 or float64
+//    - encodes and decodes []byte using base64 Std Encoding
+//    - UTF-8 support for encoding and decoding
+//
+// It has better performance than encoding/json in the standard library,
+// by leveraging the performance improvements of the codec library and minimizing allocations.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which makes it
+// possible to read multiple values from a stream containing json and non-json content.
+type JsonHandle struct {
+	BasicHandle
+}
+
+func (h *JsonHandle) newEncDriver(w encWriter) encDriver {
+	return &jsonEncDriver{w: w, h: h}
+}
+
+func (h *JsonHandle) newDecDriver(r decReader) decDriver {
+	return &jsonDecDriver{r: r, h: h}
+}
+
+func (h *JsonHandle) isBinaryEncoding() bool {
+	return false
+}
+
+var jsonEncodeTerminate = []byte{' '}
+
+func (h *JsonHandle) rpcEncodeTerminate() []byte {
+	return jsonEncodeTerminate
+}
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriver)(nil)
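
A short usage sketch of the new handle (not part of this commit; it assumes the usual import path github.com/ugorji/go/codec):

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    func main() {
    	h := new(codec.JsonHandle)

    	// Round-trip a struct; []byte fields are written as base64 strings.
    	type T struct {
    		Name string
    		Data []byte
    	}
    	var buf []byte
    	if err := codec.NewEncoderBytes(&buf, h).Encode(T{Name: "file", Data: []byte{1, 2, 3}}); err != nil {
    		panic(err)
    	}
    	fmt.Println(string(buf)) // e.g. {"Name":"file","Data":"AQID"}

    	// Unlike encoding/json, decoding a bare integer into interface{}
    	// yields an integer type (uint64 here; int64 if negative or if
    	// SignedInteger is set on the handle), not float64.
    	var v interface{}
    	if err := codec.NewDecoderBytes([]byte(`123`), h).Decode(&v); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T %v\n", v, v) // uint64 123
    }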

+ 29 - 49
codec/msgpack.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 /*
@@ -106,6 +106,8 @@ type msgpackEncDriver struct {
 	w encWriter
 	h *MsgpackHandle
 	noBuiltInTypes
+	encNoMapArrayEnd
+	encNoMapArraySeparator
 }
 
 func (e *msgpackEncDriver) encodeNil() {
@@ -214,11 +216,11 @@ func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
 	}
 }
 
-func (e *msgpackEncDriver) encodeArrayPreamble(length int) {
+func (e *msgpackEncDriver) encodeArrayStart(length int) {
 	e.writeContainerLen(msgpackContainerList, length)
 }
 
-func (e *msgpackEncDriver) encodeMapPreamble(length int) {
+func (e *msgpackEncDriver) encodeMapStart(length int) {
 	e.writeContainerLen(msgpackContainerMap, length)
 }
 
@@ -273,6 +275,8 @@ type msgpackDecDriver struct {
 	bdType valueType
 	noBuiltInTypes
 	noStreamingCodec
+	decNoMapArrayEnd
+	decNoMapArraySeparator
 }
 
 // Note: This returns either a primitive (int, bool, etc) for non-containers,
@@ -560,50 +564,25 @@ func (d *msgpackDecDriver) initReadNext() {
 	d.bdType = valueTypeUnset
 }
 
-func (d *msgpackDecDriver) currentEncodedType() valueType {
-	if d.bdType == valueTypeUnset {
-		bd := d.bd
-		switch bd {
-		case mpNil:
-			d.bdType = valueTypeNil
-		case mpFalse, mpTrue:
-			d.bdType = valueTypeBool
-		case mpFloat, mpDouble:
-			d.bdType = valueTypeFloat
-		case mpUint8, mpUint16, mpUint32, mpUint64:
-			if d.h.SignedInteger {
-				d.bdType = valueTypeInt
-			} else {
-				d.bdType = valueTypeUint
-			}
-		case mpInt8, mpInt16, mpInt32, mpInt64:
-			d.bdType = valueTypeInt
-		default:
-			switch {
-			case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
-				d.bdType = valueTypeInt
-			case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
-				d.bdType = valueTypeInt
-			case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
-				if d.h.RawToString {
-					d.bdType = valueTypeString
-				} else {
-					d.bdType = valueTypeBytes
-				}
-			case bd == mpBin8, bd == mpBin16, bd == mpBin32:
-				d.bdType = valueTypeBytes
-			case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
-				d.bdType = valueTypeArray
-			case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
-				d.bdType = valueTypeMap
-			case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
-				d.bdType = valueTypeExt
-			default:
-				decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
-			}
-		}
-	}
-	return d.bdType
+func (d *msgpackDecDriver) isContainerType(vt valueType) bool {
+	bd := d.bd
+	switch vt {
+	case valueTypeNil:
+		return bd == mpNil
+	case valueTypeBytes:
+		return bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+			(!d.h.RawToString &&
+				(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)))
+	case valueTypeString:
+		return d.h.RawToString &&
+			(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))
+	case valueTypeArray:
+		return bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax)
+	case valueTypeMap:
+		return bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax)
+	}
+	decErr("isContainerType: unsupported parameter: %v", vt)
+	panic("unreachable")
 }
 
 func (d *msgpackDecDriver) tryDecodeAsNil() bool {
@@ -634,11 +613,11 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
 	return
 }
 
-func (d *msgpackDecDriver) readMapLen() int {
+func (d *msgpackDecDriver) readMapStart() int {
 	return d.readContainerLen(msgpackContainerMap)
 }
 
-func (d *msgpackDecDriver) readArrayLen() int {
+func (d *msgpackDecDriver) readArrayStart() int {
 	return d.readContainerLen(msgpackContainerList)
 }
 
@@ -712,6 +691,7 @@ type MsgpackHandle struct {
 
 	// RawToString controls how raw bytes are decoded into a nil interface{}.
 	RawToString bool
+
 	// WriteExt flag supports encoding configured extensions with extension tags.
 	// It also controls whether other elements of the new spec are encoded (ie Str8).
 	//

+ 19 - 2
codec/rpc.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -10,6 +10,14 @@ import (
 	"sync"
 )
 
+// rpcEncodeTerminator allows a handle to specify a []byte terminator to send after each Encode.
+//
+// Some codecs like json need to put a space after each encoded value, to serve as a
+// delimiter for things like numbers (else the json codec will continue reading till EOF).
+type rpcEncodeTerminator interface {
+	rpcEncodeTerminate() []byte
+}
+
 // Rpc provides a rpc Server or Client Codec for rpc communication.
 type Rpc interface {
 	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
@@ -36,6 +44,7 @@ type rpcCodec struct {
 	br  *bufio.Reader
 	mu  sync.Mutex
 	cls bool
+	h   Handle
 }
 
 func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
@@ -47,6 +56,7 @@ func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
 		br:  br,
 		enc: NewEncoder(bw, h),
 		dec: NewDecoder(br, h),
+		h:   h,
 	}
 }
 
@@ -65,12 +75,19 @@ func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err e
 	if err = c.enc.Encode(obj1); err != nil {
 		return
 	}
+	t, tOk := c.h.(rpcEncodeTerminator)
+	if tOk {
+		c.bw.Write(t.rpcEncodeTerminate())
+	}
 	if writeObj2 {
 		if err = c.enc.Encode(obj2); err != nil {
 			return
 		}
+		if tOk {
+			c.bw.Write(t.rpcEncodeTerminate())
+		}
 	}
-	if doFlush && c.bw != nil {
+	if doFlush {
 		return c.bw.Flush()
 	}
 	return
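
Because numbers in a json stream have no explicit terminator, the rpcEncodeTerminate hook (JsonHandle writes a single space) is what keeps rpc framing unambiguous. A rough sketch of serving net/rpc over the json codec, assuming the package's exported GoRpc codec factory and the usual import path github.com/ugorji/go/codec:

    package main

    import (
    	"net"
    	"net/rpc"

    	"github.com/ugorji/go/codec"
    )

    // Arith is a trivial net/rpc service.
    type Arith struct{}

    func (Arith) Add(args [2]int, reply *int) error { *reply = args[0] + args[1]; return nil }

    func main() {
    	if err := rpc.Register(new(Arith)); err != nil {
    		panic(err)
    	}
    	l, err := net.Listen("tcp", "127.0.0.1:9999")
    	if err != nil {
    		panic(err)
    	}
    	h := new(codec.JsonHandle)
    	for {
    		conn, err := l.Accept()
    		if err != nil {
    			return
    		}
    		// Each encoded value is followed by the handle's terminator (a space),
    		// so the peer can tell where a bare number ends.
    		go rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, h))
    	}
    }

A client would pair this with rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, h)).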

+ 27 - 38
codec/simple.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec
@@ -33,6 +33,8 @@ type simpleEncDriver struct {
 	w encWriter
 	noBuiltInTypes
 	//b [8]byte
+	encNoMapArrayEnd
+	encNoMapArraySeparator
 }
 
 func (e *simpleEncDriver) encodeNil() {
@@ -124,11 +126,11 @@ func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
 	e.w.writen1(xtag)
 }
 
-func (e *simpleEncDriver) encodeArrayPreamble(length int) {
+func (e *simpleEncDriver) encodeArrayStart(length int) {
 	e.encLen(simpleVdArray, length)
 }
 
-func (e *simpleEncDriver) encodeMapPreamble(length int) {
+func (e *simpleEncDriver) encodeMapStart(length int) {
 	e.encLen(simpleVdMap, length)
 }
 
@@ -156,6 +158,8 @@ type simpleDecDriver struct {
 	bd     byte
 	noBuiltInTypes
 	noStreamingCodec
+	decNoMapArrayEnd
+	decNoMapArraySeparator
 	//b      [8]byte
 }
 
@@ -168,38 +172,25 @@ func (d *simpleDecDriver) initReadNext() {
 	d.bdType = valueTypeUnset
 }
 
-func (d *simpleDecDriver) currentEncodedType() valueType {
-	if d.bdType == valueTypeUnset {
-		switch d.bd {
-		case simpleVdNil:
-			d.bdType = valueTypeNil
-		case simpleVdTrue, simpleVdFalse:
-			d.bdType = valueTypeBool
-		case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
-			if d.h.SignedInteger {
-				d.bdType = valueTypeInt
-			} else {
-				d.bdType = valueTypeUint
-			}
-		case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
-			d.bdType = valueTypeInt
-		case simpleVdFloat32, simpleVdFloat64:
-			d.bdType = valueTypeFloat
-		case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
-			d.bdType = valueTypeString
-		case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
-			d.bdType = valueTypeBytes
-		case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
-			d.bdType = valueTypeExt
-		case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
-			d.bdType = valueTypeArray
-		case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
-			d.bdType = valueTypeMap
-		default:
-			decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd)
-		}
+func (d *simpleDecDriver) isContainerType(vt valueType) bool {
+	switch vt {
+	case valueTypeNil:
+		return d.bd == simpleVdNil
+	case valueTypeBytes:
+		const x uint8 = simpleVdByteArray
+		return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
+	case valueTypeString:
+		const x uint8 = simpleVdString
+		return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
+	case valueTypeArray:
+		const x uint8 = simpleVdArray
+		return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
+	case valueTypeMap:
+		const x uint8 = simpleVdMap
+		return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
 	}
-	return d.bdType
+	decErr("isContainerType: unsupported parameter: %v", vt)
+	panic("unreachable")
 }
 
 func (d *simpleDecDriver) tryDecodeAsNil() bool {
@@ -210,8 +201,6 @@ func (d *simpleDecDriver) tryDecodeAsNil() bool {
 	return false
 }
 
-// 		i = int64(ui)
-
 func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
 	switch d.bd {
 	case simpleVdPosInt:
@@ -297,12 +286,12 @@ func (d *simpleDecDriver) decodeBool() (b bool) {
 	return
 }
 
-func (d *simpleDecDriver) readMapLen() (length int) {
+func (d *simpleDecDriver) readMapStart() (length int) {
 	d.bdRead = false
 	return d.decLen()
 }
 
-func (d *simpleDecDriver) readArrayLen() (length int) {
+func (d *simpleDecDriver) readArrayStart() (length int) {
 	d.bdRead = false
 	return d.decLen()
 }

+ 1 - 1
codec/time.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a BSD-style license found in the LICENSE file.
 
 package codec

+ 125 - 0
codec/values_test.go

@@ -0,0 +1,125 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// This file contains values used by tests and benchmarks.
+
+import (
+	"math"
+	"time"
+)
+
+var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC()
+
+type AnonInTestStruc struct {
+	AS        string
+	AI64      int64
+	AI16      int16
+	AUi64     uint64
+	ASslice   []string
+	AI64slice []int64
+}
+
+type TestStruc struct {
+	S    string
+	I64  int64
+	I16  int16
+	Ui64 uint64
+	Ui8  uint8
+	B    bool
+	By   byte
+
+	Sslice    []string
+	I64slice  []int64
+	I16slice  []int16
+	Ui64slice []uint64
+	Ui8slice  []uint8
+	Bslice    []bool
+	Byslice   []byte
+
+	Islice    []interface{}
+	Iptrslice []*int64
+
+	AnonInTestStruc
+
+	//M map[interface{}]interface{}  `json:"-",bson:"-"`
+	Ms    map[string]interface{}
+	Msi64 map[string]int64
+
+	Nintf      interface{} //don't set this, so we can test for nil
+	T          time.Time
+	Nmap       map[string]bool //don't set this, so we can test for nil
+	Nslice     []byte          //don't set this, so we can test for nil
+	Nint64     *int64          //don't set this, so we can test for nil
+	Mtsptr     map[string]*TestStruc
+	Mts        map[string]TestStruc
+	Its        []*TestStruc
+	Nteststruc *TestStruc
+}
+
+func newTestStruc(depth int, bench bool) (ts *TestStruc) {
+	var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
+
+	ts = &TestStruc{
+		S:    "some string",
+		I64:  math.MaxInt64 * 2 / 3, // 64,
+		I16:  16,
+		Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it
+		Ui8:  160,
+		B:    true,
+		By:   5,
+
+		Sslice:    []string{"one", "two", "three"},
+		I64slice:  []int64{1, 2, 3},
+		I16slice:  []int16{4, 5, 6},
+		Ui64slice: []uint64{137, 138, 139},
+		Ui8slice:  []uint8{210, 211, 212},
+		Bslice:    []bool{true, false, true, false},
+		Byslice:   []byte{13, 14, 15},
+
+		Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)},
+
+		Ms: map[string]interface{}{
+			"true":     "true",
+			"int64(9)": false,
+		},
+		Msi64: map[string]int64{
+			"one": 1,
+			"two": 2,
+		},
+		T: testStrucTime,
+		AnonInTestStruc: AnonInTestStruc{
+			AS:        "A-String",
+			AI64:      64,
+			AI16:      16,
+			AUi64:     64,
+			ASslice:   []string{"Aone", "Atwo", "Athree"},
+			AI64slice: []int64{1, 2, 3},
+		},
+	}
+	//For benchmarks, some things will not work.
+	if !bench {
+		//json and bson require string keys in maps
+		//ts.M = map[interface{}]interface{}{
+		//	true: "true",
+		//	int8(9): false,
+		//}
+		//gob cannot encode nil in element in array (encodeArray: nil element)
+		ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
+		// ts.Iptrslice = nil
+	}
+	if depth > 0 {
+		depth--
+		if ts.Mtsptr == nil {
+			ts.Mtsptr = make(map[string]*TestStruc)
+		}
+		if ts.Mts == nil {
+			ts.Mts = make(map[string]TestStruc)
+		}
+		ts.Mtsptr["0"] = newTestStruc(depth, bench)
+		ts.Mts["0"] = *(ts.Mtsptr["0"])
+		ts.Its = append(ts.Its, ts.Mtsptr["0"])
+	}
+	return
+}

+ 27 - 37
codec/z_helper_test.go

@@ -11,8 +11,9 @@ package codec
 
 import (
 	"errors"
-	"reflect"
 	"flag"
+	"fmt"
+	"reflect"
 	"testing"
 )
 
@@ -22,11 +23,30 @@ var (
 )
 
 func init() {
+	testBincHSym.AsSymbols = AsSymbolAll
+	testBincHNoSym.AsSymbols = AsSymbolNone
 	testInitFlags()
-	benchInitFlags()
 	flag.Parse()
 	testInit()
-	benchInit()
+}
+
+var (
+	testMsgpackH   = &MsgpackHandle{}
+	testBincH      = &BincHandle{}
+	testBincHNoSym = &BincHandle{}
+	testBincHSym   = &BincHandle{}
+	testSimpleH    = &SimpleHandle{}
+	testCborH      = &CborHandle{}
+	testJsonH      = &JsonHandle{}
+)
+
+func fnCodecEncode(ts interface{}, h Handle) (bs []byte, err error) {
+	err = NewEncoderBytes(&bs, h).Encode(ts)
+	return
+}
+
+func fnCodecDecode(buf []byte, ts interface{}, h Handle) error {
+	return NewDecoderBytes(buf, h).Decode(ts)
 }
 
 func checkErrT(t *testing.T, err error) {
@@ -50,7 +70,10 @@ func logT(x interface{}, format string, args ...interface{}) {
 	} else if b, ok := x.(*testing.B); ok && b != nil && testLogToT {
 		b.Logf(format, args...)
 	} else {
-		debugf(format, args...)
+		if len(format) == 0 || format[len(format)-1] != '\n' {
+			format = format + "\n"
+		}
+		fmt.Printf(format, args...)
 	}
 }
 
@@ -68,36 +91,3 @@ func deepEqual(v1, v2 interface{}) (err error) {
 	}
 	return
 }
-
-func approxDataSize(rv reflect.Value) (sum int) {
-	switch rk := rv.Kind(); rk {
-	case reflect.Invalid:
-	case reflect.Ptr, reflect.Interface:
-		sum += int(rv.Type().Size())
-		sum += approxDataSize(rv.Elem())
-	case reflect.Slice:
-		sum += int(rv.Type().Size())
-		for j := 0; j < rv.Len(); j++ {
-			sum += approxDataSize(rv.Index(j))
-		}
-	case reflect.String:
-		sum += int(rv.Type().Size())
-		sum += rv.Len()
-	case reflect.Map:
-		sum += int(rv.Type().Size())
-		for _, mk := range rv.MapKeys() {
-			sum += approxDataSize(mk)
-			sum += approxDataSize(rv.MapIndex(mk))
-		}
-	case reflect.Struct:
-		//struct size already includes the full data size.
-		//sum += int(rv.Type().Size())
-		for j := 0; j < rv.NumField(); j++ {
-			sum += approxDataSize(rv.Field(j))
-		}
-	default:
-		//pure value types
-		sum += int(rv.Type().Size())
-	}
-	return
-}

Not all files are shown because too many files changed in this diff