Add codec library

Ugorji Nwoke 12 years ago
parent commit d9a6f6ca1c
14 changed files with 4658 additions and 0 deletions
  1. codec/0doc.go (+166, -0)
  2. codec/README.md (+161, -0)
  3. codec/bench_test.go (+263, -0)
  4. codec/binc.go (+617, -0)
  5. codec/codecs_test.go (+725, -0)
  6. codec/decode.go (+685, -0)
  7. codec/encode.go (+619, -0)
  8. codec/ext_dep_test.go (+66, -0)
  9. codec/helper.go (+274, -0)
  10. codec/helper_internal.go (+61, -0)
  11. codec/msgpack.go (+731, -0)
  12. codec/msgpack_test.py (+83, -0)
  13. codec/rpc.go (+108, -0)
  14. codec/z_helper_test.go (+99, -0)

+ 166 - 0
codec/0doc.go

@@ -0,0 +1,166 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+/*
+
+High Performance and Feature-Rich Idiomatic Go Library providing
+encode/decode support for different serialization formats.
+
+Supported Serialization formats are:
+
+  - msgpack: [http://wiki.msgpack.org/display/MSGPACK/Format+specification]
+  - binc: [http://www.ugorji.net/project/binc]
+
+To install:
+
+    go get github.com/ugorji/go/codec
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+  - Simple but extremely powerful and feature-rich API
+  - Very High Performance.   
+    Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
+    This was achieved by taking extreme care on:
+      - managing allocation
+      - stack frame size (important due to Go's use of split stacks), 
+      - reflection use
+      - recursion implications
+      - zero-copy mode (encoding/decoding to byte slice without using temp buffers)
+  - Correct.  
+    Care was taken to precisely handle corner cases like: 
+      overflows, nil maps and slices, nil value in stream, etc.
+  - Efficient zero-copying (no temporary byte buffers)  
+    when encoding into or decoding from a byte slice.
+  - Standard field renaming via tags
+  - Encoding from any value  
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Decoding into pointer to any non-nil typed value  
+    (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
+  - Supports extension functions to handle the encode/decode of custom types
+  - Schema-less decoding  
+    (decode into a pointer to a nil interface{} as opposed to a typed non-nil value).  
+    Includes Options to configure what specific map or slice type to use 
+    when decoding an encoded list or map into a nil interface{}
+  - Provides an RPC Server and Client Codec for the net/rpc communication protocol.
+  - Msgpack Specific:
+      - Provides extension functions to handle spec-defined extensions (binary, timestamp)
+      - Options to resolve ambiguities in handling raw bytes (as string or []byte)  
+        during schema-less decoding (decoding into a nil interface{})
+      - RPC Server/Client Codec for msgpack-rpc protocol defined at: 
+        http://wiki.msgpack.org/display/MSGPACK/RPC+specification
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types. 
+
+There are no restrictions on what the custom type can be. Extensions can
+be any type: pointers, structs, custom types built off arrays/slices, strings,
+etc. Some examples:
+
+    type BitSet   []int
+    type BitSet64 uint64
+    type UUID     string
+    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+    type GifImage struct { ... }
+
+Typically, MyStructWithUnexportedFields is encoded as an empty map because
+it has no exported fields, while UUID will be encoded as a string,
+etc. However, with extension support, you can encode any of these
+however you like.
+
+We provide implementations of these functions where the spec has defined
+an inter-operable format. For msgpack, these are Binary and
+time.Time. Library users will have to explicitly configure these as seen
+in the usage below.
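+
+As an illustration, a minimal sketch of an extension pair for the UUID type
+above might look like the following. The decode signature matches what this
+package accepts for decode extensions; the exact encode signature is an
+assumption made here for illustration only:
+
+    func uuidEncodeExt(rv reflect.Value) ([]byte, error) {
+        // encode the UUID simply as its raw string bytes
+        return []byte(rv.String()), nil
+    }
+    func uuidDecodeExt(rv reflect.Value, bs []byte) error {
+        // set the decoded bytes back as the UUID string value
+        rv.SetString(string(bs))
+        return nil
+    }
+    // register on a handle, e.g. with tag 2:
+    //   mh.AddExt(reflect.TypeOf(UUID("")), 2, uuidEncodeExt, uuidDecodeExt)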
+
+Usage
+
+Typical usage model:
+
+    var (
+      mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+      sliceByteTyp = reflect.TypeOf([]byte(nil))
+      timeTyp = reflect.TypeOf(time.Time{})
+    )
+    
+    // create and configure Handle
+    var (
+      bh codec.BincHandle
+      mh codec.MsgpackHandle
+    )
+
+    mh.MapType = mapStrIntfTyp
+    
+    // configure extensions for msgpack, to enable Binary and Time support for tags 0 and 1
+    mh.AddExt(sliceByteTyp, 0, mh.BinaryEncodeExt, mh.BinaryDecodeExt)
+    mh.AddExt(timeTyp, 1, mh.TimeEncodeExt, mh.TimeDecodeExt)
+
+    // create and use decoder/encoder
+    var (
+      r io.Reader
+      w io.Writer
+      b []byte
+      h = &bh // or &mh to use msgpack
+    )
+    
+    dec = codec.NewDecoder(r, h)
+    dec = codec.NewDecoderBytes(b, h)
+    err = dec.Decode(&v) 
+    
+    enc = codec.NewEncoder(w, h)
+    enc = codec.NewEncoderBytes(&b, h)
+    err = enc.Encode(v)
+    
+    //RPC Server
+    var rpcH codec.GoRpc // or codec.MsgpackSpecRpc  
+    go func() {
+        for {
+            conn, err := listener.Accept()
+            rpcCodec := rpcH.ServerCodec(conn, h)
+            rpc.ServeCodec(rpcCodec)
+        }
+    }()
+    
+    //RPC Communication (client side)
+    conn, err = net.Dial("tcp", "localhost:5555")  
+    rpcCodec := rpcH.ClientCodec(conn, h)  
+    client := rpc.NewClientWithCodec(rpcCodec)
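+
+    // the client is then used like any net/rpc client, for example
+    // (service and argument names here are hypothetical):
+    //   err = client.Call("Arith.Multiply", args, &reply)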
+
+Representative Benchmark Results
+
+A sample run of the benchmarks using "go test -bi -bench=.":
+
+    ..............................................
+    Benchmark: 
+    	Struct recursive Depth:             1
+    	ApproxDeepSize Of benchmark Struct: 4758
+    Benchmark One-Pass Run:
+    	   msgpack: len: 1504
+    	      binc: len: 1508
+    	       gob: len: 1908
+    	      json: len: 2402
+    	 v-msgpack: len: 1536
+    	      bson: len: 3009
+    ..............................................
+    Benchmark__Msgpack__Encode	   50000	     60824 ns/op
+    Benchmark__Msgpack__Decode	   10000	    115119 ns/op
+    Benchmark__Binc_____Encode	   50000	     55140 ns/op
+    Benchmark__Binc_____Decode	   10000	    112132 ns/op
+    Benchmark__Gob______Encode	   10000	    143350 ns/op
+    Benchmark__Gob______Decode	    5000	    434248 ns/op
+    Benchmark__Json_____Encode	   10000	    157298 ns/op
+    Benchmark__Json_____Decode	    5000	    303729 ns/op
+    Benchmark__Bson_____Encode	   10000	    174250 ns/op
+    Benchmark__Bson_____Decode	   10000	    223602 ns/op
+    Benchmark__VMsgpack_Encode	   20000	     80438 ns/op
+    Benchmark__VMsgpack_Decode	   10000	    157330 ns/op
+    
+To run full benchmark suite (including against vmsgpack and bson), 
+see notes in ext_dep_test.go
+
+*/
+package codec

+ 161 - 0
codec/README.md

@@ -0,0 +1,161 @@
+# Codec
+
+High Performance and Feature-Rich Idiomatic Go Library providing
+encode/decode support for different serialization formats.
+
+Supported Serialization formats are:
+
+  - msgpack: [http://wiki.msgpack.org/display/MSGPACK/Format+specification]
+  - binc: [http://www.ugorji.net/project/binc]
+
+To install:
+
+    go get github.com/ugorji/go/codec
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+  - Simple but extremely powerful and feature-rich API
+  - Very High Performance.   
+    Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
+    This was achieved by taking extreme care on:
+      - managing allocation
+      - stack frame size (important due to Go's use of split stacks), 
+      - reflection use
+      - recursion implications
+      - zero-copy mode (encoding/decoding to byte slice without using temp buffers)
+  - Correct.  
+    Care was taken to precisely handle corner cases like: 
+      overflows, nil maps and slices, nil value in stream, etc.
+  - Efficient zero-copying (no temporary byte buffers)  
+    when encoding into or decoding from a byte slice.
+  - Standard field renaming via tags
+  - Encoding from any value  
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Decoding into pointer to any non-nil typed value  
+    (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
+  - Supports extension functions to handle the encode/decode of custom types
+  - Schema-less decoding  
+    (decode into a pointer to a nil interface{} as opposed to a typed non-nil value).  
+    Includes Options to configure what specific map or slice type to use 
+    when decoding an encoded list or map into a nil interface{}
+  - Provides an RPC Server and Client Codec for the net/rpc communication protocol.
+  - Msgpack Specific:
+      - Provides extension functions to handle spec-defined extensions (binary, timestamp)
+      - Options to resolve ambiguities in handling raw bytes (as string or []byte)  
+        during schema-less decoding (decoding into a nil interface{})
+      - RPC Server/Client Codec for msgpack-rpc protocol defined at: 
+        http://wiki.msgpack.org/display/MSGPACK/RPC+specification
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types. 
+
+There are no restrictions on what the custom type can be. Extensions can
+be any type: pointers, structs, custom types built off arrays/slices, strings,
+etc. Some examples:
+
+    type BitSet   []int
+    type BitSet64 uint64
+    type UUID     string
+    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+    type GifImage struct { ... }
+
+Typically, MyStructWithUnexportedFields is encoded as an empty map because
+it has no exported fields, while UUID will be encoded as a string,
+etc. However, with extension support, you can encode any of these
+however you like.
+
+We provide implementations of these functions where the spec has defined
+an inter-operable format. For msgpack, these are Binary and
+time.Time. Library users will have to explicitly configure these as seen
+in the usage below.
+
+## Usage
+
+Typical usage model:
+
+    var (
+      mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+      sliceByteTyp = reflect.TypeOf([]byte(nil))
+      timeTyp = reflect.TypeOf(time.Time{})
+    )
+    
+    // create and configure Handle
+    var (
+      bh codec.BincHandle
+      mh codec.MsgpackHandle
+    )
+
+    mh.MapType = mapStrIntfTyp
+    
+    // configure extensions for msgpack, to enable Binary and Time support for tags 0 and 1
+    mh.AddExt(sliceByteTyp, 0, mh.BinaryEncodeExt, mh.BinaryDecodeExt)
+    mh.AddExt(timeTyp, 1, mh.TimeEncodeExt, mh.TimeDecodeExt)
+
+    // create and use decoder/encoder
+    var (
+      r io.Reader
+      w io.Writer
+      b []byte
+      h = &bh // or &mh to use msgpack
+    )
+    
+    dec = codec.NewDecoder(r, h)
+    dec = codec.NewDecoderBytes(b, h)
+    err = dec.Decode(&v) 
+    
+    enc = codec.NewEncoder(w, h)
+    enc = codec.NewEncoderBytes(&b, h)
+    err = enc.Encode(v)
+    
+    //RPC Server
+    var rpcH codec.GoRpc // or codec.MsgpackSpecRpc  
+    go func() {
+        for {
+            conn, err := listener.Accept()
+            rpcCodec := rpcH.ServerCodec(conn, h)
+            rpc.ServeCodec(rpcCodec)
+        }
+    }()
+    
+    //RPC Communication (client side)
+    conn, err = net.Dial("tcp", "localhost:5555")  
+    rpcCodec := rpcH.ClientCodec(conn, h)  
+    client := rpc.NewClientWithCodec(rpcCodec)
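+
+Schema-less decoding into a nil interface{} follows the same model. A minimal
+sketch, reusing the handle and byte slice declared above:
+
+    var v interface{}
+    err = codec.NewDecoderBytes(b, h).Decode(&v)
+    // v now holds a container of the configured MapType/SliceType (or the
+    // defaults) if the stream held a map/list, or a primitive value otherwise.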
+
+## Representative Benchmark Results
+
+A sample run of the benchmarks using "go test -bi -bench=.":
+
+    ..............................................
+    Benchmark: 
+    	Struct recursive Depth:             1
+    	ApproxDeepSize Of benchmark Struct: 4758
+    Benchmark One-Pass Run:
+    	   msgpack: len: 1504
+    	      binc: len: 1508
+    	       gob: len: 1908
+    	      json: len: 2402
+    	 v-msgpack: len: 1536
+    	      bson: len: 3009
+    ..............................................
+    Benchmark__Msgpack__Encode	   50000	     60824 ns/op
+    Benchmark__Msgpack__Decode	   10000	    115119 ns/op
+    Benchmark__Binc_____Encode	   50000	     55140 ns/op
+    Benchmark__Binc_____Decode	   10000	    112132 ns/op
+    Benchmark__Gob______Encode	   10000	    143350 ns/op
+    Benchmark__Gob______Decode	    5000	    434248 ns/op
+    Benchmark__Json_____Encode	   10000	    157298 ns/op
+    Benchmark__Json_____Decode	    5000	    303729 ns/op
+    Benchmark__Bson_____Encode	   10000	    174250 ns/op
+    Benchmark__Bson_____Decode	   10000	    223602 ns/op
+    Benchmark__VMsgpack_Encode	   20000	     80438 ns/op
+    Benchmark__VMsgpack_Decode	   10000	    157330 ns/op
+    
+To run full benchmark suite (including against vmsgpack and bson), 
+see notes in ext\_dep_test.go
+

+ 263 - 0
codec/bench_test.go

@@ -0,0 +1,263 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"encoding/json"
+	"encoding/gob"
+	"testing"
+	"bytes"
+	"reflect"
+	"time"
+	"runtime"
+	"flag"
+	"fmt"
+)
+
+// Sample way to run:
+// go test -bi -bv -bd=1 -benchmem -bench Msgpack__Encode
+
+var (
+	_ = fmt.Printf
+	benchTs *TestStruc
+
+	approxSize int
+
+	benchDoInitBench bool
+	benchVerify bool
+	benchUnscientificRes bool = false
+	//depth of 0 maps to a ~400 byte json-encoded string, 1 maps to ~1400 bytes, etc
+	//For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable.
+	benchDepth int
+	benchInitDebug bool
+	benchInitChan = make(chan bool, 1)
+	benchCheckers []benchChecker
+)
+
+type benchEncFn func(*TestStruc) ([]byte, error)
+type benchDecFn func([]byte, *TestStruc) error
+type benchChecker struct {
+	name string
+	encodefn benchEncFn
+	decodefn benchDecFn
+}
+
+func init() {
+	flag.BoolVar(&benchInitDebug, "bdbg", false, "Bench Debug")
+	flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth")
+	flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init")
+	flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark")
+	flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark")
+	flag.Parse()
+
+	benchTs = newTestStruc(benchDepth, true)
+	approxSize = approxDataSize(reflect.ValueOf(benchTs))
+	bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1)
+	if bytesLen < approxSize {
+		bytesLen = approxSize
+	}
+
+	benchCheckers = append(benchCheckers, 
+		benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn},
+		benchChecker{"binc", fnBincEncodeFn, fnBincDecodeFn},
+		benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn},
+		benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn},
+	)
+	if benchDoInitBench {
+		go func() {
+			<- benchInitChan
+			runBenchInit()
+		}()
+	}
+}
+
+func runBenchInit() {
+	logT(nil, "..............................................")
+	logT(nil, "BENCHMARK INIT: %v", time.Now())
+	logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), " + 
+		"use: \"go test -bench=.\"")
+	logT(nil, "Benchmark: ")
+	logT(nil, "\tStruct recursive Depth:             %d", benchDepth)
+	if approxSize > 0 {
+		logT(nil, "\tApproxDeepSize Of benchmark Struct: %d", approxSize)
+	}
+	if benchUnscientificRes {
+		logT(nil, "Benchmark One-Pass Run (with Unscientific Encode/Decode times): ")
+	} else {
+		logT(nil, "Benchmark One-Pass Run:")
+	}
+	for _, bc := range benchCheckers {
+		doBenchCheck(bc.name, bc.encodefn, bc.decodefn)
+	}
+	logT(nil, "..............................................")
+	if benchInitDebug {
+		logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs)
+	}
+}
+
+func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) {
+	runtime.GC()
+	tnow := time.Now()
+	buf, err := encfn(benchTs)
+	if err != nil {
+		logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err)
+	} 
+	encDur := time.Now().Sub(tnow)
+	encLen := len(buf)
+	runtime.GC()
+	if !benchUnscientificRes {
+		logT(nil, "\t%10s: len: %v\n", name, encLen)
+		return
+	}
+	tnow = time.Now()
+	if err = decfn(buf, new(TestStruc)); err != nil {
+		logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err)
+	}
+	decDur := time.Now().Sub(tnow)
+	logT(nil, "\t%10s: len: %v, encode: %v, decode: %v\n", name, encLen, encDur, decDur)
+}
+
+func fnBenchmarkEncode(b *testing.B, encName string, encfn benchEncFn) {
+	runtime.GC()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := encfn(benchTs)
+		if err != nil {
+			logT(b, "Error encoding benchTs: %s: %v", encName, err)
+			b.FailNow()
+		}
+	}
+}
+
+func fnBenchmarkDecode(b *testing.B, encName string, encfn benchEncFn, decfn benchDecFn) {
+	buf, err := encfn(benchTs)
+	if err != nil {
+		logT(b, "Error encoding benchTs: %s: %v", encName, err)
+		b.FailNow()
+	}
+	runtime.GC()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ts := new(TestStruc)		
+		if err = decfn(buf, ts); err != nil {
+			logT(b, "Error decoding into new TestStruc: %s: %v", encName, err)
+			b.FailNow()
+		}
+		if benchVerify {
+			verifyTsTree(b, ts)
+		}
+	}
+}
+
+func verifyTsTree(b *testing.B, ts *TestStruc) {
+	var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc
+	ts0 = ts
+
+	if benchDepth > 0 {
+		ts1m, ts1s = verifyCheckAndGet(b, ts0)
+	}
+
+	if benchDepth > 1 {
+		ts2m, ts2s = verifyCheckAndGet(b, ts1m)
+	}
+	for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} {
+		if tsx != nil {
+			verifyOneOne(b, tsx)
+		}
+	}
+}
+
+func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) {
+	// if len(ts1m.Ms) <= 2 {
+	// 	logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms))
+	// 	b.FailNow()
+	// } 
+	if len(ts0.Its) == 0 {
+		logT(b, "Error: ts0.Islice len should be > 0. Got: %v", len(ts0.Its))
+		b.FailNow()
+	}
+	ts1m = ts0.Mtsptr["0"]
+	ts1s = ts0.Its[0]
+	if (ts1m == nil || ts1s == nil) {
+		logT(b, "Error: At benchDepth 1, No *TestStruc found")
+		b.FailNow()
+	}		
+	return
+}
+
+func verifyOneOne(b *testing.B, ts *TestStruc) {
+	if ts.I64slice[2] != int64(3) {
+		logT(b, "Error: Decode failed by checking values")
+		b.FailNow()
+	}
+}
+
+func fnMsgpackEncodeFn(ts *TestStruc) (bs []byte, err error) {
+	err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts)
+	return
+}
+
+func fnMsgpackDecodeFn(buf []byte, ts *TestStruc) error {
+	return NewDecoderBytes(buf, testMsgpackH).Decode(ts)
+}
+
+func fnBincEncodeFn(ts *TestStruc) (bs []byte, err error) {
+	err = NewEncoderBytes(&bs, testBincH).Encode(ts)
+	return
+}
+
+func fnBincDecodeFn(buf []byte, ts *TestStruc) error {
+	return NewDecoderBytes(buf, testBincH).Decode(ts)
+}
+
+func fnGobEncodeFn(ts *TestStruc) ([]byte, error) {
+	bbuf := new(bytes.Buffer)
+	err := gob.NewEncoder(bbuf).Encode(ts)
+	return bbuf.Bytes(), err
+}
+
+func fnGobDecodeFn(buf []byte, ts *TestStruc) error {
+	return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts)
+}
+
+func fnJsonEncodeFn(ts *TestStruc) ([]byte, error) {
+	return json.Marshal(ts)
+}
+
+func fnJsonDecodeFn(buf []byte, ts *TestStruc) error {
+	return json.Unmarshal(buf, ts)
+}
+
+func Benchmark__Msgpack__Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "msgpack", fnMsgpackEncodeFn)
+}
+
+func Benchmark__Msgpack__Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn)
+}
+
+func Benchmark__Binc_____Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "binc", fnBincEncodeFn)
+}
+
+func Benchmark__Binc_____Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "binc", fnBincEncodeFn, fnBincDecodeFn)
+}
+
+func Benchmark__Gob______Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "gob", fnGobEncodeFn)
+}
+
+func Benchmark__Gob______Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "gob", fnGobEncodeFn, fnGobDecodeFn)
+}
+
+func Benchmark__Json_____Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "json", fnJsonEncodeFn)
+}
+
+func Benchmark__Json_____Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "json", fnJsonEncodeFn, fnJsonDecodeFn)
+}
+

+ 617 - 0
codec/binc.go

@@ -0,0 +1,617 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+//BincHandle is a Handle for the Binc Schema-Free Encoding Format
+//defined at http://www.ugorji.net/project/binc .
+//
+//BincHandle currently supports all Binc features with the following EXCEPTIONS:
+//  - only integers up to 64 bits of precision (degree of precision <= 3) are supported.
+//    big integers are unsupported.
+//  - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
+//    extended precision and decimal IEEE 754 floats are unsupported.
+//  - Only UTF-8 strings supported. 
+//    Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
+type BincHandle struct {
+	encdecHandle
+	DecodeOptions
+}
+
+// vd as low 4 bits (there are 16 slots)
+const (
+	bincVdSpecial byte = iota
+	bincVdUint
+	bincVdInt
+	bincVdFloat
+	
+	bincVdString
+	bincVdByteArray
+	bincVdArray
+	bincVdMap
+	
+	bincVdTimestamp
+	bincVdSmallInt
+	bincVdUnicodeOther
+
+	// 4 open slots left ...
+
+	bincVdCustomExt = 0x0f
+)
+
+const (
+	bincSpNil byte = iota
+	bincSpFalse
+	bincSpTrue
+	bincSpNan
+	bincSpPosInf
+	bincSpNegInf
+	bincSpZero
+	bincSpNegOne
+)
+
+const (
+	bincFlBin16 byte = iota
+	bincFlBin32
+	_ // bincFlBin32e
+	bincFlBin64
+	// others not currently supported
+)
+
+type bincEncoder struct { 
+	w encWriter
+}
+
+type bincDecoder struct {
+	r decReader
+	bdRead bool
+	bd byte 
+	vd byte
+	vs byte 
+}
+
+func (_ *BincHandle) newEncoder(w encWriter) encoder {
+	return &bincEncoder{w: w}
+}
+
+func (_ *BincHandle) newDecoder(r decReader) decoder {
+	return &bincDecoder{r: r}
+}
+
+func (_ *BincHandle) writeExt() bool {
+	return true
+}
+
+func (e *bincEncoder) encodeBuiltinType(rt reflect.Type, rv reflect.Value) bool {
+	switch rt {
+	case timeTyp:
+		bs := encodeTime(rv.Interface().(time.Time))
+		e.w.writen1(bincVdTimestamp << 4 | uint8(len(bs)))
+		e.w.writeb(bs)
+		return true
+	}
+	return false
+}
+
+func (e *bincEncoder) encodeNil() { 
+	e.w.writen1(bincVdSpecial << 4 | bincSpNil)
+}
+
+func (e *bincEncoder) encodeBool(b bool) { 
+	if b {
+		e.w.writen1(bincVdSpecial << 4 | bincSpTrue)
+	} else {
+		e.w.writen1(bincVdSpecial << 4 | bincSpFalse)
+	}		
+}
+
+func (e *bincEncoder) encodeFloat32(f float32) { 
+	e.w.writen1(bincVdFloat << 4 | bincFlBin32)
+	e.w.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncoder) encodeFloat64(f float64) { 
+	e.w.writen1(bincVdFloat << 4 | bincFlBin64)
+	e.w.writeUint64(math.Float64bits(f))
+}
+
+func (e *bincEncoder) encodeInt(v int64) { 
+	switch {
+	case v == 0:
+		e.w.writen1(bincVdSpecial << 4 | bincSpZero)
+	case v == -1:
+		e.w.writen1(bincVdSpecial << 4 | bincSpNegOne)
+	case v >= 1 && v <= 16:
+		e.w.writen1(bincVdSmallInt << 4 | byte(v-1))
+	case v >= math.MinInt8 && v <= math.MaxInt8:
+		e.w.writen2(bincVdInt << 4, byte(v))
+	case v >= math.MinInt16 && v <= math.MaxInt16:
+		e.w.writen1(bincVdInt << 4 | 0x01)
+		e.w.writeUint16(uint16(v))
+	case v >= math.MinInt32 && v <= math.MaxInt32:
+		e.w.writen1(bincVdInt << 4 | 0x02)
+		e.w.writeUint32(uint32(v))
+	default:
+		e.w.writen1(bincVdInt << 4 | 0x03)
+		e.w.writeUint64(uint64(v))
+	}
+}
+
+func (e *bincEncoder) encodeUint(v uint64) { 
+	e.encNumber(bincVdUint << 4, v)
+}
+
+func (e *bincEncoder) encodeExtPreamble(xtag byte, length int)  {
+	e.encLen(bincVdCustomExt << 4, uint64(length))
+	e.w.writen1(xtag)
+}
+
+func (e *bincEncoder) encodeArrayPreamble(length int) {
+	e.encLen(bincVdArray << 4, uint64(length))
+}
+
+func (e *bincEncoder) encodeMapPreamble(length int) { 
+	e.encLen(bincVdMap << 4, uint64(length))
+}
+
+func (e *bincEncoder) encodeString(c charEncoding, v string) { 
+	l := uint64(len(v))
+	e.encBytesLen(c, l)
+	if l > 0 {
+		e.w.writestr(v)
+	}	
+}
+
+func (e *bincEncoder) encodeStringBytes(c charEncoding, v []byte) { 
+	l := uint64(len(v))
+	e.encBytesLen(c, l)
+	if l > 0 {
+		e.w.writeb(v)
+	}	
+}
+
+func (e *bincEncoder) encBytesLen(c charEncoding, length uint64) {
+	//TODO: support bincUnicodeOther (for now, just use string or bytearray)
+	if c == c_RAW {
+		e.encLen(bincVdByteArray << 4, length)
+	} else {
+		e.encLen(bincVdString << 4, length)
+	}
+}
+
+func (e *bincEncoder) encLen(bd byte, l uint64) {
+	if l < 12 {
+		e.w.writen1(bd | uint8(l + 4))
+	} else {
+		e.encNumber(bd, l)
+	}
+}
+	
+func (e *bincEncoder) encNumber(bd byte, v uint64) {
+	switch {
+	case v <= math.MaxUint8:
+		e.w.writen2(bd, byte(v))
+	case v <= math.MaxUint16:
+		e.w.writen1(bd | 0x01)
+		e.w.writeUint16(uint16(v))
+	case v <= math.MaxUint32:
+		e.w.writen1(bd | 0x02)
+		e.w.writeUint32(uint32(v))
+	default:
+		e.w.writen1(bd | 0x03)
+		e.w.writeUint64(uint64(v))
+	}
+}
+
+
+//------------------------------------
+
+
+func (d *bincDecoder) initReadNext() {
+	if d.bdRead {
+		return
+	}
+	d.bd = d.r.readUint8()
+	d.vd = d.bd >> 4
+	d.vs = d.bd & 0x0f
+	d.bdRead = true
+}
+
+func (d *bincDecoder) currentIsNil() bool {
+	if d.bd == bincVdSpecial << 4 | bincSpNil {
+		d.bdRead = false
+		return true
+	} 
+	return false
+}
+
+func (d *bincDecoder) decodeBuiltinType(rt reflect.Type, rv reflect.Value) bool {
+	switch rt {
+	case timeTyp:
+		if d.vd != bincVdTimestamp {
+			decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+		}
+		tt, err := decodeTime(d.r.readn(int(d.vs)))
+		if err != nil {
+			panic(err)
+		}
+		rv.Set(reflect.ValueOf(tt))
+		d.bdRead = false
+		return true
+	}
+	return false
+}
+
+func (d *bincDecoder) decFloat() (f float64) {
+	switch d.vs {
+	case bincFlBin32:
+		f = float64(math.Float32frombits(d.r.readUint32()))
+	case bincFlBin64:
+		f = math.Float64frombits(d.r.readUint64())
+	default:
+		decErr("only float32 and float64 are supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decFloatI() (f interface{}) {
+	switch d.vs {
+	case bincFlBin32:
+		f = math.Float32frombits(d.r.readUint32())
+	case bincFlBin64:
+		f = math.Float64frombits(d.r.readUint64())
+	default:
+		decErr("only float32 and float64 are supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decInt() (v int64) {
+	switch d.vs {
+	case 0:
+		v = int64(int8(d.r.readUint8()))
+	case 1:
+		v = int64(int16(d.r.readUint16()))
+	case 2:
+		v = int64(int32(d.r.readUint32()))
+	case 3:
+		v = int64(d.r.readUint64())
+	default:
+		decErr("integers with greater than 64 bits of precision not supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decIntI() (v interface{}) {
+	switch d.vs {
+	case 0:
+		v = int8(d.r.readUint8())
+	case 1:
+		v = int16(d.r.readUint16())
+	case 2:
+		v = int32(d.r.readUint32())
+	case 3:
+		v = int64(d.r.readUint64())
+	default:
+		decErr("integers with greater than 64 bits of precision not supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decUint() (v uint64) {
+	switch d.vs {
+	case 0:
+		v = uint64(d.r.readUint8())
+	case 1:
+		v = uint64(d.r.readUint16())
+	case 2:
+		v = uint64(d.r.readUint32())
+	case 3:
+		v = uint64(d.r.readUint64())
+	default:
+		decErr("integers with greater than 64 bits of precision not supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decUintI() (v interface{}) {
+	switch d.vs {
+	case 0:
+		v = d.r.readUint8()
+	case 1:
+		v = d.r.readUint16()
+	case 2:
+		v = d.r.readUint32()
+	case 3:
+		v = d.r.readUint64()
+	default:
+		decErr("integers with greater than 64 bits of precision not supported")
+	}
+	return
+}
+
+func (d *bincDecoder) decIntAny() (i int64) {
+	switch d.vd {
+	case bincVdInt: 
+		i = d.decInt()
+	case bincVdSmallInt:
+		i = int64(d.vs) + 1
+	case bincVdSpecial:
+		switch d.vs {
+		case bincSpZero:
+			//i = 0
+		case bincSpNegOne:
+			i = -1
+		default:
+			decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+		}
+	default:
+		decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+	}
+	return
+}
+		
+func (d *bincDecoder) decodeInt(bitsize uint8) (i int64) {
+	switch d.vd {
+	case bincVdUint: 
+		i = int64(d.decUint())
+	default:
+		i = d.decIntAny()
+	}
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowInt())
+	if bitsize > 0 {
+		if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+			decErr("Overflow int value: %v", i)
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) decodeUint(bitsize uint8) (ui uint64)  {
+	switch d.vd {
+	case bincVdUint: 
+		ui = d.decUint()
+	default:
+		if i := d.decIntAny(); i >= 0 {
+			ui = uint64(i)
+		} else {
+			decErr("Assigning negative signed value: %v, to unsigned type", i)
+		}
+	}	
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+	if bitsize > 0 {
+		if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+			decErr("Overflow uint value: %v", ui) 
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) decodeFloat(chkOverflow32 bool) (f float64) {
+	if d.vd == bincVdSpecial {
+		switch d.vs {
+		case bincSpNan:
+			return math.NaN()
+		case bincSpPosInf:
+			return math.Inf(1)
+		case bincSpNegInf:
+			return math.Inf(-1)
+		}
+	}
+	switch d.vd {
+	case bincVdFloat: 
+		f = d.decFloat()
+	case bincVdUint: 
+		f = float64(d.decUint())
+	default:
+		f = float64(d.decIntAny())
+	}
+	
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowFloat())
+	if chkOverflow32 {
+		f2 := f
+		if f2 < 0 {
+			f2 = -f
+		}
+		if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 {
+			decErr("Overflow float32 value: %v", f2)
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecoder) decodeBool() (b bool)  {
+	switch d.bd {
+	case bincVdSpecial << 4 | bincSpFalse:
+		// b = false
+	case bincVdSpecial << 4 | bincSpTrue:
+		b = true
+	default:
+		decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) readMapLen() (length int) {
+	if d.vd != bincVdMap {
+		decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) readArrayLen() (length int) {
+	if d.vd != bincVdArray {
+		decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) decLen() int {
+	if d.vs <= 3 {
+		return int(d.decUint())
+	} 
+	return int(d.vs - 4)
+}
+
+func (d *bincDecoder) decBytesLen() int {
+	//decode string from either bytearray or string
+	if d.vd != bincVdString && d.vd != bincVdByteArray {
+		decErr("Invalid d.vd for string. Expecting string: 0x%x or bytearray: 0x%x. Got: 0x%x", 
+			bincVdString, bincVdByteArray, d.vd)
+	}
+	return d.decLen()
+}
+
+func (d *bincDecoder) decodeString() (s string)  {
+	if length := d.decBytesLen(); length > 0 {
+		s = string(d.r.readn(length))
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) decodeStringBytes(bs []byte) (bsOut []byte, changed bool) {
+	clen := d.decBytesLen()
+	if clen > 0 {
+		// if no contents in stream, don't update the passed byteslice	
+		if len(bs) != clen {
+			if len(bs) > clen {
+				bs = bs[:clen]
+			} else {
+				bs = make([]byte, clen)
+			}
+			bsOut = bs
+			changed = true
+		}
+		d.r.readb(bs)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *bincDecoder) decodeExt(tag byte) (xbs []byte) {
+	switch d.vd {
+	case bincVdCustomExt:
+		l := d.decLen()
+		if xtag := d.r.readUint8(); xtag != tag {
+			decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+		}
+		xbs = d.r.readn(l)
+	case bincVdByteArray:
+		xbs, _ = d.decodeStringBytes(nil)
+	default:
+		decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+	}
+	d.bdRead = false
+	return	
+}
+
+func (d *bincDecoder) decodeNaked(h decodeHandleI) (rv reflect.Value, ctx decodeNakedContext) {
+	d.initReadNext()
+	var v interface{}
+
+	switch d.vd {
+	case bincVdSpecial:
+		switch d.vs {
+		case bincSpNil:
+			ctx = dncNil
+			d.bdRead = false
+		case bincSpFalse:
+			v = false
+		case bincSpTrue:
+			v = true
+		case bincSpNan:
+			v = math.NaN()
+		case bincSpPosInf:
+			v = math.Inf(1)
+		case bincSpNegInf:
+			v = math.Inf(-1)
+		case bincSpZero:
+			v = int8(0)
+		case bincSpNegOne:
+			v = int8(-1)
+		default:
+			decErr("Unrecognized special value 0x%x", d.vs)
+		}
+	case bincVdUint:
+		v = d.decUintI()
+	case bincVdInt:
+		v = d.decIntI()
+	case bincVdSmallInt:
+		v = int8(d.vs) + 1
+	case bincVdFloat:
+		v = d.decFloatI()
+	case bincVdString:
+		v = d.decodeString()
+	case bincVdByteArray:
+		v, _ = d.decodeStringBytes(nil)
+	case bincVdTimestamp:
+		tt, err := decodeTime(d.r.readn(int(d.vs)))
+		if err != nil {
+			panic(err)
+		}
+		v = tt
+	case bincVdCustomExt:
+		//ctx = dncExt
+		l := d.decLen()
+		xtag := d.r.readUint8()
+		opts := h.(*BincHandle)
+		rt, bfn := opts.getDecodeExtForTag(xtag)
+		if rt == nil {
+			decErr("Unable to find type mapped to extension tag: %v", xtag)
+		}
+		if rt.Kind() == reflect.Ptr {
+			rv = reflect.New(rt.Elem())
+		} else {
+			rv = reflect.New(rt).Elem()
+		}
+		if fnerr := bfn(rv, d.r.readn(l)); fnerr != nil {
+			panic(fnerr)
+		} 
+	case bincVdArray:
+		ctx = dncContainer
+		opts := h.(*BincHandle)
+		if opts.SliceType == nil {
+			rv = reflect.New(intfSliceTyp).Elem()
+		} else {
+			rv = reflect.New(opts.SliceType).Elem()
+		}
+	case bincVdMap:
+		ctx = dncContainer
+		opts := h.(*BincHandle)
+		if opts.MapType == nil {
+			rv = reflect.MakeMap(mapIntfIntfTyp)
+		} else {
+			rv = reflect.MakeMap(opts.MapType)
+		}
+	default:
+		decErr("Unrecognized d.vd: 0x%x", d.vd)
+	}
+
+	if ctx == dncHandled {
+		d.bdRead = false
+		if v != nil {
+			rv = reflect.ValueOf(v)
+		}
+	}
+	return
+}
+
+

+ 725 - 0
codec/codecs_test.go

@@ -0,0 +1,725 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// Test works by using a slice of interfaces.
+// It can test for encoding/decoding into/from a nil interface{}
+// or passing the object to encode/decode into.
+//
+// There are basically 2 main tests here.
+// First test internally encodes and decodes things and verifies that
+// the artifact was as expected.
+// Second test will use python msgpack to create a bunch of golden files,
+// read those files, and compare them to what it should be. It then 
+// writes those files back out and compares the byte streams.
+//
+// Taken together, the tests are pretty extensive.
+
+// Some hints:
+// - python msgpack encodes positive numbers as uints, so use uints below
+//   for positive numbers.
+
+import (
+	"reflect"
+	"testing"
+	"net/rpc"
+	"bytes"
+	"time"
+	"os"
+	"os/exec"
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+	"net"
+	"fmt"
+	"flag"
+	"encoding/gob"
+)
+
+var (
+	testInitDebug bool
+	testUseIoEncDec bool
+	_ = fmt.Printf
+	skipVerifyVal interface{} = &(struct{}{})
+	timeLoc = time.FixedZone("UTC-08:00", -8*60*60) //time.UTC
+	timeToCompare = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc) //time.Time{} 
+	//"2012-02-02T02:02:02.000002000Z" //1328148122000002
+	timeToCompareAs interface{} = timeToCompare.UnixNano() 
+	table []interface{}               // main items we encode
+	tableVerify []interface{}         // we verify encoded things against this after decode
+	tableTestNilVerify []interface{}  // for nil interface, use this to verify (rules are different)
+	tablePythonVerify []interface{}   // for verifying for python, since Python sometimes
+                                      // will encode a float32 as float64, or large int as uint
+	testRpcInt = new(TestRpcInt)
+	testMsgpackH = &MsgpackHandle{}
+	testBincH = &BincHandle{}
+)
+
+func init() {
+	// delete(testDecOpts.ExtFuncs, timeTyp)
+	flag.BoolVar(&testInitDebug, "tdbg", false, "Test Debug")
+	flag.BoolVar(&testUseIoEncDec, "tio", false, "Use IO Reader/Writer for Marshal/Unmarshal")
+	flag.Parse()
+	gob.Register(new(TestStruc))
+	if testInitDebug {
+		ts0 := newTestStruc(2, false)
+		fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0)
+	}
+	
+	testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt)
+	testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt)
+}
+
+type AnonInTestStruc struct {
+	AS string
+	AI64 int64
+	AI16 int16
+	AUi64 uint64
+	ASslice []string
+	AI64slice []int64
+}
+
+type TestStruc struct {
+	S string
+	I64 int64
+	I16 int16
+	Ui64 uint64
+	Ui8 uint8
+	B bool
+	By byte
+	
+	Sslice []string
+	I64slice []int64
+	I16slice []int16
+	Ui64slice []uint64
+	Ui8slice []uint8
+	Bslice []bool
+	Byslice []byte
+	
+	Islice []interface{}
+	Iptrslice []*int64
+	
+	AnonInTestStruc
+	
+	//M map[interface{}]interface{}  `json:"-",bson:"-"`
+	Ms map[string]interface{}
+	Msi64 map[string]int64
+	
+	Nintf interface{}    //don't set this, so we can test for nil
+	T time.Time          
+	Nmap map[string]bool //don't set this, so we can test for nil
+	Nslice []byte        //don't set this, so we can test for nil
+	Nint64 *int64        //don't set this, so we can test for nil
+	Mtsptr map[string]*TestStruc
+	Mts map[string]TestStruc
+	Its []*TestStruc
+	Nteststruc *TestStruc
+}
+
+type TestRpcInt struct {
+	i int
+}
+
+func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil }
+func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil }
+func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil }
+
+func init() {
+	primitives := []interface{} {
+		int8(-8),
+		int16(-1616),
+		int32(-32323232),
+		int64(-6464646464646464),
+		uint8(192),
+		uint16(1616),
+		uint32(32323232),
+		uint64(6464646464646464),
+		byte(192),
+		float32(-3232.0),
+		float64(-6464646464.0),
+		float32(3232.0),
+		float64(6464646464.0),
+		false,
+		true,
+		nil,
+		timeToCompare,
+		"someday",
+		"",
+		"bytestring",
+	}
+	mapsAndStrucs := []interface{}{
+		map[string]bool{
+			"true":true,
+			"false":false,
+		},
+		map[string]interface{}{
+			"true": "True",
+			"false": false,
+			"uint16(1616)": uint16(1616),
+		},
+		//add a complex combo map in here. (map has list which has map)
+		//note that after the first thing, everything else should be generic.
+		map[string]interface{}{
+			"list": []interface{}{
+				int16(1616),
+				int32(32323232),
+				true,
+				float32(-3232.0),
+				map[string]interface{} {
+					"TRUE":true,
+					"FALSE":false,
+				},
+				[]interface{}{true, false},
+			},
+			"int32": int32(32323232),
+			"bool": true,
+			"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
+			"SHORT STRING": "1234567890",
+		},
+		map[interface{}]interface{}{
+			true: "true",
+			uint8(8): false,
+			"false": uint8(0),
+		},
+		newTestStruc(0, false),
+	}
+	
+	table = []interface{}{}
+	table = append(table, primitives...)    //0-19 are primitives
+	table = append(table, primitives)       //20 is a list of primitives
+	table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct
+
+	// we verify against the same table, but skip 23 
+	// because interface{} equality is not defined exactly for such objects or nil.
+	var a, b []interface{}
+	var c map[string]interface{}
+	a = make([]interface{}, len(table))
+	copy(a, table)
+	b = make([]interface{}, len(a[20].([]interface{})))
+	copy(b, a[20].([]interface{}))
+	// b[0], b[4], b[8], b[16], b[19] = int8(-8), int8(8), int8(8), 
+	// 	// []interface {}{int32(1328148122), int16(2000)}, "bytestring"
+	// 	timeToCompare, "bytestring"
+	//b[4], b[8] = int8(8), int8(8)
+	//b[4] = int8(8)
+	a[20] = b
+	a[23] = skipVerifyVal 
+	//a[25] = skipVerifyVal
+	tableVerify = a
+	
+	//when decoding into nil, for testing, 
+	//we treat each []byte as string, and uint < 127 are decoded as int8.
+	a = make([]interface{}, len(tableVerify))
+	copy(a, tableVerify)
+	//a[0], a[4], a[8], a[16], a[19] = int8(-8), int8(8), int8(8), timeToCompare, "bytestring"
+	a[0], a[16], a[19] = int8(-8), timeToCompare, "bytestring"
+	a[21] = map[string]interface{}{"true":true, "false":false}
+	a[23] = table[23]
+	a[25] = skipVerifyVal
+	tableTestNilVerify = a
+	
+	//python msgpack encodes large positive numbers as unsigned, and all floats as float64
+	a = make([]interface{}, len(tableTestNilVerify)-2)
+	copy(a, tableTestNilVerify)
+	a[23] = table[23]
+	a[9], a[11], a[16] = float64(-3232.0), float64(3232.0), uint64(1328148122000002)
+	b = make([]interface{}, len(a[20].([]interface{})))
+	copy(b, a[20].([]interface{}))
+	//b[4], b[8] = int8(8), int8(8)
+	b[9], b[11], b[16] = float64(-3232.0), float64(3232.0), uint64(1328148122000002)
+	a[20] = b
+	c = make(map[string]interface{})
+	for k, v := range a[23].(map[string]interface{}) { 
+		c[k] = v
+	}
+	a[23] = c
+	c["int32"] = uint32(32323232)
+	b = c["list"].([]interface{})
+	b[0], b[1], b[3] = uint16(1616), uint32(32323232), float64(-3232.0)
+	tablePythonVerify = a
+}
+
+func testUnmarshal(v interface{}, data []byte, h Handle) error {
+	if testUseIoEncDec {
+		return NewDecoder(bytes.NewBuffer(data), h).Decode(v)
+	}
+	return NewDecoderBytes(data, h).Decode(v)
+}
+
+func testMarshal(v interface{}, h Handle) (bs []byte, err error) {
+	if testUseIoEncDec {
+		var buf bytes.Buffer
+		err = NewEncoder(&buf, h).Encode(v)
+		bs = buf.Bytes()
+		return
+	}
+	err = NewEncoderBytes(&bs, h).Encode(v)
+	return
+}
+
+func newTestStruc(depth int, bench bool) (ts *TestStruc) {
+	var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
+	
+	ts = &TestStruc {
+		S: "some string",
+		I64: 64,
+		I16: 16,
+		Ui64: 64,
+		Ui8: 160,
+		B: true,
+		By: 5,
+		
+		Sslice: []string{"one", "two", "three"},
+		I64slice: []int64{1, 2, 3},
+		I16slice: []int16{4, 5, 6},
+		Ui64slice: []uint64{7, 8, 9},
+		Ui8slice: []uint8{10, 11, 12},
+		Bslice: []bool{true, false, true, false},
+		Byslice: []byte{13, 14, 15},
+		
+		Islice: []interface{}{"true", true, "no", false, uint8(88), float64(0.4)},
+		
+		Ms: map[string]interface{}{
+			"true": "true",
+			"int64(9)": false,
+		},
+		Msi64: map[string]int64{
+			"one": 1,
+			"two": 2,
+		},
+		T: timeToCompare,
+		AnonInTestStruc: AnonInTestStruc{
+			AS: "A-String",
+			AI64: 64,
+			AI16: 16,
+			AUi64: 64,
+			ASslice: []string{"Aone", "Atwo", "Athree"},
+			AI64slice: []int64{1, 2, 3},
+		},
+	}
+	//For benchmarks, some things will not work.
+	if !bench {
+		//json and bson require string keys in maps
+		//ts.M = map[interface{}]interface{}{
+		//	true: "true",
+		//	int8(9): false,
+		//}
+		//gob cannot encode nil in element in array (encodeArray: nil element)
+		ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
+		// ts.Iptrslice = nil
+	}
+	if depth > 0 {
+		depth--
+		if ts.Mtsptr == nil {
+			ts.Mtsptr = make(map[string]*TestStruc)
+		}
+		if ts.Mts == nil {
+			ts.Mts = make(map[string]TestStruc)
+		}
+		ts.Mtsptr["0"] = newTestStruc(depth, bench)
+		ts.Mts["0"] = *(ts.Mtsptr["0"])
+		ts.Its = append(ts.Its, ts.Mtsptr["0"])
+	}
+	return
+}
+
+// doTestCodecTableOne allows us test for different variations based on arguments passed.
+func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, 
+	vs []interface{}, vsVerify []interface{}) {
+	//if testNil, then just test for when a pointer to a nil interface{} is passed. It should work.
+	//Current setup allows us test (at least manually) the nil interface or typed interface.
+	logT(t, "================ TestNil: %v ================\n", testNil)
+	for i, v0 := range vs {
+		logT(t, "..............................................")
+		logT(t, "         Testing: #%d:, %T, %#v\n", i, v0, v0)
+		b0, err := testMarshal(v0, h)
+		if err != nil {
+			logT(t, err.Error())
+			failT(t)
+			continue
+		}
+		logT(t, "         Encoded bytes: len: %v, %v\n", len(b0), b0)
+		
+		var v1 interface{}
+		
+		if testNil {
+			err = testUnmarshal(&v1, b0, h)
+		} else {
+			if v0 != nil {
+				v0rt := reflect.TypeOf(v0) // ptr
+				rv1 := reflect.New(v0rt)
+				err = testUnmarshal(rv1.Interface(), b0, h)
+				v1 = rv1.Elem().Interface()
+				// v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+			}
+		}
+		
+		logT(t, "         v1 returned: %T, %#v", v1, v1)
+		// if v1 != nil {
+		//	logT(t, "         v1 returned: %T, %#v", v1, v1)
+		//	//we always indirect, because ptr to typed value may be passed (if not testNil)
+		//	v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface()
+		// }
+		if err != nil {
+			logT(t, "-------- Error: %v. Partial return: %v", err, v1)
+			failT(t)
+			continue
+		}
+		v0check := vsVerify[i]
+		if v0check == skipVerifyVal { 
+			logT(t, "        Nil Check skipped: Decoded: %T, %#v\n", v1, v1)
+			continue 
+		}
+		
+		if err = deepEqual(v0check, v1); err == nil { 
+			logT(t, "++++++++ Before and After marshal matched\n")
+		} else {
+			logT(t, "-------- Before and After marshal do not match: Error: %v" + 
+				" ====> AGAINST: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1)
+			failT(t)
+		}
+	}
+}
+
+
+func testCodecTableOne(t *testing.T, h Handle) {
+	// func TestMsgpackAllExperimental(t *testing.T) {
+	// dopts := testDecOpts(nil, nil, false, true, true), 
+	var oldWriteExt bool
+	switch v := h.(type) {
+	case *MsgpackHandle:
+		oldWriteExt = v.WriteExt
+		v.WriteExt = true
+	}
+	doTestCodecTableOne(t, false, h, table, tableVerify) 
+
+	switch v := h.(type) {
+	case *MsgpackHandle:
+		v.WriteExt = oldWriteExt
+	}
+	// func TestMsgpackAll(t *testing.T) {
+
+	doTestCodecTableOne(t, false, h, table[:20], tableVerify[:20]) 
+	doTestCodecTableOne(t, false, h, table[21:], tableVerify[21:]) 
+
+	// func TestMsgpackNilStringMap(t *testing.T) {
+	var oldMapType reflect.Type
+	switch v := h.(type) {
+	case *MsgpackHandle:
+		oldMapType = v.MapType
+		v.MapType = mapStringIntfTyp
+	case *BincHandle:
+		oldMapType = v.MapType
+		v.MapType = mapStringIntfTyp
+	}
+	//skip #16 (time.Time), and #20 ([]interface{} containing time.Time)
+	doTestCodecTableOne(t, true, h, table[:16], tableTestNilVerify[:16]) 
+	doTestCodecTableOne(t, true, h, table[17:20], tableTestNilVerify[17:20]) 
+	doTestCodecTableOne(t, true, h, table[21:24], tableTestNilVerify[21:24]) 
+	
+	switch v := h.(type) {
+	case *MsgpackHandle:
+		v.MapType = oldMapType
+	case *BincHandle:
+		v.MapType = oldMapType
+	}
+
+	// func TestMsgpackNilIntf(t *testing.T) {	
+	doTestCodecTableOne(t, true, h, table[24:], tableTestNilVerify[24:]) 
+
+	doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) 
+}
+
+
+func testCodecMiscOne(t *testing.T, h Handle) {
+	b, err := testMarshal(32, h)
+	// Cannot do this nil one, because faster type assertion decoding will panic
+	// var i *int32
+	// if err = testUnmarshal(b, i, nil); err == nil {
+	// 	logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr")
+	// 	t.FailNow()
+	// }
+	var i2 int32 = 0
+	if err = testUnmarshal(&i2, b, h); err != nil {
+		logT(t, "------- Cannot unmarshal to int32 ptr. Error: %v", err)
+		t.FailNow()
+	}
+	if i2 != int32(32) {
+		logT(t, "------- didn't unmarshal to 32: Received: %d", i2)
+		t.FailNow()
+	}
+
+	// func TestMsgpackDecodePtr(t *testing.T) {
+	ts := newTestStruc(0, false)
+	b, err = testMarshal(ts, h)
+	if err != nil {
+		logT(t, "------- Cannot Marshal pointer to struct. Error: %v", err)
+		t.FailNow()
+	} else if len(b) < 40 {
+		logT(t, "------- Size must be > 40. Size: %d", len(b))
+		t.FailNow()
+	}
+	logT(t, "------- b: %v", b)
+	ts2 := new(TestStruc)
+	err = testUnmarshal(ts2, b, h)
+	if err != nil {
+		logT(t, "------- Cannot Unmarshal pointer to struct. Error: %v", err)
+		t.FailNow()
+	} else if ts2.I64 != 64 {
+		logT(t, "------- Unmarshal wrong. Expect I64 = 64. Got: %v", ts2.I64)
+		t.FailNow()
+	}
+
+	// func TestMsgpackIntfDecode(t *testing.T) {
+	m := map[string]int{"A":2, "B":3, }
+	p := []interface{}{m}
+	bs, err := testMarshal(p, h)
+	if err != nil {
+		logT(t, "Error marshalling p: %v, Err: %v", p, err)
+		t.FailNow()
+	}
+	m2 := map[string]int{}
+	p2 := []interface{}{m2}
+	err = testUnmarshal(&p2, bs, h)
+	if err != nil {
+		logT(t, "Error unmarshalling into &p2: %v, Err: %v", p2, err)
+		t.FailNow()
+	}
+	
+	if m2["A"] != 2 || m2["B"] != 3 {
+		logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2)
+		t.FailNow()
+	}
+	// log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2)
+	if err = deepEqual(p, p2); err == nil {
+		logT(t, "p and p2 match")
+	} else {
+		logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2)
+		t.FailNow()
+	}
+	if err = deepEqual(m, m2); err == nil {
+		logT(t, "m and m2 match")
+	} else {
+		logT(t, "Not Equal: %v. m: %v, m2: %v", err, m, m2)
+		t.FailNow()
+	}
+
+	// func TestMsgpackDecodeStructSubset(t *testing.T) {
+	// test that we can decode a subset of the stream
+	mm := map[string]interface{}{"A": 5, "B": 99, "C": 333, }
+	bs, err = testMarshal(mm, h)
+	if err != nil {
+		logT(t, "Error marshalling m: %v, Err: %v", mm, err)
+		t.FailNow()
+	}
+	type ttt struct {
+		A uint8
+		C int32
+	}
+	var t2 ttt
+	err = testUnmarshal(&t2, bs, h)
+	if err != nil {
+		logT(t, "Error unmarshalling into &t2: %v, Err: %v", t2, err)
+		t.FailNow()
+	}
+	t3 := ttt{5, 333}
+	if err = deepEqual(t2, t3); err != nil {
+		logT(t, "Not Equal: %v. t2: %v, t3: %v", err, t2, t3)
+		t.FailNow()
+	}
+}
+
+func doTestRpcOne(t *testing.T, rr Rpc, h Handle, callClose, doRequest, doExit bool) {
+	srv := rpc.NewServer()
+	srv.Register(testRpcInt)
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	// log("listener: %v", ln.Addr())
+	checkErrT(t, err)
+	defer ln.Close()
+	
+	// var opts *DecoderOptions
+	// opts := testDecOpts
+	// opts.MapType = mapStringIntfTyp
+	// opts.RawToString = false
+	serverExitChan := make(chan bool, 1)
+	serverFn := func() {
+		for { 
+			conn1, err1 := ln.Accept()
+			if err1 != nil {
+				continue
+			}
+			bs := make([]byte, 1)
+			n1, err1 := conn1.Read(bs)
+			if n1 != 1 || err1 != nil {
+				conn1.Close()
+				continue
+			}
+			var sc rpc.ServerCodec
+			switch bs[0] {
+			case 'R':
+				sc = rr.ServerCodec(conn1, h)
+			case 'X':
+				serverExitChan <- true
+				// <- serverExitChan				
+				conn1.Close()
+				return // exit serverFn goroutine
+			}
+			if sc == nil {
+				conn1.Close()
+				continue
+			}
+			srv.ServeCodec(sc)
+			// for {
+			// 	if err1 = srv.ServeRequest(sc); err1 != nil {
+			// 		break
+			// 	}
+			// }
+			// if callClose {
+			// 	sc.Close() 
+			// }
+		}
+	}
+	
+	clientFn := func(cc rpc.ClientCodec) {
+		cl := rpc.NewClientWithCodec(cc)
+		if callClose {
+			defer cl.Close() 
+		} 
+		var up, sq, mult int
+		// log("Calling client")
+		checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up))
+		// log("Called TestRpcInt.Update")
+		checkEqualT(t, testRpcInt.i, 5)
+		checkEqualT(t, up, 5)
+		checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq))
+		checkEqualT(t, sq, 25)
+		checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult))
+		checkEqualT(t, mult, 100)		
+	}
+	
+	connFn := func(req byte) (bs net.Conn) {
+		// log("calling f1")
+		bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String())
+		// log("f1. bs: %v, err2: %v", bs, err2)
+		checkErrT(t, err2)
+		n1, err2 := bs.Write([]byte{req})
+		checkErrT(t, err2)
+		checkEqualT(t, n1, 1)
+		return
+	}
+	
+	go serverFn()
+	if doRequest {
+		bs := connFn('R')
+		cc := rr.ClientCodec(bs, h)
+		clientFn(cc)
+	}
+	if doExit {
+		bs := connFn('X')
+		<- serverExitChan
+		bs.Close()
+		// serverExitChan <- true
+	}
+}
+
+// Comprehensive testing that generates data encoded from python msgpack, 
+// and validates that our code can read and write it out accordingly.
+// We keep this unexported here, and put actual test in ext_dep_test.go.
+// This way, it can be excluded by excluding file completely.
+func doTestMsgpackPythonGenStreams(t *testing.T) {
+	logT(t, "TestPythonGenStreams")
+	tmpdir, err := ioutil.TempDir("", "golang-msgpack-test") 
+	if err != nil {
+		logT(t, "-------- Unable to create temp directory\n")
+		t.FailNow()
+	}
+	defer os.RemoveAll(tmpdir)
+	logT(t, "tmpdir: %v", tmpdir)
+	cmd := exec.Command("python", "msgpack_test.py", "testdata", tmpdir)
+	//cmd.Stdin = strings.NewReader("some input")
+	//cmd.Stdout = &out
+	var cmdout []byte
+	if cmdout, err = cmd.CombinedOutput(); err != nil {
+		logT(t, "-------- Error running python build.py. Err: %v", err)
+		logT(t, "         %v", string(cmdout))
+		t.FailNow()
+	}
+	
+	oldMapType := testMsgpackH.MapType
+	for i, v := range tablePythonVerify {
+		testMsgpackH.MapType = oldMapType
+		//load up the golden file based on number
+		//decode it
+		//compare to in-mem object
+		//encode it again
+		//compare to output stream
+		logT(t, "..............................................")
+		logT(t, "         Testing: #%d: %T, %#v\n", i, v, v) 
+		var bss []byte
+		bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i) + ".golden"))
+		if err != nil {
+			logT(t, "-------- Error reading golden file: %d. Err: %v", i, err)
+			failT(t)
+			continue
+		}
+		testMsgpackH.MapType = mapStringIntfTyp
+		
+		var v1 interface{}
+		if err = testUnmarshal(&v1, bss, testMsgpackH); err != nil {
+			logT(t, "-------- Error decoding stream: %d: Err: %v", i, err)
+			failT(t)
+			continue
+		}
+		if v == skipVerifyVal {
+			continue
+		}
+		//no need to indirect, because we pass a nil ptr, so we already have the value 
+		//if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() }
+		if err = deepEqual(v, v1); err == nil { 
+			logT(t, "++++++++ Objects match")
+		} else {
+			logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1)
+			logT(t, "--------   AGAINST: %#v", v)
+			logT(t, "--------   DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface())
+			failT(t)
+		}
+		bsb, err := testMarshal(v1, testMsgpackH)
+		if err != nil {
+			logT(t, "Error encoding to stream: %d: Err: %v", i, err)
+			failT(t)
+			continue
+		}
+		if err = deepEqual(bsb, bss); err == nil { 
+			logT(t, "++++++++ Bytes match")
+		} else {
+			logT(t, "???????? Bytes do not match. %v.", err)
+			xs := "--------"
+			if reflect.ValueOf(v).Kind() == reflect.Map {
+				xs = "        "
+				logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs)
+			} else {
+				logT(t, "%s It's not a map. They should match.", xs)
+				failT(t)
+			}
+			logT(t, "%s   FROM_FILE: %4d] %v", xs, len(bss), bss)
+			logT(t, "%s     ENCODED: %4d] %v", xs, len(bsb), bsb)
+		}
+	}
+	testMsgpackH.MapType = oldMapType
+	
+}
+
+func TestCodecs(t *testing.T) {
+	testCodecTableOne(t, testMsgpackH)
+	testCodecMiscOne(t, testMsgpackH)
+	testCodecTableOne(t, testBincH)
+	testCodecMiscOne(t, testBincH)	
+}
+
+func TestRpcs(t *testing.T) {
+	doTestRpcOne(t, MsgpackSpecRpc{}, testMsgpackH, true, true, true)
+	doTestRpcOne(t, GoRpc{}, testMsgpackH, true, true, true)
+	doTestRpcOne(t, GoRpc{}, testBincH, true, true, true)
+}
+

+ 685 - 0
codec/decode.go

@@ -0,0 +1,685 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"io"
+	"reflect"
+	//"math"
+	"fmt"
+	"time"
+)
+
+// Some tagging information for error messages.
+var (
+	msgTagDec = "codec.decoder"
+	msgBadDesc = "Unrecognized descriptor byte"
+	digits = [...]byte {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+)
+
+type decodeNakedContext uint8 
+const (
+	dncHandled decodeNakedContext = iota
+	dncNil
+	dncExt
+	dncContainer
+)
+
+// decReader abstracts the reading source, allowing implementations that can 
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+	readn(n int) []byte
+	readb([]byte)
+	readUint8() uint8
+	readUint16() uint16
+	readUint32() uint32
+	readUint64() uint64
+}
+
+type decoder interface {
+	initReadNext() 
+	currentIsNil() bool
+	decodeBuiltinType(rt reflect.Type, rv reflect.Value) bool
+	//decodeNaked should completely handle extensions, builtins, primitives, etc.
+	decodeNaked(h decodeHandleI) (rv reflect.Value, ctx decodeNakedContext)
+	decodeInt(bitsize uint8) (i int64)
+	decodeUint(bitsize uint8) (ui uint64) 
+	decodeFloat(chkOverflow32 bool) (f float64)
+	decodeBool() (b bool) 
+	decodeString() (s string) 
+	decodeStringBytes(bs []byte) (bsOut []byte, changed bool)
+	decodeExt(tag byte) []byte
+	readMapLen() int
+	readArrayLen() int 
+}
+
+type newDecoderFunc func(r decReader) decoder 
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+	r decReader
+	d decoder 
+	h decodeHandleI
+}
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+	x [8]byte        //temp byte array re-used internally for efficiency
+	t01, t02, t04, t08 []byte // use these, so no need to constantly re-slice
+	r io.Reader
+}
+
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+	b []byte // data
+	c int    // cursor
+	a int // available
+}
+
+type decExtTagFn struct {
+	fn func(reflect.Value, []byte)(error)
+	tag byte
+}
+
+type decExtTypeTagFn struct {
+	rt reflect.Type
+	decExtTagFn
+}
+
+type decodeHandleI interface {
+	getDecodeExt(rt reflect.Type) (tag byte, fn func(reflect.Value, []byte) error)
+	newDecoder(r decReader) decoder
+	errorIfNoField() bool
+}
+
+type decHandle struct {
+	// put word-aligned fields first (before bools, etc)
+	exts []decExtTypeTagFn
+	extFuncs map[reflect.Type] decExtTagFn
+	// if an extension for byte slice is defined, then always decode Raw as strings
+	rawToStringOverride bool
+}
+
+type DecodeOptions struct {
+	// An instance of MapType is used during schema-less decoding of a map in the stream.
+	// If nil, we use map[interface{}]interface{}
+	MapType reflect.Type
+	// An instance of SliceType is used during schema-less decoding of an array in the stream.
+	// If nil, we use []interface{}
+	SliceType reflect.Type
+	// ErrorIfNoField controls whether an error is returned when decoding a map 
+	// from a codec stream into a struct, and no matching struct field is found.
+	ErrorIfNoField bool
+}
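+
+// An illustrative sketch (not part of this commit's API docs) of setting these
+// options on a handle that embeds DecodeOptions (e.g. MsgpackHandle); the
+// variable names are assumptions:
+//
+//   var mh MsgpackHandle
+//   mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+//   mh.SliceType = reflect.TypeOf([]interface{}(nil))
+//   mh.ErrorIfNoField = true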
+
+func (o *DecodeOptions) errorIfNoField() bool {
+	return o.ErrorIfNoField
+}
+
+// addDecodeExt registers a function to handle decoding into a given type when an 
+// extension type and specific tag byte is detected in the codec stream. 
+// To remove an extension, pass fn=nil.
+func (o *decHandle) addDecodeExt(rt reflect.Type, tag byte, fn func(reflect.Value, []byte) (error)) {
+	if o.exts == nil {
+		o.exts = make([]decExtTypeTagFn, 0, 2)
+		o.extFuncs = make(map[reflect.Type]decExtTagFn, 2)
+	}
+	if _, ok := o.extFuncs[rt]; ok {
+		delete(o.extFuncs, rt)
+		if rt == byteSliceTyp {
+			o.rawToStringOverride = false
+		}
+	}
+	if fn != nil {
+		o.extFuncs[rt] = decExtTagFn{fn, tag}
+		if rt == byteSliceTyp {
+			o.rawToStringOverride = true
+		}
+	}
+	
+	if leno := len(o.extFuncs); leno > cap(o.exts) {
+		o.exts = make([]decExtTypeTagFn, leno, (leno * 3 / 2))
+	} else {
+		o.exts = o.exts[0:leno]
+	}
+	var i int
+	for k, v := range o.extFuncs {
+		o.exts[i] = decExtTypeTagFn {k, v}
+		i++
+	}
+}
+
+func (o *decHandle) getDecodeExtForTag(tag byte) (rt reflect.Type, fn func(reflect.Value, []byte) error) {
+	for i, l := 0, len(o.exts); i < l; i++ {
+		if o.exts[i].tag == tag {
+			return o.exts[i].rt, o.exts[i].fn
+		}
+	}
+	return 
+}
+
+func (o *decHandle) getDecodeExt(rt reflect.Type) (tag byte, fn func(reflect.Value, []byte) error) {
+	if l := len(o.exts); l == 0 {
+		return
+	} else if l < mapAccessThreshold {
+		for i := 0; i < l; i++ {
+			if o.exts[i].rt == rt {
+				x := o.exts[i].decExtTagFn
+				return x.tag, x.fn
+			}
+		}
+	} else {
+		x := o.extFuncs[rt]
+		return x.tag, x.fn
+	}
+	return
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+func NewDecoder(r io.Reader, h Handle) (*Decoder) {
+	z := ioDecReader {
+		r: r,
+	}
+	z.t01, z.t02, z.t04, z.t08 = z.x[:1], z.x[:2], z.x[:4], z.x[:8]
+	return &Decoder{ r: &z, d: h.newDecoder(&z), h: h }
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly 
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) (*Decoder) {
+	z := bytesDecReader {
+		b: in,
+		a: len(in),
+	}
+	return &Decoder{ r: &z, d: h.newDecoder(&z), h: h }
+}
+
+// Decode decodes the stream from reader and stores the result in the 
+// value pointed to by v. v cannot be a nil pointer. v can also be 
+// a reflect.Value of a pointer.
+// 
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface. 
+// 
+// Sample usages:
+//   // Decoding into a non-nil typed value
+//   var f float32
+//   err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//   // Decoding into nil interface
+//   var v interface{}
+//   dec := codec.NewDecoder(r, handle)
+//   err = dec.Decode(&v)
+//   
+func (d *Decoder) Decode(v interface{}) (err error) {
+	defer panicToErr(&err)
+	d.decode(v)
+	return
+}
+
+func (d *Decoder) decode(iv interface{}) {
+	d.d.initReadNext()
+	
+	// Fast path included for various pointer types which cannot be registered as extensions
+	switch v := iv.(type) {
+	case nil:
+		decErr("Cannot decode into nil.")
+	case reflect.Value:
+		d.chkPtrValue(v)
+		d.decodeValue(v)
+	case *string:
+		*v = d.d.decodeString()
+	case *bool:
+		*v = d.d.decodeBool()
+	case *int:
+		*v = int(d.d.decodeInt(intBitsize))
+	case *int8:
+		*v = int8(d.d.decodeInt(8))
+	case *int16:
+		*v = int16(d.d.decodeInt(16))
+	case *int32:
+		*v = int32(d.d.decodeInt(32))
+	case *int64:
+		*v = int64(d.d.decodeInt(64))
+	case *uint:
+		*v = uint(d.d.decodeUint(uintBitsize))
+	case *uint8:
+		*v = uint8(d.d.decodeUint(8))
+	case *uint16:
+		*v = uint16(d.d.decodeUint(16))
+	case *uint32:
+		*v = uint32(d.d.decodeUint(32))
+	case *uint64:
+		*v = uint64(d.d.decodeUint(64))
+	case *float32:
+		*v = float32(d.d.decodeFloat(true))
+	case *float64:
+		*v = d.d.decodeFloat(false) 
+	case *interface{}:
+	 	d.decodeValue(reflect.ValueOf(iv).Elem())
+	default:
+		rv := reflect.ValueOf(iv)
+		d.chkPtrValue(rv)
+		d.decodeValue(rv)
+	}	
+}
+
+
+func (d *Decoder) decodeValue(rv reflect.Value) {
+	// Note: if stream is set to nil, we set the corresponding value to its "zero" value
+	
+	// var ctr int (define this above the  function if trying to do this run)
+	// ctr++
+	// log(".. [%v] enter decode: rv: %v <==> %T <==> %v", ctr, rv, rv.Interface(), rv.Interface())
+	// defer func(ctr2 int) {
+	// 	log(".... [%v] exit decode: rv: %v <==> %T <==> %v", ctr2, rv, rv.Interface(), rv.Interface())
+	// }(ctr)
+	dd := d.d //so we don't dereference constantly
+	dd.initReadNext()
+	
+	rvOrig := rv
+	wasNilIntf := rv.Kind() == reflect.Interface && rv.IsNil()
+	rt := rv.Type()
+	
+	var ndesc decodeNakedContext
+	//if nil interface, use some heuristics to set the nil interface to an
+	//appropriate value based on the first byte read (byte descriptor bd)
+	if wasNilIntf {
+		if dd.currentIsNil() {
+			return
+		}
+		//Prevent from decoding into e.g. error, io.Reader, etc if it's nil and non-nil value in stream.
+		//We can only decode into interface{} (0 methods). Else reflect.Set fails later.
+		if num := rt.NumMethod(); num > 0 {
+			decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", rt, num)
+		} else {
+			rv, ndesc = dd.decodeNaked(d.h)
+			if ndesc == dncHandled {
+				rvOrig.Set(rv)
+				return
+			}
+			rt = rv.Type()
+		}
+	} else if dd.currentIsNil() {
+		// Note: if stream is set to nil, we set the dereferenced value to its "zero" value (if settable).
+		for rv.Kind() == reflect.Ptr {
+			rv = rv.Elem()
+		}
+		if rv.CanSet() {
+			rv.Set(reflect.Zero(rv.Type()))
+		}
+		return
+	}
+	
+	// An extension can be registered for any type, regardless of the Kind 
+	// (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.
+	// 
+	// We can't check if it's an extension byte here first, because the user may have 
+	// registered a pointer or non-pointer type, meaning we may have to recurse first 
+	// before matching a mapped type, even though the extension byte is already detected.
+	// 
+	// If we are checking for builtin or ext type here, it means we didn't go through decodeNaked, 
+	// Because decodeNaked would have handled it. It also means wasNilIntf = false.
+	if dd.decodeBuiltinType(rt, rv) {
+		return
+	}
+	if bfnTag, bfnFn := d.h.getDecodeExt(rt); bfnFn != nil {
+		xbs := dd.decodeExt(bfnTag)
+		if fnerr := bfnFn(rv, xbs); fnerr != nil {
+			panic(fnerr)
+		}
+		return
+	}
+	
+	// Note: In decoding into containers, we just use the stream to UPDATE the container.
+	// This means that for a struct or map, we just update matching fields or keys.
+	// For a slice/array, we just update the first n elements, where n is the length of the 
+	// stream.
+	// However, if the encoded value is Nil in the stream, then we try to set 
+	// to nil, or a "zero" value.
+	//
+	// Also, we must ensure that, if decoding into a nil interface{}, we return a non-nil
+	// value even if the container registers a length of 0.
+	// 
+	// NOTE: Do not split the blocks for struct, slice, map, etc into individual methods.
+	// It ends up being more expensive, because they recursively call decodeValue
+	// 
+	// (Mar 7, 2013. DON'T REARRANGE ... code clarity)
+	// tried arranging in sequence of most probable ones. 
+	// string, bool, integer, float, struct, ptr, slice, array, map, interface, uint.
+	switch rk := rv.Kind(); rk {
+	case reflect.String:
+		rv.SetString(dd.decodeString())
+	case reflect.Bool:
+		rv.SetBool(dd.decodeBool())
+	case reflect.Int:
+		rv.SetInt(dd.decodeInt(intBitsize))
+	case reflect.Int64:
+		rv.SetInt(dd.decodeInt(64))
+	case reflect.Int32:
+		rv.SetInt(dd.decodeInt(32))
+	case reflect.Int8:
+		rv.SetInt(dd.decodeInt(8))
+	case reflect.Int16:
+		rv.SetInt(dd.decodeInt(16))
+	case reflect.Float32:
+		rv.SetFloat(dd.decodeFloat(true))
+	case reflect.Float64:
+		rv.SetFloat(dd.decodeFloat(false))
+	case reflect.Uint8:
+		rv.SetUint(dd.decodeUint(8))
+	case reflect.Uint64: 
+		rv.SetUint(dd.decodeUint(64))
+	case reflect.Uint:
+		rv.SetUint(dd.decodeUint(uintBitsize))
+	case reflect.Uint32:
+		rv.SetUint(dd.decodeUint(32))
+	case reflect.Uint16:
+		rv.SetUint(dd.decodeUint(16))
+	case reflect.Ptr:
+		if rv.IsNil() {
+			if wasNilIntf {
+				rv = reflect.New(rt.Elem())
+			} else {
+				rv.Set(reflect.New(rt.Elem()))
+			}
+		}
+		d.decodeValue(rv.Elem())
+	case reflect.Interface:
+		d.decodeValue(rv.Elem())
+	case reflect.Struct:
+		containerLen := dd.readMapLen()
+		
+		if containerLen == 0 {
+			break
+		}
+		
+		sfi := getStructFieldInfos(rt)
+		for j := 0; j < containerLen; j++ {
+			// var rvkencname string
+			// ddecode(&rvkencname)
+			dd.initReadNext()
+			rvkencname := dd.decodeString()
+			// rvksi := sfi.getForEncName(rvkencname)
+			if k := sfi.indexForEncName(rvkencname); k > -1 {
+				sfik := sfi[k]
+				if sfik.i > -1 {
+					d.decodeValue(rv.Field(int(sfik.i)))
+				} else {
+					d.decodeValue(rv.FieldByIndex(sfik.is))
+				}
+				// d.decodeValue(sfi.field(k, rv))
+			} else { 
+				if d.h.errorIfNoField() {
+					decErr("No matching struct field found when decoding stream map with key: %v", rvkencname)
+				} else {
+					var nilintf0 interface{}
+					d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+				}
+			}
+		}
+	case reflect.Slice:
+		// Be more careful calling Set() here, because a reflect.Value from an array
+		// may have come in here (which may not be settable). 
+		// In places where the slice got from an array could be, we should guard with CanSet() calls.
+		
+		if rt == byteSliceTyp { // rawbytes 
+			if bs2, changed2 := dd.decodeStringBytes(rv.Bytes()); changed2 {
+				rv.SetBytes(bs2)
+			}
+			if wasNilIntf && rv.IsNil() {
+				rv.SetBytes([]byte{})
+			}
+			break			
+		}
+		
+		containerLen := dd.readArrayLen()
+
+		if wasNilIntf {
+			rv = reflect.MakeSlice(rt, containerLen, containerLen)
+		} 
+		if containerLen == 0 {
+			break
+		}
+		
+		if rv.IsNil() {
+			// wasNilIntf only applies if rv is nil (since that's what we did earlier)
+			if containerLen > 0 {
+				rv.Set(reflect.MakeSlice(rt, containerLen, containerLen))
+			}
+		} else {
+			// if we need to reset rv but it cannot be set, we should err out.
+			// for example, if the slice came from an unaddressable array, CanSet = false
+			if rvcap, rvlen := rv.Len(), rv.Cap(); containerLen > rvcap {
+				if rv.CanSet() {
+					rvn := reflect.MakeSlice(rt, containerLen, containerLen)
+					if rvlen > 0 {
+						reflect.Copy(rvn, rv)
+					}
+					rv.Set(rvn)
+				} else {
+					decErr("Cannot reset slice with smaller cap: %v than stream contents: %v", rvcap, containerLen)
+				}
+			} else if containerLen > rvlen {
+				rv.SetLen(containerLen)
+			}
+		}
+		for j := 0; j < containerLen; j++ {
+			d.decodeValue(rv.Index(j))
+		}
+	case reflect.Array:
+		d.decodeValue(rv.Slice(0, rv.Len()))
+	case reflect.Map:
+		containerLen := dd.readMapLen()
+		
+		if containerLen == 0 {
+			break
+		}
+
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rt))
+		}
+		ktype, vtype := rt.Key(), rt.Elem()			
+		for j := 0; j < containerLen; j++ {
+			rvk := reflect.New(ktype).Elem()
+			d.decodeValue(rvk)
+			
+			if ktype == intfTyp {
+				rvk = rvk.Elem()
+				if rvk.Type() == byteSliceTyp {
+					rvk = reflect.ValueOf(string(rvk.Bytes()))
+				}
+			}
+			rvv := rv.MapIndex(rvk)
+			if !rvv.IsValid() {
+				rvv = reflect.New(vtype).Elem()
+			}
+			
+			d.decodeValue(rvv)
+			rv.SetMapIndex(rvk, rvv)
+		}
+	default:
+		decErr("Unhandled value for kind: %v: %s", rk, msgBadDesc)
+	}
+	
+	if wasNilIntf {
+		rvOrig.Set(rv)
+	} 
+	return
+}
+
+func (d *Decoder) chkPtrValue(rv reflect.Value) {
+	// We cannot decode into a non-pointer or a nil pointer
+	// (at least pass a pointer to a nil interface so we can decode into it)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		var rvi interface{} = rv
+		if rv.IsValid() && rv.CanInterface() {
+			rvi = rv.Interface()
+		}
+		decErr("Decode: Expecting valid pointer to decode into. Got: %v, %T, %v", 
+			rv.Kind(), rvi, rvi)
+	}
+}
+
+// ------------------------------------
+
+func (z *ioDecReader) readn(n int) (bs []byte) {
+	bs = make([]byte, n)
+	if _, err := io.ReadFull(z.r, bs); err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+	if _, err := io.ReadFull(z.r, bs); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioDecReader) readUint8() uint8 {
+	z.readb(z.t01)
+	return z.x[0]
+}
+
+func (z *ioDecReader) readUint16() uint16 {
+	z.readb(z.t02)
+	return binc.Uint16(z.t02)
+}
+
+func (z *ioDecReader) readUint32() uint32 {
+	z.readb(z.t04)
+	return binc.Uint32(z.t04)
+}
+
+func (z *ioDecReader) readUint64() uint64 {
+	z.readb(z.t08)
+	return binc.Uint64(z.t08)
+}
+
+// ------------------------------------
+
+func (z *bytesDecReader) consume(n int) (oldcursor int) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	if n > z.a {
+		doPanic(msgTagDec, "Trying to read %v bytes. Only %v available", n, z.a)
+	}
+	// z.checkAvailable(n)
+	oldcursor = z.c
+	z.c = oldcursor + n
+	z.a = z.a - n
+	return 
+}
+
+func (z *bytesDecReader) readn(n int) (bs []byte) {
+	c0 := z.consume(n)
+	bs = z.b[c0 : z.c]
+	return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+	copy(bs, z.readn(len(bs)))
+}
+
+func (z *bytesDecReader) readUint8() uint8 {
+	c0 := z.consume(1)
+	return z.b[c0]
+}
+
+// Use the binary encoding helper for 4 and 8 bytes, but inline it for 2 bytes:
+// creating a temp slice variable and copying it to the helper function is expensive
+// for just 2 bytes.
+
+func (z *bytesDecReader) readUint16() uint16 {
+	c0 := z.consume(2)
+	return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 
+}
+
+func (z *bytesDecReader) readUint32() uint32 {
+	c0 := z.consume(4)
+	return binc.Uint32(z.b[c0 : z.c])
+}
+
+func (z *bytesDecReader) readUint64() uint64 {
+	c0 := z.consume(8)
+	return binc.Uint64(z.b[c0 : z.c])
+}
+
+// ----------------------------------------
+
+func decErr(format string, params ...interface{}) {
+	doPanic(msgTagDec, format, params...)
+}
+
+// decodeTime decodes a []byte (as produced by encodeTime) into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+	var (
+		tsec int64
+		tnsec int32
+		tz uint16
+	)
+	switch len(bs) {
+	case 4:		
+		tsec = int64(int32(binc.Uint32(bs)))
+	case 6:
+		tsec = int64(int32(binc.Uint32(bs)))
+		tz = (binc.Uint16(bs[4:]))
+	case 8:
+		tsec = int64(int32(binc.Uint32(bs)))
+		tnsec = int32(binc.Uint32(bs[4:]))
+	case 10:
+		tsec = int64(int32(binc.Uint32(bs)))
+		tnsec = int32(binc.Uint32(bs[4:]))
+		tz = (binc.Uint16(bs[8:]))
+
+	case 9:
+		tsec = int64(binc.Uint64(bs))
+	case 11:
+		tsec = int64(binc.Uint64(bs))
+		tz = (binc.Uint16(bs[8:]))
+	case 12:
+		tsec = int64(binc.Uint64(bs))
+		tnsec = int32(binc.Uint32(bs[8:]))
+	case 14:
+		tsec = int64(binc.Uint64(bs))
+		tnsec = int32(binc.Uint32(bs[8:]))
+		tz = (binc.Uint16(bs[12:]))
+	default:
+		err = fmt.Errorf("Error decoding bytes: %v as time.Time. Invalid length: %v", bs, len(bs))
+		return 
+	}
+	if tz == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+	} else {
+		// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+		// However, we need a name here, so it can be shown when the time is printed.
+		// Zone name is in form: UTC-08:00.
+		// Note that Go Libs do not give access to dst flag, so we only check for sign bit
+		tzneg := tz & (1 << 15) != 0 //check if negative sign
+		tz = tz & 0x1fff             //clear 3 MSBs: sign and dst bits 
+		tzoff := int(tz) * 60
+		var tzname = []byte("UTC+00:00")
+		if tzneg {
+			tzoff = -tzoff
+			tzname[3] = '-'
+		}
+		//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+		//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+		tzhr, tzmin := int(tz/60), int(tz%60)
+		tzname[4] = digits[tzhr/10]
+		tzname[5] = digits[tzhr%10]
+		tzname[7] = digits[tzmin/10]
+		tzname[8] = digits[tzmin%10]
+			
+		//fmt.Printf(">>>>> DEC: tzname: %s, tzoff: %v\n", tzname, tzoff)
+		tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone(string(tzname), tzoff))
+	}
+	return 
+}
+

+ 619 - 0
codec/encode.go

@@ -0,0 +1,619 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"io"
+	"bufio"
+	"reflect"
+	"math"
+	"time"
+	"fmt"
+)
+
+var _ = fmt.Printf
+const (
+	// Some tagging information for error messages.
+	msgTagEnc = "codec.encoder"
+	defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+	// maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366
+)
+
+// encWriter abstracts writing to a byte slice or an io.Writer.
+type encWriter interface {
+	writeUint16(uint16)
+	writeUint32(uint32)
+	writeUint64(uint64)
+	writeb([]byte)
+	writestr(string)
+	writen1(byte)
+	writen2(byte, byte)
+	writen3(byte, byte, byte)
+	flush()
+}
+
+type encoder interface {
+	encodeBuiltinType(rt reflect.Type, rv reflect.Value) bool
+	encodeNil()
+	encodeInt(i int64)
+	encodeUint(i uint64)
+	encodeBool(b bool) 
+	encodeFloat32(f float32)
+	encodeFloat64(f float64)
+	encodeExtPreamble(xtag byte, length int) 
+	encodeArrayPreamble(length int)
+	encodeMapPreamble(length int)
+	encodeString(c charEncoding, v string)
+	encodeStringBytes(c charEncoding, v []byte)
+	//TODO
+	//encBignum(f *big.Int) 
+	//encStringRunes(c charEncoding, v []rune)
+}
+
+type newEncoderFunc func(w encWriter) encoder
+
+type encodeHandleI interface {
+	getEncodeExt(rt reflect.Type) (tag byte, fn func(reflect.Value) ([]byte, error)) 
+	newEncoder(w encWriter) encoder
+	writeExt() bool
+}
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+	w encWriter
+	e encoder
+	h encodeHandleI
+}
+
+type ioEncWriterWriter interface {
+	WriteByte(c byte) error
+	WriteString(s string) (n int, err error)
+	Write(p []byte) (n int, err error)
+}
+
+type ioEncWriterFlusher interface {
+	 Flush() error
+}
+	
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+	w ioEncWriterWriter
+	// temp byte array and slices used to prevent constant re-slicing while writing.
+	x [8]byte        
+	t01, t02, t04, t08 []byte 
+}
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used when encoding directly into a byte slice (e.g. via NewEncoderBytes).
+type bytesEncWriter struct {
+	b []byte
+	c int // cursor
+	out *[]byte // write out on flush
+}
+	
+type encExtTagFn struct {
+	fn func(reflect.Value) ([]byte, error)
+	tag byte
+}
+ 
+type encExtTypeTagFn struct {
+	rt reflect.Type
+	encExtTagFn
+}
+
+// encHandle contains options for the encoder, e.g. registered extension functions.
+type encHandle struct {
+	extFuncs map[reflect.Type] encExtTagFn
+	exts []encExtTypeTagFn
+}
+
+// addEncodeExt registers a function to handle encoding a given type as an extension
+// with a specific tag byte.
+// To remove an extension, pass fn=nil.
+func (o *encHandle) addEncodeExt(rt reflect.Type, tag byte, fn func(reflect.Value) ([]byte, error)) {
+	if o.exts == nil {
+		o.exts = make([]encExtTypeTagFn, 0, 8)
+		o.extFuncs = make(map[reflect.Type] encExtTagFn, 8)
+	}
+	delete(o.extFuncs, rt)
+	
+	if fn != nil {
+		o.extFuncs[rt] = encExtTagFn{fn, tag}
+	}
+	if leno := len(o.extFuncs); leno > cap(o.exts) {
+		o.exts = make([]encExtTypeTagFn, leno, (leno * 3 / 2))
+	} else {
+		o.exts = o.exts[0:leno]
+	}
+	var i int
+	for k, v := range o.extFuncs {
+		o.exts[i] = encExtTypeTagFn {k, v}
+		i++
+	}
+}
+
+func (o *encHandle) getEncodeExt(rt reflect.Type) (tag byte, fn func(reflect.Value) ([]byte, error)) {	
+	// For >= 5 elements, the constant cost of a map lookup is less than the iteration cost.
+	// This is because reflect.Type equality checks are pretty expensive.
+	if l := len(o.exts); l == 0 {
+		return
+	} else if l < mapAccessThreshold {
+		for i := 0; i < l; i++ {
+			if o.exts[i].rt == rt {
+				x := o.exts[i].encExtTagFn
+				return x.tag, x.fn
+			}
+		}
+	} else {
+		x := o.extFuncs[rt]
+		return x.tag, x.fn
+	}
+	return
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+// For efficiency, users are encouraged to pass in a memory-buffered writer
+// (e.g. bufio.Writer, bytes.Buffer). This implementation *may* use one internally.
+func NewEncoder(w io.Writer, h Handle) (*Encoder) {
+	ww, ok := w.(ioEncWriterWriter)
+	if !ok {
+		ww = bufio.NewWriterSize(w, defEncByteBufSize)
+	}
+	z := ioEncWriter {
+		w: ww,
+	}
+	z.t01, z.t02, z.t04, z.t08 = z.x[:1], z.x[:2], z.x[:4], z.x[:8]
+	return &Encoder { w: &z, h: h, e: h.newEncoder(&z) }
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently 
+// into a byte slice, using zero-copying to temporary slices.
+// 
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) (*Encoder) {
+	in := *out
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	z := bytesEncWriter {
+		b: in,
+		out: out,
+	}
+	return &Encoder { w: &z, h: h, e: h.newEncoder(&z) }
+}
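+
+// A minimal usage sketch for NewEncoderBytes (handle and v are illustrative
+// assumptions):
+//
+//   var b []byte
+//   err := NewEncoderBytes(&b, handle).Encode(v)
+//   // on success, b holds the encoded bytes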
+
+// Encode writes an object into a stream in the codec format.
+// 
+// Struct values encode as maps. Each exported struct field is encoded unless:
+//    - the field's tag is "-", or
+//    - the field is empty and its tag specifies the "omitempty" option.
+//
+// The empty values are false, 0, any nil pointer or interface value, 
+// and any array, slice, map, or string of length zero. 
+// 
+// Anonymous fields are encoded inline if no struct tag is present.
+// Else they are encoded as regular fields.
+// 
+// The object's default key string is the struct field name but can be 
+// specified in the struct field's tag value. 
+// The "codec" key in struct field's tag value is the key name, 
+// followed by an optional comma and options. 
+// 
+// To set an option on all fields (e.g. omitempty on all fields), you 
+// can create a field called _struct, and set flags on it.
+// 
+// Examples:
+//    
+//      type MyStruct struct {
+//          _struct bool    `codec:",omitempty"`   //set omitempty for every field
+//          Field1 string   `codec:"-"`            //skip this field
+//          Field2 int      `codec:"myName"`       //Use key "myName" in encode stream
+//          Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
+//          Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//          ...
+//      }
+//    
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer panicToErr(&err) 
+	e.encode(v)
+	e.w.flush()
+	return 
+}
+
+func (e *Encoder) encode(iv interface{}) {
+	switch v := iv.(type) {
+	case nil:
+		e.e.encodeNil()
+		
+	case reflect.Value:
+		e.encodeValue(v)
+
+	case string:
+		e.e.encodeString(c_UTF8, v)
+	case bool:
+		e.e.encodeBool(v)
+	case int:
+		e.e.encodeInt(int64(v))
+	case int8:
+		e.e.encodeInt(int64(v))
+	case int16:
+		e.e.encodeInt(int64(v))
+	case int32:
+		e.e.encodeInt(int64(v))
+	case int64:
+		e.e.encodeInt(v)
+	case uint:
+		e.e.encodeUint(uint64(v))
+	case uint8:
+		e.e.encodeUint(uint64(v))
+	case uint16:
+		e.e.encodeUint(uint64(v))
+	case uint32:
+		e.e.encodeUint(uint64(v))
+	case uint64:
+		e.e.encodeUint(v)
+	case float32:
+		e.e.encodeFloat32(v)
+	case float64:
+		e.e.encodeFloat64(v)
+
+	case *string:
+		e.e.encodeString(c_UTF8, *v)
+	case *bool:
+		e.e.encodeBool(*v)
+	case *int:
+		e.e.encodeInt(int64(*v))
+	case *int8:
+		e.e.encodeInt(int64(*v))
+	case *int16:
+		e.e.encodeInt(int64(*v))
+	case *int32:
+		e.e.encodeInt(int64(*v))
+	case *int64:
+		e.e.encodeInt(*v)
+	case *uint:
+		e.e.encodeUint(uint64(*v))
+	case *uint8:
+		e.e.encodeUint(uint64(*v))
+	case *uint16:
+		e.e.encodeUint(uint64(*v))
+	case *uint32:
+		e.e.encodeUint(uint64(*v))
+	case *uint64:
+		e.e.encodeUint(*v)
+	case *float32:
+		e.e.encodeFloat32(*v)
+	case *float64:
+		e.e.encodeFloat64(*v)
+
+	default:
+		e.encodeValue(reflect.ValueOf(iv))
+	}
+	
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value) {
+	rt := rv.Type()
+	//encode based on type first, since overrides are based on type.
+	ee := e.e //don't dereference every time
+	if ee.encodeBuiltinType(rt, rv) {
+		return
+	}
+	
+	//Note: tagFn must handle returning nil if value should be encoded as a nil.
+	if xfTag, xfFn := e.h.getEncodeExt(rt); xfFn != nil {
+		bs, fnerr := xfFn(rv)
+		if fnerr != nil {
+			panic(fnerr)
+		}
+		if bs == nil {
+			ee.encodeNil()
+			return
+		}
+		if e.h.writeExt() {
+			ee.encodeExtPreamble(xfTag, len(bs))
+			e.w.writeb(bs)
+		} else {
+			ee.encodeStringBytes(c_RAW, bs)
+		}
+		return
+	}
+	
+	// ensure more common cases appear early in switch.
+	rk := rv.Kind()
+	switch rk {
+	case reflect.Bool:
+		ee.encodeBool(rv.Bool())
+	case reflect.String:
+		ee.encodeString(c_UTF8, rv.String())
+	case reflect.Float64:
+		ee.encodeFloat64(rv.Float())
+	case reflect.Float32:
+		ee.encodeFloat32(float32(rv.Float()))
+	case reflect.Slice:
+		if rv.IsNil() {
+			ee.encodeNil()
+			break
+		} 
+		if rt == byteSliceTyp {
+			ee.encodeStringBytes(c_RAW, rv.Bytes())
+			break
+		}
+		l := rv.Len()
+		ee.encodeArrayPreamble(l)
+		if l == 0 {
+			break
+		}
+		for j := 0; j < l; j++ {
+			e.encodeValue(rv.Index(j))
+		}
+	case reflect.Array:
+		e.encodeValue(rv.Slice(0, rv.Len()))
+	case reflect.Map:
+		if rv.IsNil() {
+			ee.encodeNil()
+			break
+		}
+		l := rv.Len()
+		ee.encodeMapPreamble(l)
+		if l == 0 {
+			break
+		}
+		mks := rv.MapKeys()
+		// for j, lmks := 0, len(mks); j < lmks; j++ {
+		for j := range mks {
+			e.encodeValue(mks[j])
+		 	e.encodeValue(rv.MapIndex(mks[j]))
+		}
+	case reflect.Struct:
+		e.encStruct(rt, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			ee.encodeNil()
+			break
+		}
+		e.encodeValue(rv.Elem())
+	case reflect.Interface:
+		if rv.IsNil() {
+			ee.encodeNil()
+			break
+		}
+		e.encodeValue(rv.Elem())
+	case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
+		ee.encodeInt(rv.Int())
+	case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16:
+		ee.encodeUint(rv.Uint())
+	case reflect.Invalid:
+		ee.encodeNil()
+	default:
+		encErr("Unsupported kind: %s, for: %#v", rk, rv)
+	}
+	return
+}
+
+func (e *Encoder) encStruct(rt reflect.Type, rv reflect.Value) {
+	sis := getStructFieldInfos(rt)
+	newlen := len(sis)
+	rvals := make([]reflect.Value, newlen)
+	encnames := make([]string, newlen)
+	newlen = 0
+	// var rv0 reflect.Value
+	// for i := 0; i < l; i++ {
+	// 	si := sis[i]
+	for _, si := range sis {
+		if si.i > -1 {
+			rvals[newlen] = rv.Field(int(si.i))
+		} else {
+			rvals[newlen] = rv.FieldByIndex(si.is)
+		}
+		if si.omitEmpty && isEmptyValue(rvals[newlen]) {
+			continue
+		}
+		// sivals[newlen] = i
+		encnames[newlen] = si.encName
+		newlen++
+	}
+	ee := e.e //don't dereference every time
+	ee.encodeMapPreamble(newlen)
+	for j := 0; j < newlen; j++ {
+		//e.encString(sis[sivals[j]].encName)
+		ee.encodeString(c_UTF8, encnames[j])
+		e.encodeValue(rvals[j])
+	}
+}
+
+// ----------------------------------------
+
+func (z *ioEncWriter) writeUint16(v uint16) {
+	binc.PutUint16(z.t02, v)
+	z.writeb(z.t02)
+}
+
+func (z *ioEncWriter) writeUint32(v uint32) {
+	binc.PutUint32(z.t04, v)
+	z.writeb(z.t04)
+}
+
+func (z *ioEncWriter) writeUint64(v uint64) {
+	binc.PutUint64(z.t08, v)
+	z.writeb(z.t08)
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+	n, err := z.w.Write(bs)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(bs) {
+		doPanic(msgTagEnc, "write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
+	}	
+}
+
+func (z *ioEncWriter) writestr(s string) {
+	n, err := z.w.WriteString(s)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(s) {
+		doPanic(msgTagEnc, "write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)
+	}	
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+	if err := z.w.WriteByte(b); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
+	z.writen1(b1)
+	z.writen1(b2)
+}
+
+func (z *ioEncWriter) writen3(b1 byte, b2 byte, b3 byte) {
+	z.writen1(b1)
+	z.writen1(b2)
+	z.writen1(b3)
+}
+
+func (z *ioEncWriter) flush() {
+	if f, ok := z.w.(ioEncWriterFlusher); ok {
+		if err := f.Flush(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// ----------------------------------------
+
+func (z *bytesEncWriter) writeUint16(v uint16) {
+	c := z.grow(2)
+	z.b[c] = byte(v >> 8)
+	z.b[c + 1] = byte(v)
+}
+
+func (z *bytesEncWriter) writeUint32(v uint32) {
+	c := z.grow(4)
+	z.b[c] = byte(v >> 24)
+	z.b[c + 1] = byte(v >> 16)
+	z.b[c + 2] = byte(v >> 8)
+	z.b[c + 3] = byte(v)
+}
+
+func (z *bytesEncWriter) writeUint64(v uint64) {
+	c := z.grow(8)
+	z.b[c] = byte(v >> 56)
+	z.b[c + 1] = byte(v >> 48)
+	z.b[c + 2] = byte(v >> 40)
+	z.b[c + 3] = byte(v >> 32)
+	z.b[c + 4] = byte(v >> 24)
+	z.b[c + 5] = byte(v >> 16)
+	z.b[c + 6] = byte(v >> 8)
+	z.b[c + 7] = byte(v)
+}
+
+func (z *bytesEncWriter) writeb(s []byte) {
+	c := z.grow(len(s))
+	copy(z.b[c:], s)
+}
+
+func (z *bytesEncWriter) writestr(s string) {
+	c := z.grow(len(s))
+	copy(z.b[c:], s)
+}
+
+func (z *bytesEncWriter) writen1(b1 byte) {
+	c := z.grow(1)
+	z.b[c] = b1
+}
+
+func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
+	c := z.grow(2)
+	z.b[c] = b1
+	z.b[c + 1] = b2
+}
+
+func (z *bytesEncWriter) writen3(b1 byte, b2 byte, b3 byte) {
+	c := z.grow(3)
+	z.b[c] = b1
+	z.b[c + 1] = b2
+	z.b[c + 2] = b3
+}
+
+func (z *bytesEncWriter) flush() { 
+	*(z.out) = z.b[:z.c]
+}
+
+func (z *bytesEncWriter) grow(n int) (oldcursor int) {
+	oldcursor = z.c
+	z.c = oldcursor + n
+	if z.c > cap(z.b) {
+		// We tried using the append-slice growth logic (if cap < 1024, grow by *2, else by *1.25).
+		// However, it was too expensive, causing too many copy iterations.
+		// Using the bytes.Buffer model (2*cap + n) was much better.
+		bs := make([]byte, 2*cap(z.b)+n)
+		copy(bs, z.b[:oldcursor])
+		z.b = bs
+	} else if z.c > len(z.b) {
+		z.b = z.b[:cap(z.b)]
+	}
+	return
+}
+
+// ----------------------------------------
+
+func encErr(format string, params ...interface{}) {
+	doPanic(msgTagEnc, format, params...)
+}
+
+// encodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and the UTC offset.
+func encodeTime(t time.Time) ([]byte) {
+	//t := rv.Interface().(time.Time)
+	tsecs, tnsecs := t.Unix(), t.Nanosecond()
+	var padzero bool
+	var bs [14]byte
+	var i int
+	l := t.Location()
+	if l == time.UTC {
+		l = nil
+	}
+	if tsecs > math.MinInt32 && tsecs < math.MaxInt32 {
+		binc.PutUint32(bs[i:], uint32(int32(tsecs)))
+		i = i + 4
+	} else {
+		binc.PutUint64(bs[i:], uint64(tsecs))
+		i = i + 8
+		padzero = (tnsecs == 0)
+	}
+	if tnsecs != 0 {
+		binc.PutUint32(bs[i:], uint32(tnsecs))
+		i = i + 4
+	}
+	if l != nil {
+		// Note that Go Libs do not give access to dst flag.
+		_, zoneOffset := t.Zone()
+		//zoneName, zoneOffset := t.Zone()
+		//fmt.Printf(">>>>>> ENC: zone: %s, %v\n", zoneName, zoneOffset)
+		zoneOffset /= 60
+		isNeg := zoneOffset < 0
+		if isNeg {
+			zoneOffset = -zoneOffset
+		}
+		var z uint16 = uint16(zoneOffset)
+		if isNeg {
+			z |= 1 << 15 //set sign bit
+		}
+		//fmt.Printf(">>>>>> ENC: z: %b\n", z)
+		binc.PutUint16(bs[i:], z)
+		i = i + 2
+	}
+	if padzero {
+		i = i + 1
+	}
+	//fmt.Printf(">>>> EncodeTimeExt: t: %v, len: %v, v: %v\n", t, i, bs[0:i])
+	return bs[0:i]
+}
+

+ 66 - 0
codec/ext_dep_test.go

@@ -0,0 +1,66 @@
+//+build ignore
+
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// This file includes benchmarks which have dependencies on 3rd-party
+// packages (bson and vmihailenco/msgpack) which must be installed locally.
+//
+// To run the benchmarks including these 3rd-party packages, first:
+//   - Comment out the first line in this file (put // in front of the +build directive)
+//   - Get those packages:
+//       go get github.com/vmihailenco/msgpack
+//       go get labix.org/v2/mgo/bson
+//   - Run:
+//       go test -bi -bench=.
+
+import (
+	vmsgpack "github.com/vmihailenco/msgpack"
+	"labix.org/v2/mgo/bson"
+	"testing"
+)
+
+func init() {
+	benchCheckers = append(benchCheckers, 
+		benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn},
+		benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn},
+	)
+}
+
+func fnVMsgpackEncodeFn(ts *TestStruc) ([]byte, error) {
+	return vmsgpack.Marshal(ts)
+}
+
+func fnVMsgpackDecodeFn(buf []byte, ts *TestStruc) error {
+	return vmsgpack.Unmarshal(buf, ts)
+}
+
+func fnBsonEncodeFn(ts *TestStruc) ([]byte, error) {
+	return bson.Marshal(ts)
+}
+
+func fnBsonDecodeFn(buf []byte, ts *TestStruc) error {
+	return bson.Unmarshal(buf, ts)
+}
+
+func Benchmark__Bson_____Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "bson", fnBsonEncodeFn)
+}
+
+func Benchmark__Bson_____Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "bson", fnBsonEncodeFn, fnBsonDecodeFn)
+}
+
+func Benchmark__VMsgpack_Encode(b *testing.B) {
+	fnBenchmarkEncode(b, "v-msgpack", fnVMsgpackEncodeFn)
+}
+
+func Benchmark__VMsgpack_Decode(b *testing.B) {
+	fnBenchmarkDecode(b, "v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn)
+}
+
+func TestMsgpackPythonGenStreams(t *testing.T) {
+	doTestMsgpackPythonGenStreams(t)
+}

+ 274 - 0
codec/helper.go

@@ -0,0 +1,274 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+import (
+	"unicode"
+	"unicode/utf8"
+	"reflect"
+	"sync"
+	"strings"
+	"fmt"
+	"sort"
+	"time"
+	"encoding/binary"
+)
+
+const (
+	// For >= 4 elements, a linear search costs more than a map lookup (especially for reflect.Type)
+	mapAccessThreshold = 4 
+	binarySearchThreshold = 16 
+	structTagName = "codec"
+)
+
+type charEncoding uint8
+
+const (
+	c_RAW charEncoding = iota
+	c_UTF8
+	c_UTF16LE
+	c_UTF16BE
+	c_UTF32LE
+	c_UTF32BE
+)
+
+var (
+	binc = binary.BigEndian
+	structInfoFieldName = "_struct"
+	
+	cachedStructFieldInfos = make(map[reflect.Type]structFieldInfos, 4)
+	cachedStructFieldInfosMutex sync.RWMutex
+
+	nilIntfSlice = []interface{}(nil)
+	intfSliceTyp = reflect.TypeOf(nilIntfSlice)
+	intfTyp = intfSliceTyp.Elem()
+	byteSliceTyp = reflect.TypeOf([]byte(nil))
+	ptrByteSliceTyp = reflect.TypeOf((*[]byte)(nil))
+	mapStringIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+	timeTyp = reflect.TypeOf(time.Time{})
+	ptrTimeTyp = reflect.TypeOf((*time.Time)(nil))
+	int64SliceTyp = reflect.TypeOf([]int64(nil))
+	
+	intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
+	uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
+)
+
+type encdecHandle struct {
+	encHandle
+	decHandle
+}
+
+func (o *encdecHandle) AddExt(
+	rt reflect.Type, 
+	tag byte, 
+	encfn func(reflect.Value) ([]byte, error),
+	decfn func(reflect.Value, []byte) (error),
+) {
+	o.addEncodeExt(rt, tag, encfn)
+	o.addDecodeExt(rt, tag, decfn)
+}
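+
+// An illustrative sketch of registering an extension for time.Time, wrapping the
+// encodeTime/decodeTime helpers in this package. The tag value (1) and the
+// closure wrappers are assumptions for illustration only:
+//
+//   var mh MsgpackHandle
+//   mh.AddExt(reflect.TypeOf(time.Time{}), 1,
+//       func(rv reflect.Value) ([]byte, error) {
+//           return encodeTime(rv.Interface().(time.Time)), nil
+//       },
+//       func(rv reflect.Value, bs []byte) error {
+//           tt, err := decodeTime(bs)
+//           if err == nil {
+//               rv.Set(reflect.ValueOf(tt))
+//           }
+//           return err
+//       })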
+
+type Handle interface {
+	encodeHandleI
+	decodeHandleI
+}
+	
+type structFieldInfo struct {
+	encName   string      // encode name
+	is        []int
+	i         int16       // field index in struct
+	omitEmpty bool
+	// tag       string   // tag
+	// name      string   // field name
+	// encNameBs []byte   // encoded name as byte stream
+	// ikind     int      // kind of the field as an int i.e. int(reflect.Kind)
+}
+
+type structFieldInfos []structFieldInfo
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int { 
+	return len(p) 
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool { 
+	return p[i].encName < p[j].encName 
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) { 
+	p[i], p[j] = p[j], p[i] 
+}
+
+func (sis structFieldInfos) indexForEncName(name string) int {
+	sislen := len(sis)
+	if sislen < binarySearchThreshold {
+		// linear search. faster than binary search in my testing up to 16-field structs.
+		for i := 0; i < sislen; i++ {
+			if sis[i].encName == name {
+				return i
+			}
+		}
+	} else {
+		// binary search. adapted from sort/search.go.
+		h, i, j := 0, 0, sislen
+		for i < j {
+			h = i + (j-i)/2 
+			// i ≤ h < j
+			if sis[h].encName < name {
+				i = h + 1 // preserves f(i-1) == false
+			} else {
+				j = h // preserves f(j) == true
+			}
+		}
+		if i < sislen && sis[i].encName == name {
+			return i
+		}
+	}
+	return -1
+}
+
+func getStructFieldInfos(rt reflect.Type) (sis structFieldInfos) {
+	cachedStructFieldInfosMutex.RLock()
+	sis, ok := cachedStructFieldInfos[rt]
+	cachedStructFieldInfosMutex.RUnlock()
+	if ok {
+		return 
+	}
+	
+	cachedStructFieldInfosMutex.Lock()
+	defer cachedStructFieldInfosMutex.Unlock()
+	
+	var siInfo *structFieldInfo
+	if f, ok := rt.FieldByName(structInfoFieldName); ok {
+		siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName))
+	}
+	sisp := make([]*structFieldInfo, 0, rt.NumField())
+	rgetStructFieldInfos(rt, nil, make(map[string]bool), &sisp, siInfo)
+	sort.Sort(sfiSortedByEncName(sisp))
+
+	lsis := len(sisp)
+	sis = make([]structFieldInfo, lsis)
+	for i := 0; i < lsis; i++ {
+		sis[i] = *sisp[i]
+	}
+	// sis = sisp
+	cachedStructFieldInfos[rt] = sis
+	return
+}
+
+func rgetStructFieldInfos(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, 
+	sis *[]*structFieldInfo, siInfo *structFieldInfo,
+) {
+	for j := 0; j < rt.NumField(); j++ {
+		f := rt.Field(j)
+		stag := f.Tag.Get(structTagName)
+		if stag == "-" {
+			continue
+		}
+		if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+			continue
+		} 
+		if f.Anonymous {
+			//if anonymous, inline it if there is no struct tag, else treat as regular field
+			if stag == "" {
+				indexstack2 := append(append([]int(nil), indexstack...), j)
+				rgetStructFieldInfos(f.Type, indexstack2, fnameToHastag, sis, siInfo)
+				continue
+			}
+		} 
+		//do not let fields with the same name in embedded structs override a field at a higher level.
+		//this must be done after the anonymous check, so anonymous fields can still include their child fields
+		if _, ok := fnameToHastag[f.Name]; ok {
+			continue
+		}
+		si := parseStructFieldInfo(f.Name, stag)
+		// si.ikind = int(f.Type.Kind())
+		if len(indexstack) == 0 {
+			si.i = int16(j)
+		} else {
+			si.i = -1
+			si.is = append(append([]int(nil), indexstack...), j)
+		}
+
+		if siInfo != nil {
+			if siInfo.omitEmpty {
+				si.omitEmpty = true
+			}
+		}
+		*sis = append(*sis, si)
+		fnameToHastag[f.Name] = stag != ""
+	}
+}
+
+func parseStructFieldInfo(fname string, stag string) (*structFieldInfo) {
+	if fname == "" {
+		panic("parseStructFieldInfo: No Field Name")
+	}
+	si := structFieldInfo {
+		// name: fname,
+		encName: fname,
+		// tag: stag,
+	}	
+	
+	if stag != "" {
+		for i, s := range strings.Split(stag, ",") {
+			if i == 0 {
+				if s != "" {
+					si.encName = s
+				}
+			} else {
+				if s == "omitempty" {
+					si.omitEmpty = true
+				}
+			}
+		}
+	}
+	// si.encNameBs = []byte(si.encName)
+	return &si
+}
+
+func panicToErr(err *error) {
+	if x := recover(); x != nil { 
+		//debug.PrintStack() 
+		panicValToErr(x, err)
+	}
+}
+
+func doPanic(tag string, format string, params ...interface{}) {
+	params2 := make([]interface{}, len(params) + 1)
+	params2[0] = tag
+	copy(params2[1:], params)
+	panic(fmt.Errorf("%s: " + format, params2...))
+}
+
+
+
+
+//--------------------------------------------------
+
+// // This implements the util.Codec interface
+// type Codec struct {
+// 	H Handle
+// }
+
+// func (x Codec) Encode(w io.Writer, v interface{}) error {
+// 	return NewEncoder(w, x.H).Encode(v)
+// }
+
+// func (x Codec) EncodeBytes(out *[]byte, v interface{}) error {
+// 	return NewEncoderBytes(out, x.H).Encode(v)
+// }
+
+// func (x Codec) Decode(r io.Reader, v interface{}) error {
+// 	return NewDecoder(r, x.H).Decode(v)
+// }
+
+// func (x Codec) DecodeBytes(in []byte, v interface{}) error {
+// 	return NewDecoderBytes(in, x.H).Decode(v)
+// }
+

+ 61 - 0
codec/helper_internal.go

@@ -0,0 +1,61 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to a different environment is easy (just update the functions).
+
+import (
+	"reflect"
+	"fmt"
+	"errors"
+)
+
+var (
+	raisePanicAfterRecover = false
+	debugging = true
+)
+
+func panicValToErr(panicVal interface{}, err *error) {
+	switch xerr := panicVal.(type) {
+	case error:
+		*err = xerr
+	case string:
+		*err = errors.New(xerr)
+	default:
+		*err = fmt.Errorf("%v", panicVal)
+	}
+	if raisePanicAfterRecover {
+		panic(panicVal)
+	}
+	return
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func debugf(format string, args ...interface{}) {
+	if debugging {
+		if len(format) == 0 || format[len(format)-1] != '\n' {
+			format = format + "\n"
+		}
+		fmt.Printf(format, args...)
+	}
+}
+

+ 731 - 0
codec/msgpack.go

@@ -0,0 +1,731 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"reflect"
+	"math"
+	"time"
+	"fmt"
+	"net/rpc"
+	"io"
+)
+
+const (
+	mpPosFixNumMin byte = 0x00
+	mpPosFixNumMax = 0x7f
+	mpFixMapMin = 0x80
+	mpFixMapMax = 0x8f
+	mpFixArrayMin =  0x90
+	mpFixArrayMax =  0x9f
+	mpFixRawMin  = 0xa0
+	mpFixRawMax  = 0xbf
+	mpNil = 0xc0
+	mpFalse = 0xc2
+	mpTrue = 0xc3
+	mpFloat = 0xca
+	mpDouble = 0xcb
+	mpUint8 = 0xcc
+	mpUint16 = 0xcd
+	mpUint32 = 0xce
+	mpUint64 = 0xcf
+	mpInt8 = 0xd0
+	mpInt16 = 0xd1
+	mpInt32 = 0xd2
+	mpInt64 = 0xd3
+	mpRaw16 = 0xda
+	mpRaw32 = 0xdb
+	mpArray16 = 0xdc
+	mpArray32 = 0xdd
+	mpMap16 = 0xde
+	mpMap32 = 0xdf
+	mpNegFixNumMin = 0xe0
+	mpNegFixNumMax = 0xff
+
+	// extensions below
+	// mpBin8 = 0xc4
+	// mpBin16 = 0xc5
+	// mpBin32 = 0xc6
+	// mpExt8 = 0xc7
+	// mpExt16 = 0xc8
+	// mpExt32 = 0xc9
+	// mpFixExt1 = 0xd4
+	// mpFixExt2 = 0xd5
+	// mpFixExt4 = 0xd6
+	// mpFixExt8 = 0xd7
+	// mpFixExt16 = 0xd8
+
+	// extensions based off v4: https://gist.github.com/frsyuki/5235364
+	mpXv4Fixext0 = 0xc4
+	mpXv4Fixext1 = 0xc5
+	mpXv4Fixext2 = 0xc6
+	mpXv4Fixext3 = 0xc7
+	mpXv4Fixext4 = 0xc8
+	mpXv4Fixext5 = 0xc9
+
+	mpXv4Ext8m = 0xd4
+	mpXv4Ext16m = 0xd5
+	mpXv4Ext32m = 0xd6
+	mpXv4Ext8 = 0xd7
+	mpXv4Ext16 = 0xd8
+	mpXv4Ext32 = 0xd9
+)	
+
+
+// msgpackContainerType specifies the byte descriptors used for a msgpack container type (raw bytes, list, or map).
+type msgpackContainerType struct {
+	cutoff int8
+	b0, b1, b2 byte
+}
+
+var (
+	msgpackContainerRawBytes = msgpackContainerType{32, mpFixRawMin, mpRaw16, mpRaw32}
+	msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, mpArray16, mpArray32}
+	msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, mpMap16, mpMap32}
+)
+
+// MsgpackSpecRpc is the implementation of Rpc that uses the custom communication protocol
+// defined in the msgpack spec at http://wiki.msgpack.org/display/MSGPACK/RPC+specification
+type MsgpackSpecRpc struct{}
+
+type msgpackSpecRpcCodec struct {
+	rpcCodec
+}
+
+//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+	// RawToString controls how raw bytes are decoded into a nil interface{}.
+	// Note that setting an extension func for []byte ensures that raw bytes 
+	// are decoded as strings, regardless of this setting. 
+	// This setting is used only if an extension func isn't defined for []byte.
+	RawToString bool
+	// WriteExt flag supports encoding configured extensions with extension tags.
+	// 
+	// With WriteExt=false, configured extensions are serialized as raw bytes.
+	// 
+	// They can still be decoded into a typed object, provided an appropriate one is 
+	// supplied, but the type cannot be inferred from the stream. If no appropriate
+	// type is provided (e.g. decoding into a nil interface{}), you get back
+	// a []byte or string based on the setting of RawToString.
+	WriteExt bool	
+
+	encdecHandle
+	DecodeOptions
+}
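+
+// A minimal round-trip sketch using MsgpackHandle (variable names are
+// illustrative assumptions):
+//
+//   var (
+//       mh  MsgpackHandle
+//       b   []byte
+//       in  = map[string]int{"a": 1}
+//       out map[string]int
+//   )
+//   if err := NewEncoderBytes(&b, &mh).Encode(in); err == nil {
+//       err = NewDecoderBytes(b, &mh).Decode(&out)
+//   }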
+
+type msgpackEncoder struct { 
+	w encWriter
+}
+
+type msgpackDecoder struct {
+	r decReader
+	bd byte
+	bdRead bool
+}
+
+func (e *msgpackEncoder) encodeBuiltinType(rt reflect.Type, rv reflect.Value) bool {
+	//no builtin types. All encodings are based on kinds. Types supported as extensions.
+	return false
+}
+
+func (e *msgpackEncoder) encodeNil() {
+	e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncoder) encodeInt(i int64) {
+	switch {
+	case i >= -32 && i <= math.MaxInt8:
+		e.w.writen1(byte(i))
+	case i < -32 && i >= math.MinInt8:
+		e.w.writen2(mpInt8, byte(i))
+	case i >= math.MinInt16 && i <= math.MaxInt16:
+		e.w.writen1(mpInt16)
+		e.w.writeUint16(uint16(i))
+	case i >= math.MinInt32 && i <= math.MaxInt32:
+		e.w.writen1(mpInt32)
+		e.w.writeUint32(uint32(i))
+	case i >= math.MinInt64 && i <= math.MaxInt64:
+		e.w.writen1(mpInt64)
+		e.w.writeUint64(uint64(i))
+	default:
+		encErr("encInt64: Unreachable block")
+	}
+}
+
+func (e *msgpackEncoder) encodeUint(i uint64) {
+	// uints are not fixnums. fixnums are always signed.
+	// case i <= math.MaxInt8:
+	// 	e.w.writen1(byte(i))
+	switch {
+	case i <= math.MaxUint8:
+		e.w.writen2(mpUint8, byte(i))
+	case i <= math.MaxUint16:
+		e.w.writen1(mpUint16)
+		e.w.writeUint16(uint16(i))
+	case i <= math.MaxUint32:
+		e.w.writen1(mpUint32)
+		e.w.writeUint32(uint32(i))
+	default:
+		e.w.writen1(mpUint64)
+		e.w.writeUint64(uint64(i))
+	}
+}
+
+func (e *msgpackEncoder) encodeBool(b bool) {
+	if b {
+		e.w.writen1(mpTrue)
+	} else {
+		e.w.writen1(mpFalse)
+	}
+}
+
+func (e *msgpackEncoder) encodeFloat32(f float32) {
+	e.w.writen1(mpFloat)
+	e.w.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncoder) encodeFloat64(f float64) {
+	e.w.writen1(mpDouble)
+	e.w.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncoder) encodeExtPreamble(xtag byte, l int) {
+	switch {
+	case l <= 4:
+		e.w.writen2(0xd4 | byte(l), xtag)
+	case l <= 8:
+		e.w.writen2(0xc0 | byte(l), xtag)
+	case l < 256:
+		e.w.writen3(mpXv4Fixext5, xtag, byte(l))
+	case l < 65536:
+		e.w.writen2(mpXv4Ext16, xtag)
+		e.w.writeUint16(uint16(l))
+	default:
+		e.w.writen2(mpXv4Ext32, xtag)
+		e.w.writeUint32(uint32(l))
+	}
+}
+
+func (e *msgpackEncoder) encodeArrayPreamble(length int) {
+	e.writeContainerLen(msgpackContainerList, length)
+}
+
+
+func (e *msgpackEncoder) encodeMapPreamble(length int) {
+	e.writeContainerLen(msgpackContainerMap, length)	
+}
+
+func (e *msgpackEncoder) encodeString(c charEncoding, s string) {
+	//ignore charEncoding. 
+	e.writeContainerLen(msgpackContainerRawBytes, len(s))
+	if len(s) > 0 {
+		e.w.writestr(s)
+	}
+}
+
+func (e *msgpackEncoder) encodeStringBytes(c charEncoding, bs []byte) {
+	//ignore charEncoding. 
+	e.writeContainerLen(msgpackContainerRawBytes, len(bs))
+	if len(bs) > 0 {
+		e.w.writeb(bs)
+	}
+}
+
+func (e *msgpackEncoder) writeContainerLen(ct msgpackContainerType, l int) {
+	switch {
+	case l < int(ct.cutoff):
+		e.w.writen1(ct.b0 | byte(l))
+	case l < 65536:
+		e.w.writen1(ct.b1)
+		e.w.writeUint16(uint16(l))
+	default:
+		e.w.writen1(ct.b2)
+		e.w.writeUint32(uint32(l))
+	}
+}
+
+//---------------------------------------------
+
+func (d *msgpackDecoder) decodeBuiltinType(rt reflect.Type, rv reflect.Value) bool { 
+	return false
+}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension. 
+// It is called when a nil interface{} is passed, leaving it up to the Decoder
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecoder) decodeNaked(h decodeHandleI) (rv reflect.Value, ctx decodeNakedContext) {
+	d.initReadNext()
+	bd := d.bd
+	
+	var v interface{}
+
+	switch bd {
+	case mpNil:
+		ctx = dncNil
+		d.bdRead = false
+	case mpFalse:
+		v = false
+	case mpTrue:
+		v = true
+
+	case mpFloat:
+		v = math.Float32frombits(d.r.readUint32())
+	case mpDouble:
+		v = math.Float64frombits(d.r.readUint64())
+		
+	case mpUint8:
+		v = d.r.readUint8()
+	case mpUint16:
+		v = d.r.readUint16()
+	case mpUint32:
+		v = d.r.readUint32()
+	case mpUint64:
+		v = d.r.readUint64()
+		
+	case mpInt8:
+		v = int8(d.r.readUint8())
+	case mpInt16:
+		v = int16(d.r.readUint16())
+	case mpInt32:
+		v = int32(d.r.readUint32())
+	case mpInt64:
+		v = int64(d.r.readUint64())
+		
+	default:
+		switch {
+		case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+			// positive fixnum (always signed)
+			v = int8(bd)
+		case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+			// negative fixnum
+			v = int8(bd)		
+		case bd == mpRaw16, bd == mpRaw32, bd >= mpFixRawMin && bd <= mpFixRawMax:
+			ctx = dncContainer
+			// v = containerRawBytes
+			opts := h.(*MsgpackHandle)
+			if opts.rawToStringOverride || opts.RawToString {
+				var rvm string
+				rv = reflect.ValueOf(&rvm).Elem()
+			} else {
+				rv = reflect.New(byteSliceTyp).Elem() // Use New, not Zero, so it's settable
+			}
+		case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+			ctx = dncContainer
+			// v = containerList
+			opts := h.(*MsgpackHandle)
+			if opts.SliceType == nil {
+				rv = reflect.New(intfSliceTyp).Elem()
+			} else {
+				rv = reflect.New(opts.SliceType).Elem()
+			}
+		case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+			ctx = dncContainer
+			// v = containerMap
+			opts := h.(*MsgpackHandle)
+			if opts.MapType == nil {
+				rv = reflect.MakeMap(mapIntfIntfTyp)
+			} else {
+				rv = reflect.MakeMap(opts.MapType)
+			}
+		case bd >= mpXv4Fixext0 && bd <= mpXv4Fixext5, bd >= mpXv4Ext8m && bd <= mpXv4Ext32:
+			//ctx = dncExt
+			xtag := d.r.readUint8()
+			opts := h.(*MsgpackHandle)
+			rt, bfn := opts.getDecodeExtForTag(xtag)
+			if rt == nil {
+				decErr("Unable to find type mapped to extension tag: %v", xtag)
+			}
+			if rt.Kind() == reflect.Ptr {
+				rv = reflect.New(rt.Elem())
+			} else {
+				rv = reflect.New(rt).Elem()
+			}
+			if fnerr := bfn(rv, d.r.readn(d.readExtLen())); fnerr != nil {
+				panic(fnerr)
+			} 
+		default:
+			decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+		}
+	}
+	if ctx == dncHandled {
+		d.bdRead = false
+		if v != nil {
+			rv = reflect.ValueOf(v)
+		}
+	}
+	return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX 
+func (d *msgpackDecoder) decodeInt(bitsize uint8) (i int64) {
+	switch d.bd {
+	case mpUint8:
+		i = int64(uint64(d.r.readUint8()))
+	case mpUint16:
+		i = int64(uint64(d.r.readUint16()))
+	case mpUint32:
+		i = int64(uint64(d.r.readUint32()))
+	case mpUint64:
+		i = int64(d.r.readUint64())
+	case mpInt8:
+		i = int64(int8(d.r.readUint8()))
+	case mpInt16:
+		i = int64(int16(d.r.readUint16()))
+	case mpInt32:
+		i = int64(int32(d.r.readUint32()))
+	case mpInt64:
+		i = int64(d.r.readUint64())
+	default:
+		switch {
+		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+			i = int64(int8(d.bd))
+		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+			i = int64(int8(d.bd))
+		default:
+			decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+		}
+	}
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowInt())
+	if bitsize > 0 {
+		if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+			decErr("Overflow int value: %v", i)
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+
+// uint can be decoded from msgpack type: intXXX or uintXXX 
+func (d *msgpackDecoder) decodeUint(bitsize uint8) (ui uint64) {
+	switch d.bd {
+	case mpUint8:
+		ui = uint64(d.r.readUint8())
+	case mpUint16:
+		ui = uint64(d.r.readUint16())
+	case mpUint32:
+		ui = uint64(d.r.readUint32())
+	case mpUint64:
+		ui = d.r.readUint64()
+	case mpInt8:
+		if i := int64(int8(d.r.readUint8())); i >= 0 {
+			ui = uint64(i)
+		} else {
+			decErr("Assigning negative signed value: %v, to unsigned type", i)
+		}
+	case mpInt16:
+		if i := int64(int16(d.r.readUint16())); i >= 0 {
+			ui = uint64(i)
+		} else {
+			decErr("Assigning negative signed value: %v, to unsigned type", i)
+		}
+	case mpInt32:
+		if i := int64(int32(d.r.readUint32())); i >= 0 {
+			ui = uint64(i)
+		} else {
+			decErr("Assigning negative signed value: %v, to unsigned type", i)
+		}
+	case mpInt64:
+		if i := int64(d.r.readUint64()); i >= 0 {
+			ui = uint64(i)
+		} else {
+			decErr("Assigning negative signed value: %v, to unsigned type", i)
+		}
+	default:
+		switch {
+		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+			ui = uint64(d.bd)
+		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+			decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+		default:
+			decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+		}
+	}
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+	if bitsize > 0 {
+		if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+			decErr("Overflow uint value: %v", ui) 
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecoder) decodeFloat(chkOverflow32 bool) (f float64) {
+	switch d.bd {
+	case mpFloat:
+		f = float64(math.Float32frombits(d.r.readUint32()))
+	case mpDouble:
+		f = math.Float64frombits(d.r.readUint64())
+	default:
+		f = float64(d.decodeInt(0))
+	}
+	// check overflow (logic adapted from std pkg reflect/value.go OverflowFloat())
+	if chkOverflow32 {
+		f2 := f
+		if f2 < 0 {
+			f2 = -f
+		}
+		if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 {
+			decErr("Overflow float32 value: %v", f2)
+		}
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecoder) decodeBool() (b bool) {
+	switch d.bd {
+	case mpFalse, 0:
+		// b = false
+	case mpTrue, 1:
+		b = true
+	default:
+		decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+	}
+	d.bdRead = false
+	return
+}
+	
+func (d *msgpackDecoder) decodeString() (s string) {
+	clen := d.readContainerLen(msgpackContainerRawBytes)
+	if clen > 0 {
+		s = string(d.r.readn(clen))
+	}
+	d.bdRead = false
+	return
+}
+
+// Callers must check if changed=true (to decide whether to replace the one they have)
+func (d *msgpackDecoder) decodeStringBytes(bs []byte) (bsOut []byte, changed bool) {
+	clen := d.readContainerLen(msgpackContainerRawBytes)
+	// if clen < 0 {
+	// 	changed = true
+	// 	panic("length cannot be zero. this cannot be nil.")
+	// } 
+	if clen > 0 {
+		// if no contents in stream, don't update the passed byteslice	
+		if len(bs) != clen {
+			// Return changed=true if length of passed slice is different from length of bytes in the stream.
+			if len(bs) > clen {
+				bs = bs[:clen]
+			} else {
+				bs = make([]byte, clen)
+			}
+			bsOut = bs
+			changed = true
+		}
+		d.r.readb(bs)
+	}
+	d.bdRead = false
+	return
+}
+
+// Every top-level decode func (i.e. decodeValue, decode) must call this first.
+func (d *msgpackDecoder) initReadNext() {
+	if d.bdRead {
+		return
+	}
+	d.bd = d.r.readUint8()
+	d.bdRead = true
+}
+
+func (d *msgpackDecoder) currentIsNil() bool {
+	if d.bd == mpNil {
+		d.bdRead = false
+		return true
+	} 
+	return false
+}
+
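+// readContainerLen reads the length prefix of a map, array, or raw-bytes container.
+// In msgpack, small lengths are embedded in the type byte itself (ct.b0|N, e.g.
+// fixmap 0x80|N holds N entries), while larger containers use the ct.b1/ct.b2
+// type bytes followed by an explicit uint16/uint32 length.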
+func (d *msgpackDecoder) readContainerLen(ct msgpackContainerType) (clen int) {
+	switch {
+	case d.bd == mpNil:
+		clen = -1 // to represent nil
+	case d.bd == ct.b1:
+		clen = int(d.r.readUint16())
+	case d.bd == ct.b2:
+		clen = int(d.r.readUint32())
+	case (ct.b0 & d.bd) == ct.b0:
+		clen = int(ct.b0 ^ d.bd)
+	default:
+		decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, d.bd, d.bd)
+	}
+	d.bdRead = false
+	return	
+}
+
+func (d *msgpackDecoder) readMapLen() int {
+	return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecoder) readArrayLen() int {
+	return d.readContainerLen(msgpackContainerList)
+}
+
+
+func (d *msgpackDecoder) readExtLen() (clen int) {
+	switch d.bd {
+	case mpNil:
+		clen = -1 // to represent nil
+	case mpXv4Fixext5:
+		clen = int(d.r.readUint8())
+	case mpXv4Ext16:
+		clen = int(d.r.readUint16())
+	case mpXv4Ext32:
+		clen = int(d.r.readUint32())
+	default: 
+		switch {
+		case d.bd >= mpXv4Fixext0 && d.bd <= mpXv4Fixext4:
+			clen = int(d.bd & 0x0f)
+		case d.bd >= mpXv4Ext8m && d.bd <= mpXv4Ext8:
+			clen = int(d.bd & 0x03)
+		default:
+			decErr("decoding ext bytes: found unexpected byte: %x", d.bd)
+		}
+	}
+	return
+}
+
+func (d *msgpackDecoder) decodeExt(tag byte) (xbs []byte) {
+	// if (d.bd >= mpXv4Fixext0 && d.bd <= mpXv4Fixext5) || (d.bd >= mpXv4Ext8m && d.bd <= mpXv4Ext32) {
+	xbd := d.bd
+	switch {
+	case xbd >= mpXv4Fixext0 && xbd <= mpXv4Fixext5, xbd >= mpXv4Ext8m && xbd <= mpXv4Ext32:
+		if xtag := d.r.readUint8(); xtag != tag {
+			decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+		}
+		xbs = d.r.readn(d.readExtLen())
+	case xbd == mpRaw16, xbd == mpRaw32, xbd >= mpFixRawMin && xbd <= mpFixRawMax:
+		xbs, _ = d.decodeStringBytes(nil)
+	default:
+		decErr("Wrong byte descriptor (Expecting extensions or raw bytes). Got: 0x%x", xbd)
+	}		
+	d.bdRead = false
+	return
+}
+
+//--------------------------------------------------
+
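+// ServerCodec and ClientCodec below return net/rpc codecs that speak the
+// msgpack-rpc wire protocol, where each message is a four-element array:
+// [type, msgid, method-or-error, params-or-result].
+//
+// A hedged usage sketch (assuming MsgpackSpecRpc is constructible as a zero value
+// like GoRpc, that conn is an established net.Conn, and that mh is a configured
+// *MsgpackHandle; the service and method names are illustrative only):
+//
+//	srv := rpc.NewServer()
+//	// srv.Register(myService)
+//	go srv.ServeCodec(MsgpackSpecRpc{}.ServerCodec(conn, mh))
+//
+//	cl := rpc.NewClientWithCodec(MsgpackSpecRpc{}.ClientCodec(conn, mh))
+//	// err := cl.Call("MyService.Method", args, &reply)
+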
+func (MsgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (MsgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	return c.writeCustomBody(0, r.Seq, r.ServiceMethod, body)
+}
+
+func (c msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	return c.writeCustomBody(1, r.Seq, r.Error, body)
+}
+
+func (c msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+	return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+
+	// We read the header by hand so that the body can be decoded
+	// on its own from the stream at a later time.
+
+	bs := make([]byte, 1)
+	n, err := c.rwc.Read(bs)
+	if err != nil {
+		return 
+	}
+	if n != 1 {
+		err = fmt.Errorf("Couldn't read array descriptor: No bytes read")
+		return
+	}
+	const fia byte = 0x94 //four item array descriptor value
+	if bs[0] != fia {
+		err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs[0])
+		return
+	}
+	var b byte
+	if err = c.read(&b, msgid, methodOrError); err != nil {
+		return
+	}
+	if b != expectTypeByte {
+		err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
+		return
+	}
+	return
+}
+
+func (c msgpackSpecRpcCodec) writeCustomBody(typeByte byte, msgid uint64, methodOrError string, body interface{}) (err error) {
+	var moe interface{} = methodOrError
+	// A response needs a nil error (not ""), and at most one of error or body can be non-nil.
+	if typeByte == 1 {
+		if methodOrError == "" {
+			moe = nil
+		}
+		if moe != nil && body != nil {
+			body = nil
+		}
+	}
+	r2 := []interface{}{typeByte, uint32(msgid), moe, body}
+	return c.enc.Encode(r2)
+}
+
+
+
+//--------------------------------------------------
+
+// BinaryEncodeExt returns the underlying bytes of this value AS-IS.
+// Configure this to support the Binary Extension using tag 0.
+func (_ *MsgpackHandle) BinaryEncodeExt(rv reflect.Value) ([]byte, error) {
+	if rv.IsNil() {
+		return nil, nil
+	}
+	return rv.Bytes(), nil
+}
+
+// BinaryDecodeExt sets the passed byte slice AS-IS into the reflect.Value.
+// Configure this to support the Binary Extension using tag 0.
+func (_ *MsgpackHandle) BinaryDecodeExt(rv reflect.Value, bs []byte) (err error) {
+	rv.SetBytes(bs)
+	return
+}
+
+// TimeEncodeExt encodes a time.Time value into its byte representation.
+// Configure this to support a timestamp extension.
+func (_ *MsgpackHandle) TimeEncodeExt(rv reflect.Value) (bs []byte, err error) {
+	bs = encodeTime(rv.Interface().(time.Time))
+	return
+}
+
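+// TimeDecodeExt decodes the bytes produced by TimeEncodeExt into a time.Time
+// and sets it into the passed reflect.Value.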
+func (_ *MsgpackHandle) TimeDecodeExt(rv reflect.Value, bs []byte) (err error) {
+	tt, err := decodeTime(bs)
+	if err == nil {
+		rv.Set(reflect.ValueOf(tt))
+	}
+	return
+}
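+
+// The extension functions above are meant to be registered on the handle.
+// A hedged sketch (the AddExt name/signature shown here is an assumption;
+// the actual registration helper lives in helper.go):
+//
+//	var mh MsgpackHandle
+//	mh.AddExt(reflect.TypeOf([]byte(nil)), 0, mh.BinaryEncodeExt, mh.BinaryDecodeExt)
+//	mh.AddExt(reflect.TypeOf(time.Time{}), 1, mh.TimeEncodeExt, mh.TimeDecodeExt)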
+
+func (_ *MsgpackHandle) newEncoder(w encWriter) encoder {
+	return &msgpackEncoder{w: w}
+}
+
+func (_ *MsgpackHandle) newDecoder(r decReader) decoder {
+	return &msgpackDecoder{r: r}
+}
+
+func (o *MsgpackHandle) writeExt() bool {
+	return o.WriteExt
+}
+

+ 83 - 0
codec/msgpack_test.py

@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# This creates golden files in the directory passed to it.
+# A Go test calls this internally to create the golden files,
+# so that it can process them (and we don't have to check the files in).
+
+import msgpack, sys, os
+
+def get_test_data_list():
+    # get list with all primitive types, and a combo type
+    l = [ 
+        -8,
+         -1616,
+         -32323232,
+         -6464646464646464,
+         192,
+         1616,
+         32323232,
+         6464646464646464,
+         192,
+         -3232.0,
+         -6464646464.0,
+         3232.0,
+         6464646464.0,
+         False,
+         True,
+         None,
+         1328148122000002,
+         "someday",
+         "",
+         "bytestring",
+         [ 
+            -8,
+             -1616,
+             -32323232,
+             -6464646464646464,
+             192,
+             1616,
+             32323232,
+             6464646464646464,
+             192,
+             -3232.0,
+             -6464646464.0,
+             3232.0,
+             6464646464.0,
+             False,
+             True,
+             None,
+             1328148122000002,
+             "someday",
+             "",
+             "bytestring" 
+             ],
+         { "true": True,
+           "false": False },
+         { "true": "True",
+           "false": False,
+           "uint16(1616)": 1616 },
+         { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+           "int32":32323232, "bool": True, 
+           "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
+           "SHORT STRING": "1234567890" },	
+	 { True: "true", 8: False, "false": 0 }
+         ]
+    return l
+
+def build_test_data(destdir):
+    l = get_test_data_list()
+    for i in range(len(l)):
+        packer = msgpack.Packer()
+        serialized = packer.pack(l[i])
+        f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
+        f.write(serialized)
+        f.close()
+
+def doMain(args):
+    if len(args) == 2 and args[0] == "testdata":
+        build_test_data(args[1])
+    else:
+        print("Usage: build.py [testdata]")
+    
+if __name__ == "__main__":
+    doMain(sys.argv[1:])

+ 108 - 0
codec/rpc.go

@@ -0,0 +1,108 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+/*
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package. 
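+
+A hedged usage sketch (assuming conn is an established net.Conn, h is a configured
+Handle such as &MsgpackHandle{}, and a service has already been registered with
+net/rpc; the service and method names are illustrative only):
+
+	srv := rpc.NewServer()
+	// srv.Register(myService)
+	go srv.ServeCodec(GoRpc{}.ServerCodec(conn, h))
+
+	cl := rpc.NewClientWithCodec(GoRpc{}.ClientCodec(conn, h))
+	// err := cl.Call("MyService.Method", args, &reply)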
+*/
+package codec
+
+import (
+	"net/rpc"
+	"io"
+)
+
+// Rpc provides a rpc Server or Client Codec for rpc communication via the net/rpc package.
+type Rpc interface {
+	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+type rpcCodec struct {
+	rwc       io.ReadWriteCloser
+	dec       *Decoder
+	enc       *Encoder
+}
+
+type goRpcCodec struct {
+	rpcCodec
+}
+
+// GoRpc is the implementation of Rpc that uses the communication protocol
+// as defined in the net/rpc package.
+type GoRpc struct{}
+
+func (x GoRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+	return goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x GoRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+	return goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+	return rpcCodec{
+		rwc: conn,
+		dec: NewDecoder(conn, h),
+		enc: NewEncoder(conn, h),
+	}
+}
+
+// /////////////// RPC Codec Shared Methods ///////////////////
+func (c rpcCodec) write(objs ...interface{}) (err error) {
+	for _, obj := range objs {
+		if err = c.enc.Encode(obj); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (c rpcCodec) read(objs ...interface{}) (err error) {
+	for _, obj := range objs {
+		// If nil is passed in, we still attempt to read the content into a
+		// throwaway value, so the stream stays in sync.
+		if obj == nil {
+			// Note: obj = &obj is not usable here (it exhausted memory in testing,
+			// possibly because obj is the loop variable); decode into a fresh interface{} instead.
+			var n interface{}
+			obj = &n
+		}
+		if err = c.dec.Decode(obj); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (c rpcCodec) Close() error {
+	return c.rwc.Close()	
+}
+
+func (c rpcCodec) ReadResponseBody(body interface{}) (err error) {
+	err = c.read(body)
+	return
+}
+
+func (c rpcCodec) ReadRequestBody(body interface{}) error {
+	return c.read(body)
+}
+
+// /////////////// Go RPC Codec ///////////////////
+func (c goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+	return c.write(r, body)
+}
+
+func (c goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+	return c.write(r, body)
+}
+
+func (c goRpcCodec) ReadResponseHeader(r *rpc.Response) (err error) {
+	err = c.read(r)
+	return
+}
+
+func (c goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+	return c.read(r)
+}
+

+ 99 - 0
codec/z_helper_test.go

@@ -0,0 +1,99 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies related to testing live in this file,
+// so porting to a different environment is easy (just update these functions).
+//
+// Also, this file is named z_helper_test to give a "hint" to the compiler
+// that its init() function should be called last (ordering is not guaranteed by the spec).
+
+import (
+	"testing"
+	"reflect"
+	"errors"
+)
+
+var (
+	testLogToT = true
+	failNowOnFail = true
+)
+
+// does final initialization
+func init() {
+	close(benchInitChan)
+}
+
+func checkErrT(t *testing.T, err error) {
+	if err != nil {
+		logT(t, err.Error())
+		failT(t)
+	}
+}
+
+func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}) {
+	if err := deepEqual(v1, v2); err != nil {
+		logT(t, "Values do not match: %v. v1: %v, v2: %v", err, v1, v2)
+		failT(t)
+	}
+}
+
+func logT(x interface{}, format string, args ...interface{}) {
+	if t, ok := x.(*testing.T); ok && t != nil && testLogToT {
+		t.Logf(format, args...)	
+	} else if b, ok := x.(*testing.B); ok && b != nil && testLogToT {
+		b.Logf(format, args...)
+	} else {
+		debugf(format, args...)
+	}
+}
+
+func failT(t *testing.T) {
+	if failNowOnFail {
+		t.FailNow()
+	} else {
+		t.Fail()
+	}
+}
+
+func deepEqual(v1, v2 interface{}) (err error) {
+	if !reflect.DeepEqual(v1, v2) {
+		err = errors.New("Not Match")
+	}
+	return
+}
+
+func approxDataSize(rv reflect.Value) (sum int) {
+	switch rk := rv.Kind(); rk {
+	case reflect.Invalid:
+	case reflect.Ptr, reflect.Interface:
+		sum += int(rv.Type().Size())
+		sum += approxDataSize(rv.Elem())
+	case reflect.Slice:
+		sum += int(rv.Type().Size())
+		for j := 0; j < rv.Len(); j++ {
+			sum += approxDataSize(rv.Index(j))
+		}
+	case reflect.String:
+		sum += int(rv.Type().Size()) 
+		sum += rv.Len()
+	case reflect.Map:
+		sum += int(rv.Type().Size()) 
+		for _, mk := range rv.MapKeys() {
+			sum += approxDataSize(mk)
+			sum += approxDataSize(rv.MapIndex(mk))
+		}
+	case reflect.Struct:
+		//struct size already includes the full data size.
+		//sum += int(rv.Type().Size())
+		for j := 0; j < rv.NumField(); j++ {
+			sum += approxDataSize(rv.Field(j))
+		}	
+	default:
+		//pure value types
+		sum += int(rv.Type().Size())
+	}
+	return
+}
+