Browse Source

codec: do not pool codecFn helper - make it part of BasicHandle instead

- move codecFner support away from being a pooled resource,
  and make it a part and method of BasicHandle
- introduce basicHandle(Handle) function, which initializes a BasicHandle from a Handle
- add tests for atomic slices and atomic clsErr
- use binary search to find the *codecFn for each given type.
  we previously used a linear search.
Ugorji Nwoke 7 years ago
parent
commit
6bfa3bf159
11 changed files with 684 additions and 625 deletions
  1. 70 26
      codec/codec_test.go
  2. 35 25
      codec/decode.go
  3. 11 16
      codec/encode.go
  4. 204 110
      codec/fast-path.generated.go
  5. 1 1
      codec/fast-path.not.go
  6. 276 288
      codec/helper.go
  7. 24 10
      codec/helper_not_unsafe.go
  8. 42 23
      codec/helper_unsafe.go
  9. 1 1
      codec/rpc.go
  10. 3 0
      codec/shared_test.go
  11. 17 125
      codec/z_all_test.go

+ 70 - 26
codec/codec_test.go

@@ -325,11 +325,11 @@ func (x timeExt) UpdateExt(v interface{}, src interface{}) {
 
 
 func testCodecEncode(ts interface{}, bsIn []byte,
 func testCodecEncode(ts interface{}, bsIn []byte,
 	fn func([]byte) *bytes.Buffer, h Handle) (bs []byte, err error) {
 	fn func([]byte) *bytes.Buffer, h Handle) (bs []byte, err error) {
-	return sTestCodecEncode(ts, bsIn, fn, h, h.getBasicHandle())
+	return sTestCodecEncode(ts, bsIn, fn, h, basicHandle(h))
 }
 }
 
 
 func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
 func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
-	return sTestCodecDecode(bs, ts, h, h.getBasicHandle())
+	return sTestCodecDecode(bs, ts, h, basicHandle(h))
 }
 }
 
 
 func checkErrT(t *testing.T, err error) {
 func checkErrT(t *testing.T, err error) {
@@ -361,7 +361,7 @@ func testInit() {
 	}
 	}
 
 
 	for _, v := range testHandles {
 	for _, v := range testHandles {
-		bh := v.getBasicHandle()
+		bh := basicHandle(v)
 		// pre-fill them first
 		// pre-fill them first
 		bh.EncodeOptions = testEncodeOptions
 		bh.EncodeOptions = testEncodeOptions
 		bh.DecodeOptions = testDecodeOptions
 		bh.DecodeOptions = testDecodeOptions
@@ -840,7 +840,7 @@ func testCodecTableOne(t *testing.T, h Handle) {
 	// doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
 	// doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:])
 	// func TestMsgpackNilStringMap(t *testing.T) {
 	// func TestMsgpackNilStringMap(t *testing.T) {
 	var oldMapType reflect.Type
 	var oldMapType reflect.Type
-	v := h.getBasicHandle()
+	v := basicHandle(h)
 
 
 	oldMapType, v.MapType = v.MapType, testMapStrIntfTyp
 	oldMapType, v.MapType = v.MapType, testMapStrIntfTyp
 	// defer func() { v.MapType = oldMapType }()
 	// defer func() { v.MapType = oldMapType }()
@@ -1301,7 +1301,7 @@ func doTestMapEncodeForCanonical(t *testing.T, name string, h Handle) {
 	if ch, ok := h.(*CborHandle); ok {
 	if ch, ok := h.(*CborHandle); ok {
 		cborIndef = ch.IndefiniteLength
 		cborIndef = ch.IndefiniteLength
 	}
 	}
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	if !bh.Canonical {
 	if !bh.Canonical {
 		bh.Canonical = true
 		bh.Canonical = true
 		defer func() { bh.Canonical = false }()
 		defer func() { bh.Canonical = false }()
@@ -1371,7 +1371,7 @@ func doTestEncCircularRef(t *testing.T, name string, h Handle) {
 	var bs []byte
 	var bs []byte
 	var err error
 	var err error
 
 
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	if !bh.CheckCircularRef {
 	if !bh.CheckCircularRef {
 		bh.CheckCircularRef = true
 		bh.CheckCircularRef = true
 		defer func() { bh.CheckCircularRef = false }()
 		defer func() { bh.CheckCircularRef = false }()
@@ -1415,7 +1415,7 @@ func doTestAnonCycle(t *testing.T, name string, h Handle) {
 	// just check that you can get typeInfo for T1
 	// just check that you can get typeInfo for T1
 	rt := reflect.TypeOf((*TestAnonCycleT1)(nil)).Elem()
 	rt := reflect.TypeOf((*TestAnonCycleT1)(nil)).Elem()
 	rtid := rt2id(rt)
 	rtid := rt2id(rt)
-	pti := h.getBasicHandle().getTypeInfo(rtid, rt)
+	pti := basicHandle(h).getTypeInfo(rtid, rt)
 	logT(t, "pti: %v", pti)
 	logT(t, "pti: %v", pti)
 }
 }
 
 
@@ -1491,7 +1491,7 @@ func doTestJsonLargeInteger(t *testing.T, v interface{}, ias uint8) {
 
 
 func doTestRawValue(t *testing.T, name string, h Handle) {
 func doTestRawValue(t *testing.T, name string, h Handle) {
 	testOnce.Do(testInitAll)
 	testOnce.Do(testInitAll)
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	if !bh.Raw {
 	if !bh.Raw {
 		bh.Raw = true
 		bh.Raw = true
 		defer func() { bh.Raw = false }()
 		defer func() { bh.Raw = false }()
@@ -1550,7 +1550,7 @@ func doTestPythonGenStreams(t *testing.T, name string, h Handle) {
 		failT(t)
 		failT(t)
 	}
 	}
 
 
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 
 
 	oldMapType := bh.MapType
 	oldMapType := bh.MapType
 	tablePythonVerify := testTableVerify(testVerifyForPython|testVerifyTimeAsInteger|testVerifyMapTypeStrIntf, h)
 	tablePythonVerify := testTableVerify(testVerifyForPython|testVerifyTimeAsInteger|testVerifyMapTypeStrIntf, h)
@@ -1700,7 +1700,7 @@ func doTestRawExt(t *testing.T, h Handle) {
 	var v RawExt // interface{}
 	var v RawExt // interface{}
 	_, isJson := h.(*JsonHandle)
 	_, isJson := h.(*JsonHandle)
 	_, isCbor := h.(*CborHandle)
 	_, isCbor := h.(*CborHandle)
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	// isValuer := isJson || isCbor
 	// isValuer := isJson || isCbor
 	// _ = isValuer
 	// _ = isValuer
 	for _, r := range []RawExt{
 	for _, r := range []RawExt{
@@ -1770,7 +1770,7 @@ func doTestMapStructKey(t *testing.T, h Handle) {
 	testOnce.Do(testInitAll)
 	testOnce.Do(testInitAll)
 	var b []byte
 	var b []byte
 	var v interface{} // map[stringUint64T]wrapUint64Slice // interface{}
 	var v interface{} // map[stringUint64T]wrapUint64Slice // interface{}
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	m := map[stringUint64T]wrapUint64Slice{
 	m := map[stringUint64T]wrapUint64Slice{
 		{"55555", 55555}: []wrapUint64{12345},
 		{"55555", 55555}: []wrapUint64{12345},
 		{"333", 333}:     []wrapUint64{123},
 		{"333", 333}:     []wrapUint64{123},
@@ -1794,13 +1794,13 @@ func doTestMapStructKey(t *testing.T, h Handle) {
 	}
 	}
 }
 }
 
 
-func doTestDecodeNilMapValue(t *testing.T, handle Handle) {
+func doTestDecodeNilMapValue(t *testing.T, h Handle) {
 	testOnce.Do(testInitAll)
 	testOnce.Do(testInitAll)
 	type Struct struct {
 	type Struct struct {
 		Field map[uint16]map[uint32]struct{}
 		Field map[uint16]map[uint32]struct{}
 	}
 	}
 
 
-	bh := handle.getBasicHandle()
+	bh := basicHandle(h)
 	oldMapType := bh.MapType
 	oldMapType := bh.MapType
 	oldDeleteOnNilMapValue := bh.DeleteOnNilMapValue
 	oldDeleteOnNilMapValue := bh.DeleteOnNilMapValue
 	defer func() {
 	defer func() {
@@ -1810,13 +1810,13 @@ func doTestDecodeNilMapValue(t *testing.T, handle Handle) {
 	bh.MapType = reflect.TypeOf(map[interface{}]interface{}(nil))
 	bh.MapType = reflect.TypeOf(map[interface{}]interface{}(nil))
 	bh.DeleteOnNilMapValue = false
 	bh.DeleteOnNilMapValue = false
 
 
-	_, isJsonHandle := handle.(*JsonHandle)
+	_, isJsonHandle := h.(*JsonHandle)
 
 
 	toEncode := Struct{Field: map[uint16]map[uint32]struct{}{
 	toEncode := Struct{Field: map[uint16]map[uint32]struct{}{
 		1: nil,
 		1: nil,
 	}}
 	}}
 
 
-	bs, err := testMarshal(toEncode, handle)
+	bs, err := testMarshal(toEncode, h)
 	if err != nil {
 	if err != nil {
 		logT(t, "Error encoding: %v, Err: %v", toEncode, err)
 		logT(t, "Error encoding: %v, Err: %v", toEncode, err)
 		failT(t)
 		failT(t)
@@ -1826,7 +1826,7 @@ func doTestDecodeNilMapValue(t *testing.T, handle Handle) {
 	}
 	}
 
 
 	var decoded Struct
 	var decoded Struct
-	err = testUnmarshal(&decoded, bs, handle)
+	err = testUnmarshal(&decoded, bs, h)
 	if err != nil {
 	if err != nil {
 		logT(t, "Error decoding: %v", err)
 		logT(t, "Error decoding: %v", err)
 		failT(t)
 		failT(t)
@@ -1851,7 +1851,7 @@ func doTestEmbeddedFieldPrecedence(t *testing.T, h Handle) {
 		Embedded: Embedded{Field: 2},
 		Embedded: Embedded{Field: 2},
 	}
 	}
 	_, isJsonHandle := h.(*JsonHandle)
 	_, isJsonHandle := h.(*JsonHandle)
-	handle := h.getBasicHandle()
+	handle := basicHandle(h)
 	oldMapType := handle.MapType
 	oldMapType := handle.MapType
 	defer func() { handle.MapType = oldMapType }()
 	defer func() { handle.MapType = oldMapType }()
 
 
@@ -2120,7 +2120,7 @@ func doTestDifferentMapOrSliceType(t *testing.T, name string, h Handle) {
 	//   To test, take a sequence of []byte and string, and decode into []string and []interface.
 	//   To test, take a sequence of []byte and string, and decode into []string and []interface.
 	//   Also, decode into map[string]string, map[string]interface{}, map[interface{}]string
 	//   Also, decode into map[string]string, map[string]interface{}, map[interface{}]string
 
 
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	oldM, oldS := bh.MapType, bh.SliceType
 	oldM, oldS := bh.MapType, bh.SliceType
 	defer func() { bh.MapType, bh.SliceType = oldM, oldS }()
 	defer func() { bh.MapType, bh.SliceType = oldM, oldS }()
 
 
@@ -2212,7 +2212,7 @@ func doTestScalars(t *testing.T, name string, h Handle) {
 	// - decode into new
 	// - decode into new
 	// - compare to original
 	// - compare to original
 
 
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	if !bh.Canonical {
 	if !bh.Canonical {
 		bh.Canonical = true
 		bh.Canonical = true
 		defer func() { bh.Canonical = false }()
 		defer func() { bh.Canonical = false }()
@@ -2277,7 +2277,7 @@ func doTestScalars(t *testing.T, name string, h Handle) {
 func doTestIntfMapping(t *testing.T, name string, h Handle) {
 func doTestIntfMapping(t *testing.T, name string, h Handle) {
 	testOnce.Do(testInitAll)
 	testOnce.Do(testInitAll)
 	rti := reflect.TypeOf((*testIntfMapI)(nil)).Elem()
 	rti := reflect.TypeOf((*testIntfMapI)(nil)).Elem()
-	defer func() { h.getBasicHandle().Intf2Impl(rti, nil) }()
+	defer func() { basicHandle(h).Intf2Impl(rti, nil) }()
 
 
 	type T9 struct {
 	type T9 struct {
 		I testIntfMapI
 		I testIntfMapI
@@ -2288,7 +2288,7 @@ func doTestIntfMapping(t *testing.T, name string, h Handle) {
 		&testIntfMapT1{"ABC \x41=\x42 \u2318 - \r \b \f - \u2028 and \u2029 ."},
 		&testIntfMapT1{"ABC \x41=\x42 \u2318 - \r \b \f - \u2028 and \u2029 ."},
 		testIntfMapT2{"DEF"},
 		testIntfMapT2{"DEF"},
 	} {
 	} {
-		if err := h.getBasicHandle().Intf2Impl(rti, reflect.TypeOf(v)); err != nil {
+		if err := basicHandle(h).Intf2Impl(rti, reflect.TypeOf(v)); err != nil {
 			failT(t, "Error mapping %v to %T", rti, v)
 			failT(t, "Error mapping %v to %T", rti, v)
 		}
 		}
 		var v1, v2 T9
 		var v1, v2 T9
@@ -2301,7 +2301,7 @@ func doTestIntfMapping(t *testing.T, name string, h Handle) {
 
 
 func doTestOmitempty(t *testing.T, name string, h Handle) {
 func doTestOmitempty(t *testing.T, name string, h Handle) {
 	testOnce.Do(testInitAll)
 	testOnce.Do(testInitAll)
-	if h.getBasicHandle().StructToArray {
+	if basicHandle(h).StructToArray {
 		t.Skipf("Skipping OmitEmpty test when StructToArray=true")
 		t.Skipf("Skipping OmitEmpty test when StructToArray=true")
 	}
 	}
 	type T1 struct {
 	type T1 struct {
@@ -2324,7 +2324,7 @@ func doTestMissingFields(t *testing.T, name string, h Handle) {
 	if codecgen {
 	if codecgen {
 		t.Skipf("Skipping Missing Fields tests as it is not honored by codecgen")
 		t.Skipf("Skipping Missing Fields tests as it is not honored by codecgen")
 	}
 	}
-	if h.getBasicHandle().StructToArray {
+	if basicHandle(h).StructToArray {
 		t.Skipf("Skipping Missing Fields test when StructToArray=true")
 		t.Skipf("Skipping Missing Fields test when StructToArray=true")
 	}
 	}
 	// encode missingFielderT2, decode into missingFielderT1, encode it out again, decode into new missingFielderT2, compare
 	// encode missingFielderT2, decode into missingFielderT1, encode it out again, decode into new missingFielderT2, compare
@@ -2397,13 +2397,13 @@ func doTestMaxDepth(t *testing.T, name string, h Handle) {
 	table = append(table, T{m99, 215, true, nil})
 	table = append(table, T{m99, 215, true, nil})
 
 
 	defer func(n int16, b bool) {
 	defer func(n int16, b bool) {
-		h.getBasicHandle().MaxDepth = n
+		basicHandle(h).MaxDepth = n
 		testUseMust = b
 		testUseMust = b
-	}(h.getBasicHandle().MaxDepth, testUseMust)
+	}(basicHandle(h).MaxDepth, testUseMust)
 
 
 	testUseMust = false
 	testUseMust = false
 	for i, v := range table {
 	for i, v := range table {
-		h.getBasicHandle().MaxDepth = v.M
+		basicHandle(h).MaxDepth = v.M
 		b1 := testMarshalErr(v.I, h, t, name+"-maxdepth-enc"+strconv.FormatInt(int64(i), 10))
 		b1 := testMarshalErr(v.I, h, t, name+"-maxdepth-enc"+strconv.FormatInt(int64(i), 10))
 		// xdebugf("b1: %s", b1)
 		// xdebugf("b1: %s", b1)
 		var err error
 		var err error
@@ -2630,6 +2630,50 @@ func TestBufioDecReader(t *testing.T) {
 	// println()
 	// println()
 }
 }
 
 
+func TestAtomic(t *testing.T) {
+	testOnce.Do(testInitAll)
+	// load, store, load, confirm
+	if true {
+		var a atomicTypeInfoSlice
+		l := a.load()
+		if l != nil {
+			failT(t, "atomic fail: %T, expected load return nil, received: %v", a, l)
+		}
+		l = append(l, rtid2ti{})
+		a.store(l)
+		l = a.load()
+		if len(l) != 1 {
+			failT(t, "atomic fail: %T, expected load to have length 1, received: %d", a, len(l))
+		}
+	}
+	if true {
+		var a atomicRtidFnSlice
+		l := a.load()
+		if l != nil {
+			failT(t, "atomic fail: %T, expected load return nil, received: %v", a, l)
+		}
+		l = append(l, codecRtidFn{})
+		a.store(l)
+		l = a.load()
+		if len(l) != 1 {
+			failT(t, "atomic fail: %T, expected load to have length 1, received: %d", a, len(l))
+		}
+	}
+	if true {
+		var a atomicClsErr
+		l := a.load()
+		if l.errClosed != nil {
+			failT(t, "atomic fail: %T, expected load return clsErr = nil, received: %v", a, l.errClosed)
+		}
+		l.errClosed = io.EOF
+		a.store(l)
+		l = a.load()
+		if l.errClosed != io.EOF {
+			failT(t, "atomic fail: %T, expected clsErr = io.EOF, received: %v", a, l.errClosed)
+		}
+	}
+}
+
 // -----------
 // -----------
 
 
 func TestJsonLargeInteger(t *testing.T) {
 func TestJsonLargeInteger(t *testing.T) {

+ 35 - 25
codec/decode.go

@@ -24,8 +24,8 @@ const (
 const (
 const (
 	decDefMaxDepth         = 1024 // maximum depth
 	decDefMaxDepth         = 1024 // maximum depth
 	decDefSliceCap         = 8
 	decDefSliceCap         = 8
-	decDefChanCap          = 64            // should be large, as cap cannot be expanded
-	decScratchByteArrayLen = cacheLineSize // - (8 * 1)
+	decDefChanCap          = 64                      // should be large, as cap cannot be expanded
+	decScratchByteArrayLen = cacheLineSize + (8 * 2) // - (8 * 1)
 )
 )
 
 
 var (
 var (
@@ -528,6 +528,7 @@ LOOP:
 	goto LOOP
 	goto LOOP
 }
 }
 
 
+//go:noinline
 func (z *ioDecReader) unreadn1() {
 func (z *ioDecReader) unreadn1() {
 	err := z.br.UnreadByte()
 	err := z.br.UnreadByte()
 	if err != nil {
 	if err != nil {
@@ -1654,7 +1655,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 	var rtelem0ZeroValid bool
 	var rtelem0ZeroValid bool
 	var decodeAsNil bool
 	var decodeAsNil bool
 	var j int
 	var j int
-	d.cfer()
+
 	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
 	for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
 		if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
 		if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
 			if hasLen {
 			if hasLen {
@@ -1687,7 +1688,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 				rv9 = reflect.New(rtelem0).Elem()
 				rv9 = reflect.New(rtelem0).Elem()
 			}
 			}
 			if fn == nil {
 			if fn == nil {
-				fn = d.cf.get(rtelem, true, true)
+				fn = d.h.fn(rtelem, true, true)
 			}
 			}
 			d.decodeValue(rv9, fn, true)
 			d.decodeValue(rv9, fn, true)
 			rv.Send(rv9)
 			rv.Send(rv9)
@@ -1732,7 +1733,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 				}
 				}
 
 
 				if fn == nil {
 				if fn == nil {
-					fn = d.cf.get(rtelem, true, true)
+					fn = d.h.fn(rtelem, true, true)
 				}
 				}
 				d.decodeValue(rv9, fn, true)
 				d.decodeValue(rv9, fn, true)
 			}
 			}
@@ -1822,7 +1823,7 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 	ktypeIsIntf := ktypeId == intfTypId
 	ktypeIsIntf := ktypeId == intfTypId
 	hasLen := containerLen > 0
 	hasLen := containerLen > 0
 	var kstrbs []byte
 	var kstrbs []byte
-	d.cfer()
+
 	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
 	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
 		if rvkMut || !rvkp.IsValid() {
 		if rvkMut || !rvkp.IsValid() {
 			rvkp = reflect.New(ktype)
 			rvkp = reflect.New(ktype)
@@ -1844,7 +1845,7 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 			// NOTE: if doing an insert, you MUST use a real string (not stringview)
 			// NOTE: if doing an insert, you MUST use a real string (not stringview)
 		} else {
 		} else {
 			if keyFn == nil {
 			if keyFn == nil {
-				keyFn = d.cf.get(ktypeLo, true, true)
+				keyFn = d.h.fn(ktypeLo, true, true)
 			}
 			}
 			d.decodeValue(rvk, keyFn, true)
 			d.decodeValue(rvk, keyFn, true)
 		}
 		}
@@ -1914,7 +1915,7 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 			rvk.SetString(d.string(kstrbs))
 			rvk.SetString(d.string(kstrbs))
 		}
 		}
 		if valFn == nil {
 		if valFn == nil {
-			valFn = d.cf.get(vtypeLo, true, true)
+			valFn = d.h.fn(vtypeLo, true, true)
 		}
 		}
 		d.decodeValue(rvv, valFn, true)
 		d.decodeValue(rvv, valFn, true)
 		// d.decodeValueFn(rvv, valFn)
 		// d.decodeValueFn(rvv, valFn)
@@ -2227,34 +2228,44 @@ func (z *decReaderSwitch) stopTrack() []byte {
 	}
 	}
 }
 }
 
 
+// func (z *decReaderSwitch) unreadn1() {
+// 	if z.bytes {
+// 		z.rb.unreadn1()
+// 	} else {
+// 		z.unreadn1IO()
+// 	}
+// }
+// func (z *decReaderSwitch) unreadn1IO() {
+// 	if z.bufio {
+// 		z.bi.unreadn1()
+// 	} else {
+// 		z.ri.unreadn1()
+// 	}
+// }
+
 func (z *decReaderSwitch) unreadn1() {
 func (z *decReaderSwitch) unreadn1() {
 	if z.bytes {
 	if z.bytes {
 		z.rb.unreadn1()
 		z.rb.unreadn1()
-	} else {
-		z.unreadn1IO()
-	}
-}
-
-func (z *decReaderSwitch) unreadn1IO() {
-	if z.bufio {
+	} else if z.bufio {
 		z.bi.unreadn1()
 		z.bi.unreadn1()
 	} else {
 	} else {
-		z.ri.unreadn1()
+		z.ri.unreadn1() // not inlined
 	}
 	}
 }
 }
+
 func (z *decReaderSwitch) readx(n uint) []byte {
 func (z *decReaderSwitch) readx(n uint) []byte {
 	if z.bytes {
 	if z.bytes {
 		return z.rb.readx(n)
 		return z.rb.readx(n)
 	}
 	}
 	return z.readxIO(n)
 	return z.readxIO(n)
 }
 }
-
 func (z *decReaderSwitch) readxIO(n uint) []byte {
 func (z *decReaderSwitch) readxIO(n uint) []byte {
 	if z.bufio {
 	if z.bufio {
 		return z.bi.readx(n)
 		return z.bi.readx(n)
 	}
 	}
 	return z.ri.readx(n)
 	return z.ri.readx(n)
 }
 }
+
 func (z *decReaderSwitch) readb(s []byte) {
 func (z *decReaderSwitch) readb(s []byte) {
 	if z.bytes {
 	if z.bytes {
 		z.rb.readb(s)
 		z.rb.readb(s)
@@ -2271,32 +2282,33 @@ func (z *decReaderSwitch) readbIO(s []byte) {
 		z.ri.readb(s)
 		z.ri.readb(s)
 	}
 	}
 }
 }
+
 func (z *decReaderSwitch) readn1() uint8 {
 func (z *decReaderSwitch) readn1() uint8 {
 	if z.bytes {
 	if z.bytes {
 		return z.rb.readn1()
 		return z.rb.readn1()
 	}
 	}
 	return z.readn1IO()
 	return z.readn1IO()
 }
 }
-
 func (z *decReaderSwitch) readn1IO() uint8 {
 func (z *decReaderSwitch) readn1IO() uint8 {
 	if z.bufio {
 	if z.bufio {
 		return z.bi.readn1()
 		return z.bi.readn1()
 	}
 	}
 	return z.ri.readn1()
 	return z.ri.readn1()
 }
 }
+
 func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
 func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
 	if z.bytes {
 	if z.bytes {
 		return z.rb.skip(accept)
 		return z.rb.skip(accept)
 	}
 	}
 	return z.skipIO(accept)
 	return z.skipIO(accept)
 }
 }
-
 func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) {
 func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) {
 	if z.bufio {
 	if z.bufio {
 		return z.bi.skip(accept)
 		return z.bi.skip(accept)
 	}
 	}
 	return z.ri.skip(accept)
 	return z.ri.skip(accept)
 }
 }
+
 func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
 func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
 	if z.bytes {
 	if z.bytes {
 		return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept)
 		return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept)
@@ -2351,13 +2363,12 @@ type Decoder struct {
 
 
 	decNakedPooler
 	decNakedPooler
 
 
-	h *BasicHandle
-
+	h  *BasicHandle
+	hh Handle
 	// ---- cpu cache line boundary?
 	// ---- cpu cache line boundary?
 	decReaderSwitch
 	decReaderSwitch
 
 
 	// ---- cpu cache line boundary?
 	// ---- cpu cache line boundary?
-	codecFnPooler
 	// cr containerStateRecv
 	// cr containerStateRecv
 	err error
 	err error
 
 
@@ -2401,7 +2412,7 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder {
 // var defaultDecNaked decNaked
 // var defaultDecNaked decNaked
 
 
 func newDecoder(h Handle) *Decoder {
 func newDecoder(h Handle) *Decoder {
-	d := &Decoder{h: h.getBasicHandle(), err: errDecoderNotInitialized}
+	d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized}
 	d.bytes = true
 	d.bytes = true
 	if useFinalizers {
 	if useFinalizers {
 		runtime.SetFinalizer(d, (*Decoder).finalize)
 		runtime.SetFinalizer(d, (*Decoder).finalize)
@@ -2631,7 +2642,6 @@ func (d *Decoder) Close() {
 		d.bi.bytesBufPooler.end()
 		d.bi.bytesBufPooler.end()
 	}
 	}
 	d.decNakedPooler.end()
 	d.decNakedPooler.end()
-	d.codecFnPooler.end()
 }
 }
 
 
 // // this is not a smart swallow, as it allocates objects and does unnecessary work.
 // // this is not a smart swallow, as it allocates objects and does unnecessary work.
@@ -2855,7 +2865,7 @@ func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) {
 
 
 	if fn == nil {
 	if fn == nil {
 		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
 		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
-		fn = d.cfer().get(rv.Type(), chkAll, true) // chkAll, chkAll)
+		fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll)
 	}
 	}
 	if fn.i.addrD {
 	if fn.i.addrD {
 		if rvpValid {
 		if rvpValid {

+ 11 - 16
codec/encode.go

@@ -584,7 +584,7 @@ func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 		// encoding type, because preEncodeValue may break it down to
 		// encoding type, because preEncodeValue may break it down to
 		// a concrete type and kInterface will bomb.
 		// a concrete type and kInterface will bomb.
 		if rtelem.Kind() != reflect.Interface {
 		if rtelem.Kind() != reflect.Interface {
-			fn = e.cfer().get(rtelem, true, true)
+			fn = e.h.fn(rtelem, true, true)
 		}
 		}
 		for j := 0; j < l; j++ {
 		for j := 0; j < l; j++ {
 			if elemsep {
 			if elemsep {
@@ -881,7 +881,7 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 		rtval = rtval.Elem()
 		rtval = rtval.Elem()
 	}
 	}
 	if rtval.Kind() != reflect.Interface {
 	if rtval.Kind() != reflect.Interface {
-		valFn = e.cfer().get(rtval, true, true)
+		valFn = e.h.fn(rtval, true, true)
 	}
 	}
 	mks := rv.MapKeys()
 	mks := rv.MapKeys()
 
 
@@ -898,7 +898,7 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 		}
 		}
 		if rtkey.Kind() != reflect.Interface {
 		if rtkey.Kind() != reflect.Interface {
 			// rtkeyid = rt2id(rtkey)
 			// rtkeyid = rt2id(rtkey)
-			keyFn = e.cfer().get(rtkey, true, true)
+			keyFn = e.h.fn(rtkey, true, true)
 		}
 		}
 	}
 	}
 
 
@@ -1095,8 +1095,8 @@ type encWriterSwitch struct {
 	bytes bool    // encoding to []byte
 	bytes bool    // encoding to []byte
 	esep  bool    // whether it has elem separators
 	esep  bool    // whether it has elem separators
 	isas  bool    // whether e.as != nil
 	isas  bool    // whether e.as != nil
-	js    bool    // captured here, so that no need to piggy back on *codecFner for this
-	be    bool    // captured here, so that no need to piggy back on *codecFner for this
+	js    bool    // is json encoder?
+	be    bool    // is binary encoder?
 	_     [2]byte // padding
 	_     [2]byte // padding
 	// _    [2]uint64 // padding
 	// _    [2]uint64 // padding
 	// _    uint64    // padding
 	// _    uint64    // padding
@@ -1253,20 +1253,19 @@ type Encoder struct {
 
 
 	err error
 	err error
 
 
-	h *BasicHandle
-
+	h  *BasicHandle
+	hh Handle
 	// ---- cpu cache line boundary? + 3
 	// ---- cpu cache line boundary? + 3
 	encWriterSwitch
 	encWriterSwitch
 
 
 	ci set
 	ci set
-	codecFnPooler
 
 
 	// Extensions can call Encode() within a current Encode() call.
 	// Extensions can call Encode() within a current Encode() call.
 	// We need to know when the top level Encode() call returns,
 	// We need to know when the top level Encode() call returns,
 	// so we can decide whether to Close() or not.
 	// so we can decide whether to Close() or not.
 	calls uint16 // what depth in mustEncode are we in now.
 	calls uint16 // what depth in mustEncode are we in now.
 
 
-	b [(3 * 8) - 2]byte // for encoding chan or (non-addressable) [N]byte
+	b [(5 * 8) - 2]byte // for encoding chan or (non-addressable) [N]byte
 
 
 	// ---- writable fields during execution --- *try* to keep in sep cache line
 	// ---- writable fields during execution --- *try* to keep in sep cache line
 
 
@@ -1298,7 +1297,7 @@ func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
 }
 }
 
 
 func newEncoder(h Handle) *Encoder {
 func newEncoder(h Handle) *Encoder {
-	e := &Encoder{h: h.getBasicHandle(), err: errEncoderNotInitialized}
+	e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized}
 	e.bytes = true
 	e.bytes = true
 	if useFinalizers {
 	if useFinalizers {
 		runtime.SetFinalizer(e, (*Encoder).finalize)
 		runtime.SetFinalizer(e, (*Encoder).finalize)
@@ -1369,10 +1368,7 @@ func (e *Encoder) ResetBytes(out *[]byte) {
 	if out == nil {
 	if out == nil {
 		return
 		return
 	}
 	}
-	var in []byte
-	if out != nil {
-		in = *out
-	}
+	var in []byte = *out
 	if in == nil {
 	if in == nil {
 		in = make([]byte, defEncByteBufSize)
 		in = make([]byte, defEncByteBufSize)
 	}
 	}
@@ -1538,7 +1534,6 @@ func (e *Encoder) Close() {
 		e.wf.buf = nil
 		e.wf.buf = nil
 		e.wf.bytesBufPooler.end()
 		e.wf.bytesBufPooler.end()
 	}
 	}
-	e.codecFnPooler.end()
 }
 }
 
 
 func (e *Encoder) encode(iv interface{}) {
 func (e *Encoder) encode(iv interface{}) {
@@ -1687,7 +1682,7 @@ TOP:
 	if fn == nil {
 	if fn == nil {
 		rt := rv.Type()
 		rt := rv.Type()
 		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
 		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
-		fn = e.cfer().get(rt, checkFastpath, true)
+		fn = e.h.fn(rt, checkFastpath, true)
 	}
 	}
 	if fn.i.addrE {
 	if fn.i.addrE {
 		if rvpValid {
 		if rvpValid {

File diff suppressed because it is too large
+ 204 - 110
codec/fast-path.generated.go


+ 1 - 1
codec/fast-path.not.go

@@ -35,7 +35,7 @@ type fastpathA [0]fastpathE
 func (x fastpathA) index(rtid uintptr) int { return -1 }
 func (x fastpathA) index(rtid uintptr) int { return -1 }
 
 
 func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
 func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
-	fn := d.cfer().get(uint8SliceTyp, true, true)
+	fn := d.h.fn(uint8SliceTyp, true, true)
 	d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
 	d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
 	return v, true
 	return v, true
 }
 }

+ 276 - 288
codec/helper.go

@@ -109,6 +109,7 @@ import (
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
+	"sync/atomic"
 	"time"
 	"time"
 )
 )
 
 
@@ -522,6 +523,11 @@ type BasicHandle struct {
 
 
 	intf2impls
 	intf2impls
 
 
+	inited uint32
+	_      uint32 // padding
+
+	// ---- cache line
+
 	RPCOptions
 	RPCOptions
 
 
 	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
 	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
@@ -553,6 +559,11 @@ type BasicHandle struct {
 	//    runtime.SetFinalizer(d, (*Decoder).Close)
 	//    runtime.SetFinalizer(d, (*Decoder).Close)
 	DoNotClose bool
 	DoNotClose bool
 
 
+	be bool   // is handle a binary encoding?
+	js bool   // is handle javascript handler?
+	n  byte   // first letter of handle name
+	_  uint16 // padding
+
 	// ---- cache line
 	// ---- cache line
 
 
 	DecodeOptions
 	DecodeOptions
@@ -562,6 +573,21 @@ type BasicHandle struct {
 	EncodeOptions
 	EncodeOptions
 
 
 	// noBuiltInTypeChecker
 	// noBuiltInTypeChecker
+
+	rtidFns atomicRtidFnSlice
+	mu      sync.Mutex
+	// r []uintptr     // rtids mapped to s above
+}
+
+// basicHandle returns an initialized BasicHandle from the Handle.
+func basicHandle(hh Handle) (x *BasicHandle) {
+	x = hh.getBasicHandle()
+	if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
+		x.be = hh.isBinary()
+		_, x.js = hh.(*JsonHandle)
+		x.n = hh.Name()[0]
+	}
+	return
 }
 }
 
 
 func (x *BasicHandle) getBasicHandle() *BasicHandle {
 func (x *BasicHandle) getBasicHandle() *BasicHandle {
@@ -575,6 +601,243 @@ func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo)
 	return x.TypeInfos.get(rtid, rt)
 	return x.TypeInfos.get(rtid, rt)
 }
 }
 
 
+func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
+	// binary search. adapted from sort/search.go.
+	// Note: we use goto (instead of for loop) so this can be inlined.
+
+	// h, i, j := 0, 0, len(s)
+	var h uint // var h, i uint
+	var j = uint(len(s))
+LOOP:
+	if i < j {
+		h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+		goto LOOP
+	}
+	if i < uint(len(s)) && s[i].rtid == rtid {
+		fn = s[i].fn
+	}
+	return
+}
+
+func (c *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
+	rtid := rt2id(rt)
+	sp := c.rtidFns.load()
+	if sp != nil {
+		if _, fn = findFn(sp, rtid); fn != nil {
+			// xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp))
+			return
+		}
+	}
+	// xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp))
+	fn = new(codecFn)
+	fi := &(fn.i)
+	ti := c.getTypeInfo(rtid, rt)
+	fi.ti = ti
+
+	rk := reflect.Kind(ti.kind)
+
+	if checkCodecSelfer && (ti.cs || ti.csp) {
+		fn.fe = (*Encoder).selferMarshal
+		fn.fd = (*Decoder).selferUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.csp
+		fi.addrE = ti.csp
+	} else if rtid == timeTypId && !c.TimeNotBuiltin {
+		fn.fe = (*Encoder).kTime
+		fn.fd = (*Decoder).kTime
+	} else if rtid == rawTypId {
+		fn.fe = (*Encoder).raw
+		fn.fd = (*Decoder).raw
+	} else if rtid == rawExtTypId {
+		fn.fe = (*Encoder).rawExt
+		fn.fd = (*Decoder).rawExt
+		fi.addrF = true
+		fi.addrD = true
+		fi.addrE = true
+	} else if xfFn := c.getExt(rtid); xfFn != nil {
+		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+		fn.fe = (*Encoder).ext
+		fn.fd = (*Decoder).ext
+		fi.addrF = true
+		fi.addrD = true
+		if rk == reflect.Struct || rk == reflect.Array {
+			fi.addrE = true
+		}
+	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
+		fn.fe = (*Encoder).binaryMarshal
+		fn.fd = (*Decoder).binaryUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.bup
+		fi.addrE = ti.bmp
+	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
+		//If JSON, we should check JSONMarshal before textMarshal
+		fn.fe = (*Encoder).jsonMarshal
+		fn.fd = (*Decoder).jsonUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.jup
+		fi.addrE = ti.jmp
+	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
+		fn.fe = (*Encoder).textMarshal
+		fn.fd = (*Decoder).textUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.tup
+		fi.addrE = ti.tmp
+	} else {
+		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+			if ti.pkgpath == "" { // un-named slice or map
+				if idx := fastpathAV.index(rtid); idx != -1 {
+					fn.fe = fastpathAV[idx].encfn
+					fn.fd = fastpathAV[idx].decfn
+					fi.addrD = true
+					fi.addrF = false
+				}
+			} else {
+				// use mapping for underlying type if there
+				var rtu reflect.Type
+				if rk == reflect.Map {
+					rtu = reflect.MapOf(ti.key, ti.elem)
+				} else {
+					rtu = reflect.SliceOf(ti.elem)
+				}
+				rtuid := rt2id(rtu)
+				if idx := fastpathAV.index(rtuid); idx != -1 {
+					xfnf := fastpathAV[idx].encfn
+					xrt := fastpathAV[idx].rt
+					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
+						xfnf(e, xf, xrv.Convert(xrt))
+					}
+					fi.addrD = true
+					fi.addrF = false // meaning it can be an address(ptr) or a value
+					xfnf2 := fastpathAV[idx].decfn
+					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+						if xrv.Kind() == reflect.Ptr {
+							xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
+						} else {
+							xfnf2(d, xf, xrv.Convert(xrt))
+						}
+					}
+				}
+			}
+		}
+		if fn.fe == nil && fn.fd == nil {
+			switch rk {
+			case reflect.Bool:
+				fn.fe = (*Encoder).kBool
+				fn.fd = (*Decoder).kBool
+			case reflect.String:
+				fn.fe = (*Encoder).kString
+				fn.fd = (*Decoder).kString
+			case reflect.Int:
+				fn.fd = (*Decoder).kInt
+				fn.fe = (*Encoder).kInt
+			case reflect.Int8:
+				fn.fe = (*Encoder).kInt8
+				fn.fd = (*Decoder).kInt8
+			case reflect.Int16:
+				fn.fe = (*Encoder).kInt16
+				fn.fd = (*Decoder).kInt16
+			case reflect.Int32:
+				fn.fe = (*Encoder).kInt32
+				fn.fd = (*Decoder).kInt32
+			case reflect.Int64:
+				fn.fe = (*Encoder).kInt64
+				fn.fd = (*Decoder).kInt64
+			case reflect.Uint:
+				fn.fd = (*Decoder).kUint
+				fn.fe = (*Encoder).kUint
+			case reflect.Uint8:
+				fn.fe = (*Encoder).kUint8
+				fn.fd = (*Decoder).kUint8
+			case reflect.Uint16:
+				fn.fe = (*Encoder).kUint16
+				fn.fd = (*Decoder).kUint16
+			case reflect.Uint32:
+				fn.fe = (*Encoder).kUint32
+				fn.fd = (*Decoder).kUint32
+			case reflect.Uint64:
+				fn.fe = (*Encoder).kUint64
+				fn.fd = (*Decoder).kUint64
+			case reflect.Uintptr:
+				fn.fe = (*Encoder).kUintptr
+				fn.fd = (*Decoder).kUintptr
+			case reflect.Float32:
+				fn.fe = (*Encoder).kFloat32
+				fn.fd = (*Decoder).kFloat32
+			case reflect.Float64:
+				fn.fe = (*Encoder).kFloat64
+				fn.fd = (*Decoder).kFloat64
+			case reflect.Invalid:
+				fn.fe = (*Encoder).kInvalid
+				fn.fd = (*Decoder).kErr
+			case reflect.Chan:
+				fi.seq = seqTypeChan
+				fn.fe = (*Encoder).kSlice
+				fn.fd = (*Decoder).kSlice
+			case reflect.Slice:
+				fi.seq = seqTypeSlice
+				fn.fe = (*Encoder).kSlice
+				fn.fd = (*Decoder).kSlice
+			case reflect.Array:
+				fi.seq = seqTypeArray
+				fn.fe = (*Encoder).kSlice
+				fi.addrF = false
+				fi.addrD = false
+				rt2 := reflect.SliceOf(ti.elem)
+				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+					d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
+				}
+				// fn.fd = (*Decoder).kArray
+			case reflect.Struct:
+				if ti.anyOmitEmpty || ti.mf || ti.mfp {
+					fn.fe = (*Encoder).kStruct
+				} else {
+					fn.fe = (*Encoder).kStructNoOmitempty
+				}
+				fn.fd = (*Decoder).kStruct
+			case reflect.Map:
+				fn.fe = (*Encoder).kMap
+				fn.fd = (*Decoder).kMap
+			case reflect.Interface:
+				// encode: reflect.Interface are handled already by preEncodeValue
+				fn.fd = (*Decoder).kInterface
+				fn.fe = (*Encoder).kErr
+			default:
+				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+				fn.fe = (*Encoder).kErr
+				fn.fd = (*Decoder).kErr
+			}
+		}
+	}
+
+	c.mu.Lock()
+	var sp2 []codecRtidFn
+	sp = c.rtidFns.load()
+	if sp == nil {
+		sp2 = []codecRtidFn{{rtid, fn}}
+		c.rtidFns.store(sp2)
+		// xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2))
+		// xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load()))
+	} else {
+		idx, fn2 := findFn(sp, rtid)
+		if fn2 == nil {
+			sp2 = make([]codecRtidFn, len(sp)+1)
+			copy(sp2, sp[:idx])
+			copy(sp2[idx+1:], sp[idx:])
+			sp2[idx] = codecRtidFn{rtid, fn}
+			c.rtidFns.store(sp2)
+			// xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2))
+
+		}
+	}
+	c.mu.Unlock()
+	return
+}
+
 // Handle defines a specific encoding format. It also stores any runtime state
 // Handle defines a specific encoding format. It also stores any runtime state
 // used during an Encoding or Decoding session e.g. stored state about Types, etc.
 // used during an Encoding or Decoding session e.g. stored state about Types, etc.
 //
 //
@@ -589,6 +852,8 @@ func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo)
 // Such a pre-configured Handle is safe for concurrent access.
 // Such a pre-configured Handle is safe for concurrent access.
 type Handle interface {
 type Handle interface {
 	Name() string
 	Name() string
+	// return the basic handle. It may not have been inited.
+	// Prefer to use basicHandle() helper function that ensures it has been inited.
 	getBasicHandle() *BasicHandle
 	getBasicHandle() *BasicHandle
 	recreateEncDriver(encDriver) bool
 	recreateEncDriver(encDriver) bool
 	newEncDriver(w *Encoder) encDriver
 	newEncDriver(w *Encoder) encDriver
@@ -1245,7 +1510,7 @@ func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
 	return
 	return
 }
 }
 
 
-func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
+func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
 	// binary search. adapted from sort/search.go.
 	// binary search. adapted from sort/search.go.
 	// Note: we use goto (instead of for loop) so this can be inlined.
 	// Note: we use goto (instead of for loop) so this can be inlined.
 
 
@@ -1276,7 +1541,7 @@ LOOP:
 func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 	sp := x.infos.load()
 	sp := x.infos.load()
 	if sp != nil {
 	if sp != nil {
-		_, pti = x.find(sp, rtid)
+		_, pti = findTypeInfo(sp, rtid)
 		if pti != nil {
 		if pti != nil {
 			return
 			return
 		}
 		}
@@ -1356,20 +1621,21 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 
 
 	x.mu.Lock()
 	x.mu.Lock()
 	sp = x.infos.load()
 	sp = x.infos.load()
+	var sp2 []rtid2ti
 	if sp == nil {
 	if sp == nil {
 		pti = &ti
 		pti = &ti
-		vs := []rtid2ti{{rtid, pti}}
-		x.infos.store(vs)
+		sp2 = []rtid2ti{{rtid, pti}}
+		x.infos.store(sp2)
 	} else {
 	} else {
 		var idx uint
 		var idx uint
-		idx, pti = x.find(sp, rtid)
+		idx, pti = findTypeInfo(sp, rtid)
 		if pti == nil {
 		if pti == nil {
 			pti = &ti
 			pti = &ti
-			vs := make([]rtid2ti, len(sp)+1)
-			copy(vs, sp[:idx])
-			copy(vs[idx+1:], sp[idx:])
-			vs[idx] = rtid2ti{rtid, pti}
-			x.infos.store(vs)
+			sp2 = make([]rtid2ti, len(sp)+1)
+			copy(sp2, sp[:idx])
+			copy(sp2[idx+1:], sp[idx:])
+			sp2[idx] = rtid2ti{rtid, pti}
+			x.infos.store(sp2)
 		}
 		}
 	}
 	}
 	x.mu.Unlock()
 	x.mu.Unlock()
@@ -1738,7 +2004,6 @@ type codecFnInfo struct {
 	addrD bool
 	addrD bool
 	addrF bool // if addrD, this says whether decode function can take a value or a ptr
 	addrF bool // if addrD, this says whether decode function can take a value or a ptr
 	addrE bool
 	addrE bool
-	ready bool // ready to use
 }
 }
 
 
 // codecFn encapsulates the captured variables and the encode function.
 // codecFn encapsulates the captured variables and the encode function.
@@ -1757,271 +2022,6 @@ type codecRtidFn struct {
 	fn   *codecFn
 	fn   *codecFn
 }
 }
 
 
-type codecFner struct {
-	// hh Handle
-	h  *BasicHandle
-	s  []codecRtidFn
-	be bool
-	js bool
-	_  [6]byte   // padding
-	_  [3]uint64 // padding
-}
-
-func (c *codecFner) reset(hh Handle) {
-	bh := hh.getBasicHandle()
-	// only reset iff extensions changed or *TypeInfos changed
-	var hhSame = true &&
-		c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
-		len(c.h.extHandle) == len(bh.extHandle) &&
-		(len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
-	if !hhSame {
-		// c.hh = hh
-		c.h, bh = bh, c.h // swap both
-		_ = bh
-		_, c.js = hh.(*JsonHandle)
-		c.be = hh.isBinary()
-		if len(c.s) > 0 {
-			c.s = c.s[:0]
-		}
-		// for i := range c.s {
-		// 	c.s[i].fn.i.ready = false
-		// }
-	}
-}
-
-func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
-	rtid := rt2id(rt)
-
-	for _, x := range c.s {
-		if x.rtid == rtid {
-			// if rtid exists, then there's a *codenFn attached (non-nil)
-			fn = x.fn
-			if fn.i.ready {
-				return
-			}
-			break
-		}
-	}
-	var ti *typeInfo
-	if fn == nil {
-		fn = new(codecFn)
-		if c.s == nil {
-			c.s = make([]codecRtidFn, 0, 8)
-		}
-		c.s = append(c.s, codecRtidFn{rtid, fn})
-	} else {
-		ti = fn.i.ti
-		*fn = codecFn{}
-		fn.i.ti = ti
-		// fn.fe, fn.fd = nil, nil
-	}
-	fi := &(fn.i)
-	fi.ready = true
-	if ti == nil {
-		ti = c.h.getTypeInfo(rtid, rt)
-		fi.ti = ti
-	}
-
-	rk := reflect.Kind(ti.kind)
-
-	if checkCodecSelfer && (ti.cs || ti.csp) {
-		fn.fe = (*Encoder).selferMarshal
-		fn.fd = (*Decoder).selferUnmarshal
-		fi.addrF = true
-		fi.addrD = ti.csp
-		fi.addrE = ti.csp
-	} else if rtid == timeTypId && !c.h.TimeNotBuiltin {
-		fn.fe = (*Encoder).kTime
-		fn.fd = (*Decoder).kTime
-	} else if rtid == rawTypId {
-		fn.fe = (*Encoder).raw
-		fn.fd = (*Decoder).raw
-	} else if rtid == rawExtTypId {
-		fn.fe = (*Encoder).rawExt
-		fn.fd = (*Decoder).rawExt
-		fi.addrF = true
-		fi.addrD = true
-		fi.addrE = true
-	} else if xfFn := c.h.getExt(rtid); xfFn != nil {
-		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
-		fn.fe = (*Encoder).ext
-		fn.fd = (*Decoder).ext
-		fi.addrF = true
-		fi.addrD = true
-		if rk == reflect.Struct || rk == reflect.Array {
-			fi.addrE = true
-		}
-	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
-		fn.fe = (*Encoder).binaryMarshal
-		fn.fd = (*Decoder).binaryUnmarshal
-		fi.addrF = true
-		fi.addrD = ti.bup
-		fi.addrE = ti.bmp
-	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
-		//If JSON, we should check JSONMarshal before textMarshal
-		fn.fe = (*Encoder).jsonMarshal
-		fn.fd = (*Decoder).jsonUnmarshal
-		fi.addrF = true
-		fi.addrD = ti.jup
-		fi.addrE = ti.jmp
-	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
-		fn.fe = (*Encoder).textMarshal
-		fn.fd = (*Decoder).textUnmarshal
-		fi.addrF = true
-		fi.addrD = ti.tup
-		fi.addrE = ti.tmp
-	} else {
-		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
-			if ti.pkgpath == "" { // un-named slice or map
-				if idx := fastpathAV.index(rtid); idx != -1 {
-					fn.fe = fastpathAV[idx].encfn
-					fn.fd = fastpathAV[idx].decfn
-					fi.addrD = true
-					fi.addrF = false
-				}
-			} else {
-				// use mapping for underlying type if there
-				var rtu reflect.Type
-				if rk == reflect.Map {
-					rtu = reflect.MapOf(ti.key, ti.elem)
-				} else {
-					rtu = reflect.SliceOf(ti.elem)
-				}
-				rtuid := rt2id(rtu)
-				if idx := fastpathAV.index(rtuid); idx != -1 {
-					xfnf := fastpathAV[idx].encfn
-					xrt := fastpathAV[idx].rt
-					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
-						xfnf(e, xf, xrv.Convert(xrt))
-					}
-					fi.addrD = true
-					fi.addrF = false // meaning it can be an address(ptr) or a value
-					xfnf2 := fastpathAV[idx].decfn
-					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
-						if xrv.Kind() == reflect.Ptr {
-							xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
-						} else {
-							xfnf2(d, xf, xrv.Convert(xrt))
-						}
-					}
-				}
-			}
-		}
-		if fn.fe == nil && fn.fd == nil {
-			switch rk {
-			case reflect.Bool:
-				fn.fe = (*Encoder).kBool
-				fn.fd = (*Decoder).kBool
-			case reflect.String:
-				fn.fe = (*Encoder).kString
-				fn.fd = (*Decoder).kString
-			case reflect.Int:
-				fn.fd = (*Decoder).kInt
-				fn.fe = (*Encoder).kInt
-			case reflect.Int8:
-				fn.fe = (*Encoder).kInt8
-				fn.fd = (*Decoder).kInt8
-			case reflect.Int16:
-				fn.fe = (*Encoder).kInt16
-				fn.fd = (*Decoder).kInt16
-			case reflect.Int32:
-				fn.fe = (*Encoder).kInt32
-				fn.fd = (*Decoder).kInt32
-			case reflect.Int64:
-				fn.fe = (*Encoder).kInt64
-				fn.fd = (*Decoder).kInt64
-			case reflect.Uint:
-				fn.fd = (*Decoder).kUint
-				fn.fe = (*Encoder).kUint
-			case reflect.Uint8:
-				fn.fe = (*Encoder).kUint8
-				fn.fd = (*Decoder).kUint8
-			case reflect.Uint16:
-				fn.fe = (*Encoder).kUint16
-				fn.fd = (*Decoder).kUint16
-			case reflect.Uint32:
-				fn.fe = (*Encoder).kUint32
-				fn.fd = (*Decoder).kUint32
-			case reflect.Uint64:
-				fn.fe = (*Encoder).kUint64
-				fn.fd = (*Decoder).kUint64
-			case reflect.Uintptr:
-				fn.fe = (*Encoder).kUintptr
-				fn.fd = (*Decoder).kUintptr
-			case reflect.Float32:
-				fn.fe = (*Encoder).kFloat32
-				fn.fd = (*Decoder).kFloat32
-			case reflect.Float64:
-				fn.fe = (*Encoder).kFloat64
-				fn.fd = (*Decoder).kFloat64
-			case reflect.Invalid:
-				fn.fe = (*Encoder).kInvalid
-				fn.fd = (*Decoder).kErr
-			case reflect.Chan:
-				fi.seq = seqTypeChan
-				fn.fe = (*Encoder).kSlice
-				fn.fd = (*Decoder).kSlice
-			case reflect.Slice:
-				fi.seq = seqTypeSlice
-				fn.fe = (*Encoder).kSlice
-				fn.fd = (*Decoder).kSlice
-			case reflect.Array:
-				fi.seq = seqTypeArray
-				fn.fe = (*Encoder).kSlice
-				fi.addrF = false
-				fi.addrD = false
-				rt2 := reflect.SliceOf(ti.elem)
-				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
-					d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
-				}
-				// fn.fd = (*Decoder).kArray
-			case reflect.Struct:
-				if ti.anyOmitEmpty || ti.mf || ti.mfp {
-					fn.fe = (*Encoder).kStruct
-				} else {
-					fn.fe = (*Encoder).kStructNoOmitempty
-				}
-				fn.fd = (*Decoder).kStruct
-			case reflect.Map:
-				fn.fe = (*Encoder).kMap
-				fn.fd = (*Decoder).kMap
-			case reflect.Interface:
-				// encode: reflect.Interface are handled already by preEncodeValue
-				fn.fd = (*Decoder).kInterface
-				fn.fe = (*Encoder).kErr
-			default:
-				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
-				fn.fe = (*Encoder).kErr
-				fn.fd = (*Decoder).kErr
-			}
-		}
-	}
-	return
-}
-
-type codecFnPooler struct {
-	cf  *codecFner
-	cfp *sync.Pool
-	hh  Handle
-}
-
-func (d *codecFnPooler) cfer() *codecFner {
-	if d.cf == nil {
-		var v interface{}
-		d.cfp, v = pool.codecFner()
-		d.cf = v.(*codecFner)
-		d.cf.reset(d.hh)
-	}
-	return d.cf
-}
-
-func (d *codecFnPooler) end() {
-	if d.cf != nil {
-		d.cfp.Put(d.cf)
-		d.cf, d.cfp = nil, nil
-	}
-}
-
 // ----
 // ----
 
 
 // these "checkOverflow" functions must be inlinable, and not call anybody.
 // these "checkOverflow" functions must be inlinable, and not call anybody.
@@ -2374,7 +2374,6 @@ type pooler struct {
 
 
 	// lifetime-scoped pooled resources
 	// lifetime-scoped pooled resources
 	dn                                 sync.Pool // for decNaked
 	dn                                 sync.Pool // for decNaked
-	cfn                                sync.Pool // for codecFner
 	buf1k, buf2k, buf4k, buf8k, buf16k sync.Pool // for [N]byte
 	buf1k, buf2k, buf4k, buf8k, buf16k sync.Pool // for [N]byte
 }
 }
 
 
@@ -2396,8 +2395,6 @@ func (p *pooler) init() {
 	p.buf16k.New = func() interface{} { return new([16 * 1024]byte) }
 	p.buf16k.New = func() interface{} { return new([16 * 1024]byte) }
 
 
 	p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
 	p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
-
-	p.cfn.New = func() interface{} { return new(codecFner) }
 }
 }
 
 
 func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) {
 func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) {
@@ -2435,9 +2432,6 @@ func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) {
 func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
 func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
 	return &p.dn, p.dn.Get()
 	return &p.dn, p.dn.Get()
 }
 }
-func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) {
-	return &p.cfn, p.cfn.Get()
-}
 func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
 func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
 	return &p.tiload, p.tiload.Get()
 	return &p.tiload, p.tiload.Get()
 }
 }
@@ -2450,18 +2444,12 @@ func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
 // func (p *pooler) decNakedGet() (v interface{}) {
 // func (p *pooler) decNakedGet() (v interface{}) {
 // 	return p.dn.Get()
 // 	return p.dn.Get()
 // }
 // }
-// func (p *pooler) codecFnerGet() (v interface{}) {
-// 	return p.cfn.Get()
-// }
 // func (p *pooler) tiLoadGet() (v interface{}) {
 // func (p *pooler) tiLoadGet() (v interface{}) {
 // 	return p.tiload.Get()
 // 	return p.tiload.Get()
 // }
 // }
 // func (p *pooler) decNakedPut(v interface{}) {
 // func (p *pooler) decNakedPut(v interface{}) {
 // 	p.dn.Put(v)
 // 	p.dn.Put(v)
 // }
 // }
-// func (p *pooler) codecFnerPut(v interface{}) {
-// 	p.cfn.Put(v)
-// }
 // func (p *pooler) tiLoadPut(v interface{}) {
 // func (p *pooler) tiLoadPut(v interface{}) {
 // 	p.tiload.Put(v)
 // 	p.tiload.Put(v)
 // }
 // }

+ 24 - 10
codec/helper_not_unsafe.go

@@ -98,12 +98,11 @@ type atomicClsErr struct {
 	v atomic.Value
 	v atomic.Value
 }
 }
 
 
-func (x *atomicClsErr) load() clsErr {
-	i := x.v.Load()
-	if i == nil {
-		return clsErr{}
+func (x *atomicClsErr) load() (e clsErr) {
+	if i := x.v.Load(); i != nil {
+		e = i.(clsErr)
 	}
 	}
-	return i.(clsErr)
+	return
 }
 }
 
 
 func (x *atomicClsErr) store(p clsErr) {
 func (x *atomicClsErr) store(p clsErr) {
@@ -115,18 +114,33 @@ type atomicTypeInfoSlice struct { // expected to be 2 words
 	v atomic.Value
 	v atomic.Value
 }
 }
 
 
-func (x *atomicTypeInfoSlice) load() []rtid2ti {
-	i := x.v.Load()
-	if i == nil {
-		return nil
+func (x *atomicTypeInfoSlice) load() (e []rtid2ti) {
+	if i := x.v.Load(); i != nil {
+		e = i.([]rtid2ti)
 	}
 	}
-	return i.([]rtid2ti)
+	return
 }
 }
 
 
 func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
 func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
 	x.v.Store(p)
 	x.v.Store(p)
 }
 }
 
 
+// --------------------------
+type atomicRtidFnSlice struct { // expected to be 2 words
+	v atomic.Value
+}
+
+func (x *atomicRtidFnSlice) load() (e []codecRtidFn) {
+	if i := x.v.Load(); i != nil {
+		e = i.([]codecRtidFn)
+	}
+	return
+}
+
+func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
+	x.v.Store(p)
+}
+
 // --------------------------
 // --------------------------
 func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
 func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
 	rv.SetBytes(d.rawBytes())
 	rv.SetBytes(d.rawBytes())

+ 42 - 23
codec/helper_unsafe.go

@@ -176,46 +176,65 @@ func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) b
 
 
 // --------------------------
 // --------------------------
 
 
-// atomicTypeInfoSlice contains length and pointer to the array for a slice.
-// It is expected to be 2 words.
+// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
 //
 //
-// Previously, we atomically loaded and stored the length and array pointer separately,
-// which could lead to some races.
-// We now just atomically store and load the pointer to the value directly.
+// Note that we do not atomically load/store length and data pointer separately,
+// as this could lead to some races. Instead, we atomically load/store cappedSlice.
+//
+// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.
 
 
-type atomicTypeInfoSlice struct { // expected to be 2 words
-	l int            // length of data array (must be first in struct, for 64-bit alignment in i386)
-	v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
+// ----------------------
+type atomicTypeInfoSlice struct {
+	v unsafe.Pointer // *[]rtid2ti
+	_ uintptr        // padding (atomicXXX expected to be 2 words)
 }
 }
 
 
-func (x *atomicTypeInfoSlice) load() []rtid2ti {
-	xp := unsafe.Pointer(x)
-	x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
-	if x2.l == 0 {
-		return nil
+func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
+	x2 := atomic.LoadPointer(&x.v)
+	if x2 != nil {
+		s = *(*[]rtid2ti)(x2)
 	}
 	}
-	return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
+	return
 }
 }
 
 
 func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
 func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
-	s := (*unsafeSlice)(unsafe.Pointer(&p))
-	xp := unsafe.Pointer(x)
-	atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
+}
+
+// --------------------------
+type atomicRtidFnSlice struct {
+	v unsafe.Pointer // *[]codecRtidFn
+	_ uintptr        // padding (atomicXXX expected to be 2 words)
+}
+
+func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
+	x2 := atomic.LoadPointer(&x.v)
+	if x2 != nil {
+		s = *(*[]codecRtidFn)(x2)
+	}
+	return
+}
+
+func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
 }
 }
 
 
 // --------------------------
 // --------------------------
 type atomicClsErr struct {
 type atomicClsErr struct {
-	v clsErr
+	v unsafe.Pointer // *clsErr
+	_ uintptr        // padding (atomicXXX expected to be 2 words)
 }
 }
 
 
-func (x *atomicClsErr) load() clsErr {
-	xp := unsafe.Pointer(&x.v)
-	return *(*clsErr)(atomic.LoadPointer(&xp))
+func (x *atomicClsErr) load() (e clsErr) {
+	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
+	if x2 != nil {
+		e = *x2
+	}
+	return
 }
 }
 
 
 func (x *atomicClsErr) store(p clsErr) {
 func (x *atomicClsErr) store(p clsErr) {
-	xp := unsafe.Pointer(&x.v)
-	atomic.StorePointer(&xp, unsafe.Pointer(&p))
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
 }
 }
 
 
 // --------------------------
 // --------------------------

+ 1 - 1
codec/rpc.go

@@ -57,7 +57,7 @@ func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
 	// always ensure that we use a flusher, and always flush what was written to the connection.
 	// always ensure that we use a flusher, and always flush what was written to the connection.
 	// we lose nothing by using a buffered writer internally.
 	// we lose nothing by using a buffered writer internally.
 	f, ok := w.(ioFlusher)
 	f, ok := w.(ioFlusher)
-	bh := h.getBasicHandle()
+	bh := basicHandle(h)
 	if !bh.RPCNoBuffer {
 	if !bh.RPCNoBuffer {
 		if bh.WriterBufferSize <= 0 {
 		if bh.WriterBufferSize <= 0 {
 			if !ok {
 			if !ok {

+ 3 - 0
codec/shared_test.go

@@ -140,6 +140,9 @@ func init() {
 	testHandles = append(testHandles,
 	testHandles = append(testHandles,
 		// testNoopH,
 		// testNoopH,
 		testMsgpackH, testBincH, testSimpleH, testCborH, testJsonH)
 		testMsgpackH, testBincH, testSimpleH, testCborH, testJsonH)
+	for _, h := range testHandles {
+		_ = basicHandle(h) // ensure all basic handles are initialized
+	}
 	// set DoNotClose on each handle
 	// set DoNotClose on each handle
 	testMsgpackH.DoNotClose = true
 	testMsgpackH.DoNotClose = true
 	testBincH.DoNotClose = true
 	testBincH.DoNotClose = true

+ 17 - 125
codec/z_all_test.go

@@ -131,131 +131,15 @@ func testCodecGroup(t *testing.T) {
 	// println("running testcodecsuite")
 	// println("running testcodecsuite")
 	// <setup code>
 	// <setup code>
 
 
-	t.Run("TestBincCodecsTable", TestBincCodecsTable)
-	t.Run("TestBincCodecsMisc", TestBincCodecsMisc)
-	t.Run("TestBincCodecsEmbeddedPointer", TestBincCodecsEmbeddedPointer)
-	t.Run("TestBincStdEncIntf", TestBincStdEncIntf)
-	t.Run("TestBincMammoth", TestBincMammoth)
-	t.Run("TestSimpleCodecsTable", TestSimpleCodecsTable)
-	t.Run("TestSimpleCodecsMisc", TestSimpleCodecsMisc)
-	t.Run("TestSimpleCodecsEmbeddedPointer", TestSimpleCodecsEmbeddedPointer)
-	t.Run("TestSimpleStdEncIntf", TestSimpleStdEncIntf)
-	t.Run("TestSimpleMammoth", TestSimpleMammoth)
-	t.Run("TestMsgpackCodecsTable", TestMsgpackCodecsTable)
-	t.Run("TestMsgpackCodecsMisc", TestMsgpackCodecsMisc)
-	t.Run("TestMsgpackCodecsEmbeddedPointer", TestMsgpackCodecsEmbeddedPointer)
-	t.Run("TestMsgpackStdEncIntf", TestMsgpackStdEncIntf)
-	t.Run("TestMsgpackMammoth", TestMsgpackMammoth)
-	t.Run("TestCborCodecsTable", TestCborCodecsTable)
-	t.Run("TestCborCodecsMisc", TestCborCodecsMisc)
-	t.Run("TestCborCodecsEmbeddedPointer", TestCborCodecsEmbeddedPointer)
-	t.Run("TestCborMapEncodeForCanonical", TestCborMapEncodeForCanonical)
-	t.Run("TestCborCodecChan", TestCborCodecChan)
-	t.Run("TestCborStdEncIntf", TestCborStdEncIntf)
-	t.Run("TestCborMammoth", TestCborMammoth)
-	t.Run("TestJsonCodecsTable", TestJsonCodecsTable)
-	t.Run("TestJsonCodecsMisc", TestJsonCodecsMisc)
-	t.Run("TestJsonCodecsEmbeddedPointer", TestJsonCodecsEmbeddedPointer)
-	t.Run("TestJsonCodecChan", TestJsonCodecChan)
-	t.Run("TestJsonStdEncIntf", TestJsonStdEncIntf)
-	t.Run("TestJsonMammoth", TestJsonMammoth)
-	t.Run("TestJsonRaw", TestJsonRaw)
-	t.Run("TestBincRaw", TestBincRaw)
-	t.Run("TestMsgpackRaw", TestMsgpackRaw)
-	t.Run("TestSimpleRaw", TestSimpleRaw)
-	t.Run("TestCborRaw", TestCborRaw)
-	t.Run("TestAllEncCircularRef", TestAllEncCircularRef)
-	t.Run("TestAllAnonCycle", TestAllAnonCycle)
-	t.Run("TestBincRpcGo", TestBincRpcGo)
-	t.Run("TestSimpleRpcGo", TestSimpleRpcGo)
-	t.Run("TestMsgpackRpcGo", TestMsgpackRpcGo)
-	t.Run("TestCborRpcGo", TestCborRpcGo)
-	t.Run("TestJsonRpcGo", TestJsonRpcGo)
-	t.Run("TestMsgpackRpcSpec", TestMsgpackRpcSpec)
-	t.Run("TestBincUnderlyingType", TestBincUnderlyingType)
-	t.Run("TestJsonLargeInteger", TestJsonLargeInteger)
-	t.Run("TestJsonDecodeNonStringScalarInStringContext", TestJsonDecodeNonStringScalarInStringContext)
-	t.Run("TestJsonEncodeIndent", TestJsonEncodeIndent)
-
-	t.Run("TestJsonSwallowAndZero", TestJsonSwallowAndZero)
-	t.Run("TestCborSwallowAndZero", TestCborSwallowAndZero)
-	t.Run("TestMsgpackSwallowAndZero", TestMsgpackSwallowAndZero)
-	t.Run("TestBincSwallowAndZero", TestBincSwallowAndZero)
-	t.Run("TestSimpleSwallowAndZero", TestSimpleSwallowAndZero)
-	t.Run("TestJsonRawExt", TestJsonRawExt)
-	t.Run("TestCborRawExt", TestCborRawExt)
-	t.Run("TestMsgpackRawExt", TestMsgpackRawExt)
-	t.Run("TestBincRawExt", TestBincRawExt)
-	t.Run("TestSimpleRawExt", TestSimpleRawExt)
-	t.Run("TestJsonMapStructKey", TestJsonMapStructKey)
-	t.Run("TestCborMapStructKey", TestCborMapStructKey)
-	t.Run("TestMsgpackMapStructKey", TestMsgpackMapStructKey)
-	t.Run("TestBincMapStructKey", TestBincMapStructKey)
-	t.Run("TestSimpleMapStructKey", TestSimpleMapStructKey)
-	t.Run("TestJsonDecodeNilMapValue", TestJsonDecodeNilMapValue)
-	t.Run("TestCborDecodeNilMapValue", TestCborDecodeNilMapValue)
-	t.Run("TestMsgpackDecodeNilMapValue", TestMsgpackDecodeNilMapValue)
-	t.Run("TestBincDecodeNilMapValue", TestBincDecodeNilMapValue)
-	t.Run("TestSimpleDecodeNilMapValue", TestSimpleDecodeNilMapValue)
-	t.Run("TestJsonEmbeddedFieldPrecedence", TestJsonEmbeddedFieldPrecedence)
-	t.Run("TestCborEmbeddedFieldPrecedence", TestCborEmbeddedFieldPrecedence)
-	t.Run("TestMsgpackEmbeddedFieldPrecedence", TestMsgpackEmbeddedFieldPrecedence)
-	t.Run("TestBincEmbeddedFieldPrecedence", TestBincEmbeddedFieldPrecedence)
-	t.Run("TestSimpleEmbeddedFieldPrecedence", TestSimpleEmbeddedFieldPrecedence)
-	t.Run("TestJsonLargeContainerLen", TestJsonLargeContainerLen)
-	t.Run("TestCborLargeContainerLen", TestCborLargeContainerLen)
-	t.Run("TestMsgpackLargeContainerLen", TestMsgpackLargeContainerLen)
-	t.Run("TestBincLargeContainerLen", TestBincLargeContainerLen)
-	t.Run("TestSimpleLargeContainerLen", TestSimpleLargeContainerLen)
-	t.Run("TestJsonMammothMapsAndSlices", TestJsonMammothMapsAndSlices)
-	t.Run("TestCborMammothMapsAndSlices", TestCborMammothMapsAndSlices)
-	t.Run("TestMsgpackMammothMapsAndSlices", TestMsgpackMammothMapsAndSlices)
-	t.Run("TestBincMammothMapsAndSlices", TestBincMammothMapsAndSlices)
-	t.Run("TestSimpleMammothMapsAndSlices", TestSimpleMammothMapsAndSlices)
-	t.Run("TestJsonTime", TestJsonTime)
-	t.Run("TestCborTime", TestCborTime)
-	t.Run("TestMsgpackTime", TestMsgpackTime)
-	t.Run("TestBincTime", TestBincTime)
-	t.Run("TestSimpleTime", TestSimpleTime)
-	t.Run("TestJsonUintToInt", TestJsonUintToInt)
-	t.Run("TestCborUintToInt", TestCborUintToInt)
-	t.Run("TestMsgpackUintToInt", TestMsgpackUintToInt)
-	t.Run("TestBincUintToInt", TestBincUintToInt)
-	t.Run("TestSimpleUintToInt", TestSimpleUintToInt)
-	t.Run("TestJsonDifferentMapOrSliceType", TestJsonDifferentMapOrSliceType)
-	t.Run("TestCborDifferentMapOrSliceType", TestCborDifferentMapOrSliceType)
-	t.Run("TestMsgpackDifferentMapOrSliceType", TestMsgpackDifferentMapOrSliceType)
-	t.Run("TestBincDifferentMapOrSliceType", TestBincDifferentMapOrSliceType)
-	t.Run("TestSimpleDifferentMapOrSliceType", TestSimpleDifferentMapOrSliceType)
-	t.Run("TestJsonScalars", TestJsonScalars)
-	t.Run("TestCborScalars", TestCborScalars)
-	t.Run("TestMsgpackScalars", TestMsgpackScalars)
-	t.Run("TestBincScalars", TestBincScalars)
-	t.Run("TestSimpleScalars", TestSimpleScalars)
-	t.Run("TestJsonOmitempty", TestJsonOmitempty)
-	t.Run("TestCborOmitempty", TestCborOmitempty)
-	t.Run("TestMsgpackOmitempty", TestMsgpackOmitempty)
-	t.Run("TestBincOmitempty", TestBincOmitempty)
-	t.Run("TestSimpleOmitempty", TestSimpleOmitempty)
-	t.Run("TestJsonIntfMapping", TestJsonIntfMapping)
-	t.Run("TestCborIntfMapping", TestCborIntfMapping)
-	t.Run("TestMsgpackIntfMapping", TestMsgpackIntfMapping)
-	t.Run("TestBincIntfMapping", TestBincIntfMapping)
-	t.Run("TestSimpleIntfMapping", TestSimpleIntfMapping)
-	t.Run("TestJsonMissingFields", TestJsonMissingFields)
-	t.Run("TestCborMissingFields", TestCborMissingFields)
-	t.Run("TestMsgpackMissingFields", TestMsgpackMissingFields)
-	t.Run("TestBincMissingFields", TestBincMissingFields)
-	t.Run("TestSimpleMissingFields", TestSimpleMissingFields)
-	t.Run("TestJsonMaxDepth", TestJsonMaxDepth)
-	t.Run("TestCborMaxDepth", TestCborMaxDepth)
-	t.Run("TestMsgpackMaxDepth", TestMsgpackMaxDepth)
-	t.Run("TestBincMaxDepth", TestBincMaxDepth)
-	t.Run("TestSimpleMaxDepth", TestSimpleMaxDepth)
+	testJsonGroup(t)
+	testBincGroup(t)
+	testCborGroup(t)
+	testMsgpackGroup(t)
+	testSimpleGroup(t)
+	// testSimpleMammothGroup(t)
+	// testRpcGroup(t)
+	testNonHandlesGroup(t)
 
 
-	t.Run("TestJsonInvalidUnicode", TestJsonInvalidUnicode)
-	t.Run("TestCborHalfFloat", TestCborHalfFloat)
-	t.Run("TestMsgpackDecodeMapAndExtSizeMismatch", TestMsgpackDecodeMapAndExtSizeMismatch)
 	// <tear-down code>
 	// <tear-down code>
 }
 }
 
 
@@ -298,7 +182,7 @@ func testBincGroup(t *testing.T) {
 	t.Run("TestBincStdEncIntf", TestBincStdEncIntf)
 	t.Run("TestBincStdEncIntf", TestBincStdEncIntf)
 	t.Run("TestBincMammoth", TestBincMammoth)
 	t.Run("TestBincMammoth", TestBincMammoth)
 	t.Run("TestBincRaw", TestBincRaw)
 	t.Run("TestBincRaw", TestBincRaw)
-	t.Run("TestSimpleRpcGo", TestSimpleRpcGo)
+	t.Run("TestBincRpcGo", TestBincRpcGo)
 	t.Run("TestBincUnderlyingType", TestBincUnderlyingType)
 	t.Run("TestBincUnderlyingType", TestBincUnderlyingType)
 
 
 	t.Run("TestBincSwallowAndZero", TestBincSwallowAndZero)
 	t.Run("TestBincSwallowAndZero", TestBincSwallowAndZero)
@@ -414,6 +298,14 @@ func testRpcGroup(t *testing.T) {
 	t.Run("TestMsgpackRpcSpec", TestMsgpackRpcSpec)
 	t.Run("TestMsgpackRpcSpec", TestMsgpackRpcSpec)
 }
 }
 
 
+func testNonHandlesGroup(t *testing.T) {
+	// grep "func Test" codec_test.go | grep -v -E '(Cbor|Json|Simple|Msgpack|Binc)'
+	t.Run("TestBufioDecReader", TestBufioDecReader)
+	t.Run("TestAtomic", TestAtomic)
+	t.Run("TestAllEncCircularRef", TestAllEncCircularRef)
+	t.Run("TestAllAnonCycle", TestAllAnonCycle)
+}
+
 func TestCodecSuite(t *testing.T) {
 func TestCodecSuite(t *testing.T) {
 	testSuite(t, testCodecGroup)
 	testSuite(t, testCodecGroup)
 
 

Some files were not shown because too many files changed in this diff