View source code

codec: clean up old comments in code

Ugorji Nwoke 6 years ago
parent
commit
c8e440640c

+ 32 - 29
codec/binc.go

@@ -105,10 +105,7 @@ type bincEncDriver struct {
 	m map[string]uint16 // symbols
 	b [8]byte           // scratch, used for encoding numbers - bigendian style
 	s uint16            // symbols sequencer
-	// c containerState
-	// encDriverTrackContainerWriter
-	// encNoSeparator
-	_ [4]uint64 // padding
+	_ [4]uint64         // padding
 	e Encoder
 }
 
@@ -219,9 +216,8 @@ func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
 
 func (e *bincEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext) {
 	var bs []byte
-	// var bufp bytesBufPooler
 	if ext == SelfExt {
-		bs = e.e.blist.get(1024)[:0] // bufp.get(1024)[:0]
+		bs = e.e.blist.get(1024)[:0]
 		e.e.sideEncode(v, &bs)
 	} else {
 		bs = ext.WriteExt(v)
@@ -233,7 +229,7 @@ func (e *bincEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext) {
 	e.encodeExtPreamble(uint8(xtag), len(bs))
 	e.e.encWr.writeb(bs)
 	if ext == SelfExt {
-		e.e.blist.put(bs) // bufp.end()
+		e.e.blist.put(bs)
 	}
 }
 
@@ -275,7 +271,6 @@ func (e *bincEncDriver) EncodeSymbol(v string) {
 		return
 	}
 	if e.m == nil {
-		// e.m = pool4mapStrU16.Get().(map[string]uint16)
 		e.m = make(map[string]uint16, 16)
 	}
 	ui, ok := e.m[v]
@@ -354,7 +349,9 @@ func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) {
 }
 
 func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
-	//TODO: support bincUnicodeOther (for now, just use string or bytearray)
+	// NOTE: we currently only support UTF-8 (string) and RAW (bytearray).
+	// We should consider supporting bincUnicodeOther.
+
 	if c == cRAW {
 		e.encLen(bincVdByteArray<<4, length)
 	} else {
@@ -387,12 +384,6 @@ func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
 
 //------------------------------------
 
-// type bincDecSymbol struct {
-// 	s string
-// 	b []byte
-// 	// i uint16
-// }
-
 type bincDecDriver struct {
 	decDriverNoopContainerReader
 	noBuiltInTypes
@@ -409,9 +400,6 @@ type bincDecDriver struct {
 	// because we typically expect < 32 symbols in each stream.
 	s map[uint16][]byte // []bincDecSymbol
 
-	// noStreamingCodec
-	// decNoSeparator
-
 	b [8]byte   // scratch for decoding numbers - big endian style
 	_ [4]uint64 // padding cache-aligned
 
@@ -476,9 +464,6 @@ func (d *bincDecDriver) ContainerType() (vt valueType) {
 	} else if d.vd == bincVdMap {
 		return valueTypeMap
 	}
-	// else {
-	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
-	// }
 	return valueTypeUnset
 }
 
@@ -1031,8 +1016,6 @@ func (e *bincEncDriver) atEndOfEncode() {
 		for k := range e.m {
 			delete(e.m, k)
 		}
-		// pool4mapStrU16.Put(e.m)
-		// e.m = nil
 	}
 }
 
@@ -1047,8 +1030,6 @@ func (d *bincDecDriver) atEndOfDecode() {
 		for k := range d.s {
 			delete(d.s, k)
 		}
-		// pool4mapU16Bytes.Put(d.s)
-		// d.s = nil
 	}
 }
 
@@ -1104,7 +1085,7 @@ func (d *bincDecDriver) atEndOfDecode() {
 //       Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
 //
 func bincEncodeTime(t time.Time) []byte {
-	//t := rv2i(rv).(time.Time)
+	// t := rv2i(rv).(time.Time)
 	tsecs, tnsecs := t.Unix(), t.Nanosecond()
 	var (
 		bd   byte
@@ -1136,7 +1117,7 @@ func bincEncodeTime(t time.Time) []byte {
 		bd = bd | 0x20
 		// Note that Go Libs do not give access to dst flag.
 		_, zoneOffset := t.Zone()
-		//zoneName, zoneOffset := t.Zone()
+		// zoneName, zoneOffset := t.Zone()
 		zoneOffset /= 60
 		z := uint16(zoneOffset)
 		bigen.PutUint16(btmp[:2], z)
@@ -1165,10 +1146,10 @@ func bincDecodeTime(bs []byte) (tt time.Time, err error) {
 		n = ((bd >> 2) & 0x7) + 1
 		i2 = i + n
 		copy(btmp[8-n:], bs[i:i2])
-		//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+		// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
 		if bs[i]&(1<<7) != 0 {
 			copy(btmp[0:8-n], bsAll0xff)
-			//for j,k := byte(0), 8-n; j < k; j++ {	btmp[j] = 0xff }
+			// for j,k := byte(0), 8-n; j < k; j++ {	btmp[j] = 0xff }
 		}
 		i = i2
 		tsec = int64(bigen.Uint64(btmp[:]))
@@ -1212,5 +1193,27 @@ func bincDecodeTime(bs []byte) (tt time.Time, err error) {
 	return
 }
 
+// func timeLocUTCName(tzint int16) string {
+// 	if tzint == 0 {
+// 		return "UTC"
+// 	}
+// 	var tzname = []byte("UTC+00:00")
+// 	//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below.
+// 	//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+// 	var tzhr, tzmin int16
+// 	if tzint < 0 {
+// 		tzname[3] = '-'
+// 		tzhr, tzmin = -tzint/60, (-tzint)%60
+// 	} else {
+// 		tzhr, tzmin = tzint/60, tzint%60
+// 	}
+// 	tzname[4] = timeDigits[tzhr/10]
+// 	tzname[5] = timeDigits[tzhr%10]
+// 	tzname[7] = timeDigits[tzmin/10]
+// 	tzname[8] = timeDigits[tzmin%10]
+// 	return string(tzname)
+// 	//return time.FixedZone(string(tzname), int(tzint)*60)
+// }
+
 var _ decDriver = (*bincDecDriver)(nil)
 var _ encDriver = (*bincEncDriver)(nil)
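
Aside (illustration only, not part of the commit): the sign-extension step retained in bincDecodeTime above copies an n-byte big-endian value into an 8-byte scratch array and fills the leading bytes with 0xff when the sign bit is set. A minimal standalone sketch of that idea, using a hypothetical helper name and the standard encoding/binary package in place of the package-level bigen:

package main

import (
	"encoding/binary"
	"fmt"
)

// signExtendBE interprets bs (1..8 bytes) as a big-endian two's-complement
// integer, mirroring the copy-then-fill-with-0xff pattern in bincDecodeTime.
func signExtendBE(bs []byte) int64 {
	var btmp [8]byte
	n := len(bs)
	copy(btmp[8-n:], bs)
	if bs[0]&0x80 != 0 { // sign bit set: extend with 0xff
		for i := 0; i < 8-n; i++ {
			btmp[i] = 0xff
		}
	}
	return int64(binary.BigEndian.Uint64(btmp[:]))
}

func main() {
	fmt.Println(signExtendBE([]byte{0xff, 0x38})) // -200
	fmt.Println(signExtendBE([]byte{0x01, 0x00})) // 256
}
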

+ 0 - 57
codec/cbor.go

@@ -213,9 +213,6 @@ func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext) {
 func (e *cborEncDriver) EncodeRawExt(re *RawExt) {
 	e.encUint(uint64(re.Tag), cborBaseTag)
 	// only encodes re.Value (never re.Data)
-	// if false && re.Data != nil {
-	// 	en.encode(re.Data)
-	// } else if re.Value != nil {
 	if re.Value != nil {
 		e.e.encode(re.Value)
 	} else {
@@ -310,36 +307,10 @@ type cborDecDriver struct {
 	st     bool // skip tags
 	fnil   bool // found nil
 	noBuiltInTypes
-	// decNoSeparator
 	_ [6]uint64 // padding cache-aligned
 	d Decoder
 }
 
-// func (d *cborDecDriver) readNextBdSkipTags() {
-// 	d.bd = d.d.decRd.readn1()
-// 	if d.h.SkipUnexpectedTags {
-// 		for d.bd >= cborBaseTag && d.bd < cborBaseSimple {
-// 			d.decUint()
-// 			d.bd = d.d.decRd.readn1()
-// 		}
-// 	}
-// 	d.bdRead = true
-// }
-
-// func (d *cborDecDriver) readNextBd() {
-// 	d.bd = d.d.decRd.readn1()
-// 	if d.handleCborSelfDesc && d.bd == cborSelfDesrTag {
-// 		if x := d.readn1(); x == cborSelfDesrTag2 {
-// 			if x = d.readn1(); x != cborSelfDesrTag3 {
-// 				d.d.errorf("mishandled self desc: expected 0xd9d9f7, got: 0xd9d9%x", x)
-// 			}
-// 		} else {
-// 			d.unreadn1()
-// 		}
-// 	}
-// 	d.bdRead = true
-// }
-
 func (d *cborDecDriver) decoder() *Decoder {
 	return &d.d
 }
@@ -404,9 +375,6 @@ func (d *cborDecDriver) ContainerType() (vt valueType) {
 	} else if d.bd == cborBdIndefiniteMap || (d.bd>>5 == cborMajorMap) {
 		return valueTypeMap
 	}
-	// else {
-	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
-	// }
 	return valueTypeUnset
 }
 
@@ -627,11 +595,6 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 		}
 		return d.decAppendIndefiniteBytes(bs[:0])
 	}
-	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
-	// if d.bd == cborBdIndefiniteArray || (d.bd >> 5 == cborMajorArray) {
-	// 	bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
-	// 	return
-	// }
 	if d.bd == cborBdIndefiniteArray {
 		d.bdRead = false
 		if zerocopy && len(bs) == 0 {
@@ -695,23 +658,6 @@ func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
 			d.d.errorv(err)
 		}
 	case 1:
-		// if !d.bdRead {
-		// 	d.readNextBd()
-		// }
-		// // decode an int64 or a float, and infer time.Time from there.
-		// // for floats, round to microseconds, as that is what is guaranteed to fit well.
-		// switch {
-		// case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
-		// 	f1, f2 := math.Modf(d.DecodeFloat64())
-		// 	t = time.Unix(int64(f1), int64(f2*1e9))
-		// case d.bd == cborBdFloat64:
-		// 	f1, f2 := math.Modf(d.DecodeFloat64())
-		// 	t = time.Unix(int64(f1), int64(f2*1e9))
-		// case d.bd >= cborBaseUint && d.bd < cborBaseBytes:
-		// 	t = time.Unix(d.DecodeInt64(), 0)
-		// default:
-		// 	d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
-		// }
 		f1, f2 := math.Modf(d.DecodeFloat64())
 		t = time.Unix(int64(f1), int64(f2*1e9))
 	default:
@@ -792,9 +738,6 @@ func (d *cborDecDriver) DecodeNaked() {
 			d.DecodeNaked()
 			return // return when done (as true recursive function)
 		}
-		// d.bdRead = false
-		// d.d.decode(&re.Value) // handled by decode itself.
-		// decodeFurther = true
 	case cborMajorSimpleOrFloat:
 		switch d.bd {
 		case cborBdNil, cborBdUndefined:
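
Aside (illustration only): the retained case-1 branch of decodeTime above splits a float64 epoch value with math.Modf and passes the integer and fractional parts to time.Unix. The same conversion in standalone form:

package main

import (
	"fmt"
	"math"
	"time"
)

// timeFromFloatSeconds converts seconds-since-epoch (possibly fractional)
// into a time.Time; the fractional part becomes nanoseconds.
func timeFromFloatSeconds(secs float64) time.Time {
	f1, f2 := math.Modf(secs)
	return time.Unix(int64(f1), int64(f2*1e9))
}

func main() {
	fmt.Println(timeFromFloatSeconds(1561766400.25).UTC())
}
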

+ 9 - 350
codec/decode.go

@@ -21,11 +21,10 @@ const (
 )
 
 const (
-	decDefMaxDepth = 1024 // maximum depth
-	decDefSliceCap = 8
-	decDefChanCap  = 64 // should be large, as cap cannot be expanded
-	// decScratchByteArrayLen = cacheLineSize + (5 * 8) // - 5 // + (8 * 2) // - (8 * 1)
-	decScratchByteArrayLen = (6 * 8) // - 5 // + (8 * 2) // - (8 * 1)
+	decDefMaxDepth         = 1024 // maximum depth
+	decDefSliceCap         = 8
+	decDefChanCap          = 64      // should be large, as cap cannot be expanded
+	decScratchByteArrayLen = (6 * 8) // ??? cacheLineSize +
 
 	// decContainerLenUnknown is length returned from Read(Map|Array)Len
 	// when a format doesn't know a priori.
@@ -71,20 +70,6 @@ type decDriver interface {
 	// this will check if the next token is a break.
 	CheckBreak() bool
 
-	// // TryDecodeAsNil tries to decode as nil.
-	// //
-	// // Note: TryDecodeAsNil should be careful not to share any temporary []byte with
-	// // the rest of the decDriver. This is because sometimes, we optimize by holding onto
-	// // a transient []byte, and ensuring the only other call we make to the decDriver
-	// // during that time is maybe a TryDecodeAsNil() call.
-	// TryDecodeAsNil() bool
-
-	// // Nil says whether the last scalar value read from the stream was a nil value.
-	// //
-	// // This is sometimes inspected by the decoder if they need to determine whether
-	// // a pointer should be set to nil or the returned zero value.
-	// Nil() bool
-
 	// TryNil tries to decode as nil.
 	TryNil() bool
 
@@ -94,7 +79,6 @@ type decDriver interface {
 	//
 	// Note: Implementations MUST fully consume sentinel container types, specifically Nil.
 	ContainerType() (vt valueType)
-	// IsBuiltinType(rt uintptr) bool
 
 	// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
 	// For maps and arrays, it will not do the decoding in-band, but will signal
@@ -177,12 +161,6 @@ func (x decDriverNoopContainerReader) ReadMapEnd()             {}
 func (x decDriverNoopContainerReader) CheckBreak() (v bool)    { return }
 func (x decDriverNoopContainerReader) atEndOfDecode()          {}
 
-// func (x decDriverNoopContainerReader) ReadArrayElem()          {}
-// func (x decDriverNoopContainerReader) ReadMapElemKey()         {}
-// func (x decDriverNoopContainerReader) ReadMapElemValue()       {}
-
-// func (x decNoSeparator) uncacheRead() {}
-
 // DecodeOptions captures configuration options during decode.
 type DecodeOptions struct {
 	// MapType specifies type to use during schema-less decoding of a map in the stream.
@@ -295,10 +273,6 @@ type DecodeOptions struct {
 
 // ----------------------------------------
 
-// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
-// 	d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
-// }
-
 func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
 	d.d.DecodeExt(rv2i(rv), 0, nil)
 }
@@ -409,17 +383,12 @@ func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
 	rvSetUint64(rv, d.d.DecodeUint64())
 }
 
-// var kIntfCtr uint64
-
 func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
 	// nil interface:
 	// use some heuristics to decode it appropriately
 	// based on the detected next value in the stream.
 	n := d.naked()
 	d.d.DecodeNaked()
-	// if n.v == valueTypeNil {
-	// 	return
-	// }
 
 	// We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
 	// Howver, it is possible that the user has ways to pass in a type for a given interface
@@ -432,7 +401,6 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
 		d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
 		return
 	}
-	// var useRvn bool
 	switch n.v {
 	case valueTypeMap:
 		// if json, default to a map type with string keys
@@ -478,9 +446,7 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
 			}
 		}
 		if reflectArrayOfSupported && d.h.PreferArrayOverSlice {
-			// xdebugf("before: rvn: %#v", rvn)
 			rvn = rvGetArray4Slice(rvn)
-			// xdebugf("after:  rvn: %#v", rvn)
 		}
 	case valueTypeExt:
 		tag, bytes := n.u, n.l // calling decode below might taint the values
@@ -548,7 +514,6 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
 	// We do not replace with a generic value (as got from decodeNaked).
 
 	// every interface passed here MUST be settable.
-	// xdebugf("kInterface: 0")
 	var rvn reflect.Value
 	if rvIsNil(rv) || d.h.InterfaceReset {
 		// check if mapping to a type: if so, initialize it and move on
@@ -565,9 +530,6 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
 				if rvelem := rv.Elem(); rvelem.IsValid() {
 					rv.Set(reflect.Zero(rvelem.Type()))
 				}
-				// } else {
-				// 	rv.Set(reflect.Zero(rv.Type()))
-				// }
 			}
 			return
 		}
@@ -576,11 +538,6 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
 		rvn = rv.Elem()
 	}
 
-	// if d.d.TryDecodeAsNil() {
-	// 	rv.Set(reflect.Zero(rvn.Type()))
-	// 	return
-	// }
-
 	// Note: interface{} is settable, but underlying type may not be.
 	// Consequently, we MAY have to create a decodable value out of the underlying value,
 	// decode into it, and reset the interface itself.
@@ -593,7 +550,6 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
 
 	rvn2 := rvZeroAddrK(rvn.Type(), rvn.Kind())
 	rvSetDirect(rvn2, rvn)
-	// rvn2.Set(rvn)
 	d.decodeValue(rvn2, nil)
 	rv.Set(rvn2)
 }
@@ -619,7 +575,6 @@ func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayL
 func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 	sfn := structFieldNode{v: rv, update: true}
 	ctyp := d.d.ContainerType()
-	// xdebugf("kStruct: rv: %#v", rv)
 	if ctyp == valueTypeNil {
 		rvSetDirect(rv, f.ti.rv0)
 		return
@@ -643,17 +598,10 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 		for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
 			d.mapElemKey()
 			rvkencname = decStructFieldKey(d.d, f.ti.keyType, &d.b)
-			// xdebugf("key: '%s'", rvkencname)
 			d.mapElemValue()
 			if k := f.ti.indexForEncName(rvkencname); k > -1 {
 				si := tisfi[k]
-				// if d.d.TryDecodeAsNil() {
-				// 	si.setToZeroValue(rv)
-				// } else {
-				// 	d.decodeValue(sfn.field(si), nil)
-				// }
 				d.decodeValue(sfn.field(si), nil)
-				// xdebugf("value: '%#v'", sfn.field(si))
 			} else if mf != nil {
 				// store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode
 				name2 := rvkencname
@@ -691,11 +639,6 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 				break
 			}
 			d.arrayElem()
-			// if d.d.TryDecodeAsNil() {
-			// 	si.setToZeroValue(rv)
-			// } else {
-			// 	d.decodeValue(sfn.field(si), nil)
-			// }
 			d.decodeValue(sfn.field(si), nil)
 		}
 		if (hasLen && containerLen > len(f.ti.sfiSrc)) || (!hasLen && !checkbreak) {
@@ -724,8 +667,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 	rtelem0 := f.ti.elem
 	ctyp := d.d.ContainerType()
 	if ctyp == valueTypeNil {
-		// xdebug2f("rv: %v, type: %v, canset: %v", rv, rv.Type(), rv.CanSet())
-		// rv.Set(reflect.New(f.ti.rt).Elem())
 		if rv.CanSet() {
 			rvSetDirect(rv, f.ti.rv0)
 		}
@@ -749,18 +690,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 		return
 	}
 
-	// array := f.seq == seqTypeChan
-
-	slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
-
-	// // handle if nil
-	// if slh.IsNil {
-	// 	if rv.CanSet() && !rvIsNil(rv) {
-	// 		// rv.Set(reflect.New(f.ti.rt).Elem())
-	// 		rv.Set(reflect.Zero(f.ti.rt))
-	// 	}
-	// 	return
-	// }
+	slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - never Nil
 
 	// an array can never return a nil slice. so no need to check f.array here.
 	if containerLenS == 0 {
@@ -818,11 +748,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 			if rvCanset {
 				rvSetSliceLen(rv, rvlen)
 			}
-			// else {
-			// rv = rv.Slice(0, rvlen)
-			// rvChanged = true
-			// d.errorf("cannot decode into non-settable slice")
-			// }
 		}
 	}
 
@@ -831,7 +756,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 	var rtelem0ZeroValid bool
 	var j int
 
-	// xdebug2f("0: rvcap: %d, rvlen: %d", rvcap, rvlen)
 	for ; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
 		if j == 0 && f.seq == seqTypeSlice && rvIsNil(rv) {
 			if hasLen {
@@ -867,14 +791,11 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 			// expand the slice up to the cap.
 			// Note that we did, so we have to reset it later.
 
-			// xdebug2f("rvcap: %d, rvlen: %d", rvcap, rvlen)
 			if rvlen < rvcap {
 				if rv.CanSet() {
 					rvSetSliceLen(rv, rvcap)
-					// xdebugf("after rvssetlen rv.Len: %d", rv.Len())
 				} else if rvCanset {
 					rv = rvSlice(rv, rvcap)
-					// xdebugf("after rvCanset rv.Len: %d", rv.Len())
 					rvChanged = true
 				} else {
 					d.errorf(errmsgExpandSliceCannotChange)
@@ -886,31 +807,14 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 					d.errorf(errmsgExpandSliceCannotChange)
 					return
 				}
-				// xdebugf("else: before growCap: rvcap: %d", rvcap)
 				rvcap = growCap(rvcap, rtelem0Size, rvcap)
-				// rvcap = growCap(rvcap, rtelem0Size, rvcap+1+(rvcap*1/3))
 				rv9 = reflect.MakeSlice(f.ti.rt, rvcap, rvcap)
-				// xdebugf("else: rv9.Len: %d, rvcap: %d", rv9.Len(), rvcap)
 				rvCopySlice(rv9, rv)
 				rv = rv9
 				rvChanged = true
 				rvlen = rvcap
 			}
-
-			// var rvcap2 int
-			// var rvErrmsg2 string
-			// rv9, rvcap2, rvChanged, rvErrmsg2 =
-			// 	expandSliceRV(rv, f.ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap)
-			// if rvErrmsg2 != "" {
-			// 	d.errorf(rvErrmsg2)
-			// }
-			// rvlen++
-			// if rvChanged {
-			// 	rv = rv9
-			// 	rvcap = rvcap2
-			// }
 		}
-		// xdebugf("rv.Len: %d, j: %d", rv.Len(), j)
 		rv9 = rv.Index(j)
 		if d.h.SliceElementReset {
 			if !rtelem0ZeroValid {
@@ -931,13 +835,13 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 		} else if rvCanset {
 			rv = rvSlice(rv, j)
 			rvChanged = true
-		} // else { d.errorf("kSlice: cannot change non-settable slice") }
+		}
 		rvlen = j
 	} else if j == 0 && rvIsNil(rv) {
 		if rvCanset {
 			rv = reflect.MakeSlice(f.ti.rt, 0, 0)
 			rvChanged = true
-		} // else { d.errorf("kSlice: cannot change non-settable slice") }
+		}
 	}
 	slh.End()
 
@@ -977,17 +881,8 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
 		return
 	}
 
-	// array := f.seq == seqTypeChan
-
 	// only expects valueType(Array|Map - nil handled above)
 	slh, containerLenS := d.decSliceHelperStart()
-	// // handle if nil
-	// if slh.IsNil {
-	// 	if rv.CanSet() && !rvIsNil(rv) {
-	// 		rvSetDirect(rv, reflect.Zero(f.ti.rt))
-	// 	}
-	// 	return
-	// }
 
 	// an array can never return a nil slice. so no need to check f.array here.
 	if containerLenS == 0 {
@@ -1016,12 +911,8 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
 	var rv9 reflect.Value
 
 	var rvlen int // := rv.Len()
-	// rvcap := rv.Cap()
 	hasLen := containerLenS > 0
 
-	// consider creating new element once, and just decoding into it.
-	// var rtelem0Zero reflect.Value
-	// var rtelem0ZeroValid bool
 	var j int
 
 	for ; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
@@ -1039,10 +930,6 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
 			}
 		}
 		slh.ElemContainerState(j)
-		// if d.d.TryDecodeAsNil() {
-		// 	// rv.Send(reflect.Zero(rtelem0))
-		// 	continue
-		// }
 		if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rvIsNil(rv9)) {
 			rv9 = rvZeroAddrK(rtelem0, rtElem0Kind)
 		}
@@ -1060,11 +947,6 @@ func (d *Decoder) kSliceForChan(f *codecFnInfo, rv reflect.Value) {
 
 }
 
-// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) {
-// 	// d.decodeValueFn(rv.Slice(0, rv.Len()))
-// 	f.kSlice(rv.Slice(0, rv.Len()))
-// }
-
 func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 	containerLen := d.mapStart()
 	if containerLen == decContainerLenNil {
@@ -1124,8 +1006,6 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 
 	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
 		if j == 0 {
-			// rvvz = reflect.Zero(vtype)
-			// rvkz = reflect.Zero(ktype)
 			if !rvkMut {
 				rvkn = rvZeroAddrK(ktype, ktypeKind)
 			}
@@ -1162,23 +1042,6 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 
 		d.mapElemValue()
 
-		// // Brittle, but OK per TryDecodeAsNil() contract.
-		// // i.e. TryDecodeAsNil never shares slices with other decDriver procedures
-		// if d.d.TryDecodeAsNil() {
-		// 	if d.h.DeleteOnNilMapValue {
-		// 		mapDelete(rv, rvk)
-		// 	} else {
-		// 		if ktypeIsString { // set to a real string (not string view)
-		// 			rvk.SetString(d.string(kstrbs))
-		// 		}
-		// 		if !rvvz.IsValid() {
-		// 			rvvz = reflect.Zero(vtype)
-		// 		}
-		// 		mapSet(rv, rvk, rvvz)
-		// 	}
-		// 	continue
-		// }
-
 		doMapSet = true // set to false if u do a get, and its a non-nil pointer
 		if doMapGet {
 			if !rvvaSet {
@@ -1223,9 +1086,6 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
 		if doMapSet {
 			mapSet(rv, rvk, rvv)
 		}
-		// if ktypeIsString {
-		// 	// keepAlive4StringView(kstrbs) // not needed, as reference is outside loop
-		// }
 	}
 
 	d.mapEnd()
@@ -1267,59 +1127,7 @@ type decNaked struct {
 
 	// state
 	v valueType
-	// _ [6]bool // padding
-
-	// ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
-	//
-	// _ [3]uint64 // padding
-}
-
-// func (n *decNaked) init() {
-// 	n.ru = rv4i(&n.u).Elem()
-// 	n.ri = rv4i(&n.i).Elem()
-// 	n.rf = rv4i(&n.f).Elem()
-// 	n.rl = rv4i(&n.l).Elem()
-// 	n.rs = rv4i(&n.s).Elem()
-// 	n.rt = rv4i(&n.t).Elem()
-// 	n.rb = rv4i(&n.b).Elem()
-// 	// n.rr[] = rv4i(&n.)
-// }
-
-// type decNakedPooler struct {
-// 	n   *decNaked
-// 	nsp *sync.Pool
-// }
-
-// // naked must be called before each call to .DecodeNaked, as they will use it.
-// func (d *decNakedPooler) naked() *decNaked {
-// 	if d.n == nil {
-// 		// consider one of:
-// 		//   - get from sync.Pool  (if GC is frequent, there's no value here)
-// 		//   - new alloc           (safest. only init'ed if it a naked decode will be done)
-// 		//   - field in Decoder    (makes the Decoder struct very big)
-// 		// To support using a decoder where a DecodeNaked is not needed,
-// 		// we prefer #1 or #2.
-// 		// d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
-// 		// d.n.init()
-// 		var v interface{}
-// 		d.nsp, v = pool.decNaked()
-// 		d.n = v.(*decNaked)
-// 	}
-// 	return d.n
-// }
-
-// func (d *decNakedPooler) end() {
-// 	if d.n != nil {
-// 		// if n != nil, then nsp != nil (they are always set together)
-// 		d.nsp.Put(d.n)
-// 		d.n, d.nsp = nil, nil
-// 	}
-// }
-
-// type rtid2rv struct {
-// 	rtid uintptr
-// 	rv   reflect.Value
-// }
+}
 
 // Decoder reads and decodes an object from an input stream in a supported format.
 //
@@ -1336,16 +1144,10 @@ type Decoder struct {
 
 	d decDriver
 
-	// NOTE: Decoder shouldn't call its read methods,
-	// as the handler MAY need to do some coordination.
-	// r *decRd
-
-	// bi *bufioDecReader
 	// cache the mapTypeId and sliceTypeId for faster comparisons
 	mtid uintptr
 	stid uintptr
 
-	// jdec *jsonDecDriver
 	h *BasicHandle
 
 	blist bytesFreelist
@@ -1356,10 +1158,6 @@ type Decoder struct {
 	// ---- cpu cache line boundary?
 	n decNaked
 
-	// cr containerStateRecv
-
-	// _ [4]uint8 // padding
-
 	hh  Handle
 	err error
 
@@ -1384,9 +1182,6 @@ type Decoder struct {
 	// By being always-available, it can be used for one-off things without
 	// having to get from freelist, use, and return back to freelist.
 	b [decScratchByteArrayLen]byte
-
-	// padding - false sharing help // modify 232 if Decoder struct changes.
-	// _ [cacheLineSize - 232%cacheLineSize]byte
 }
 
 // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
@@ -1407,31 +1202,23 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder {
 	return d
 }
 
-// var defaultDecNaked decNaked
-
 func (d *Decoder) r() *decRd {
 	return &d.decRd
 }
 
 func (d *Decoder) init(h Handle) {
-	// if useFinalizers {
-	// 	runtime.SetFinalizer(d, (*Decoder).finalize)
-	// }
 	d.bytes = true
 	d.err = errDecoderNotInitialized
-	// d.r = &d.decRd
 	d.h = basicHandle(h)
 	d.hh = h
 	d.be = h.isBinary()
 	// NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
-	// d.esep = d.hh.hasElemSeparators()
 	if d.h.InternString {
 		d.is = make(map[string]string, 32)
 	}
 }
 
 func (d *Decoder) resetCommon() {
-	// d.r = &d.decRd
 	d.d.reset()
 	d.err = nil
 	d.depth = 0
@@ -1460,24 +1247,17 @@ func (d *Decoder) Reset(r io.Reader) {
 		return
 	}
 	d.bytes = false
-	// d.typ = entryTypeUnset
 	if d.h.ReaderBufferSize > 0 {
 		if d.bi == nil {
 			d.bi = new(bufioDecReader)
 		}
 		d.bi.reset(r, d.h.ReaderBufferSize, &d.blist)
-		// d.r = d.bi
-		// d.typ = entryTypeBufio
 		d.bufio = true
 	} else {
-		// d.ri.x = &d.b
-		// d.s = d.sa[:0]
 		if d.ri == nil {
 			d.ri = new(ioDecReader)
 		}
 		d.ri.reset(r, &d.blist)
-		// d.r = d.ri
-		// d.typ = entryTypeIo
 		d.bufio = false
 	}
 	d.resetCommon()
@@ -1491,9 +1271,7 @@ func (d *Decoder) ResetBytes(in []byte) {
 	}
 	d.bytes = true
 	d.bufio = false
-	// d.typ = entryTypeBytes
 	d.rb.reset(in)
-	// d.r = &d.rb
 	d.resetCommon()
 }
 
@@ -1590,8 +1368,6 @@ func (d *Decoder) Decode(v interface{}) (err error) {
 // MustDecode is like Decode, but panics if unable to Decode.
 // This provides insight to the code location that triggered the error.
 func (d *Decoder) MustDecode(v interface{}) {
-	// xdebugf("MustDecode: v: %#v", v)
-	// debug.PrintStack()
 	if d.err != nil {
 		panic(d.err)
 	}
@@ -1601,62 +1377,16 @@ func (d *Decoder) MustDecode(v interface{}) {
 // MustDecode is like Decode, but panics if unable to Decode.
 // This provides insight to the code location that triggered the error.
 func (d *Decoder) mustDecode(v interface{}) {
-	// xdebug2f(".... mustDecode: v: %#v", v)
 	// Top-level: v is a pointer and not nil.
 
-	// if d.bi == nil {
-	// 	// if d.d.TryDecodeAsNil() {
-	// 	// 	setZero(v)
-	// 	// } else {
-	// 	// 	d.decode(v)
-	// 	// }
-	// 	d.decode(v)
-	// 	d.d.atEndOfDecode()
-	// 	// release
-	// 	if !d.h.ExplicitRelease {
-	// 		if d.jdec != nil {
-	// 			d.jdec.release()
-	// 		}
-	// 	}
-	// 	return
-	// }
-
-	// if d.d.TryDecodeAsNil() {
-	// 	setZero(v)
-	// } else {
-	// 	d.bi.calls++
-	// 	d.decode(v)
-	// 	d.bi.calls--
-	// }
 	d.calls++
 	d.decode(v)
 	d.calls--
 	if d.calls == 0 {
 		d.d.atEndOfDecode()
-		// release
-		// if !d.h.ExplicitRelease {
-		// 	d.decRd.release()
-		// 	// if d.jdec != nil {
-		// 	// 	d.jdec.release()
-		// 	// }
-		// }
 	}
 }
 
-// func (d *Decoder) deferred(err1 *error) {
-// 	if recoverPanicToErr {
-// 		if x := recover(); x != nil {
-// 			panicValToErr(d, x, err1)
-// 			panicValToErr(d, x, &d.err)
-// 		}
-// 	}
-// }
-
-// //go:noinline -- as it is run by finalizer
-// func (d *Decoder) finalize() {
-// 	d.Release()
-// }
-
 // Release releases shared (pooled) resources.
 //
 // It is important to call Release() when done with a Decoder, so those resources
@@ -1667,33 +1397,15 @@ func (d *Decoder) mustDecode(v interface{}) {
 // Deprecated: Release is a no-op as pooled resources are not used with an Decoder.
 // This method is kept for compatibility reasons only.
 func (d *Decoder) Release() {
-	// if d.bi != nil {
-	// 	d.bi.release()
-	// }
-	// if d.jdec != nil {
-	// 	d.jdec.release()
-	// }
-	// d.decNakedPooler.end()
 }
 
-// // this is not a smart swallow, as it allocates objects and does unnecessary work.
-// func (d *Decoder) swallowViaHammer() {
-// 	var blank interface{}
-// 	d.decodeValueNoFn(rv4i(&blank).Elem())
-// }
-
 func (d *Decoder) swallow() {
-	// smarter decode that just swallows the content
-	// if d.d.TryDecodeAsNil() {
-	// 	return
-	// }
 	switch d.d.ContainerType() {
 	case valueTypeNil:
 	case valueTypeMap:
 		containerLen := d.mapStart()
 		hasLen := containerLen >= 0
 		for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
-			// if clenGtEqualZero {if j >= containerLen {break} } else if d.checkBreak() {break}
 			d.mapElemKey()
 			d.swallow()
 			d.mapElemValue()
@@ -1853,8 +1565,6 @@ func (d *Decoder) decode(iv interface{}) {
 
 	case *interface{}:
 		d.decodeValue(rv4i(iv), nil)
-		// d.decodeValue(rv4i(iv).Elem(), nil)
-		// d.decodeValueNotNil(rv4i(iv).Elem())
 
 	default:
 		if v, ok := iv.(Selfer); ok {
@@ -1910,14 +1620,6 @@ func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn) {
 	} else {
 		fn.fd(d, &fn.i, rv)
 	}
-
-	// const check = true
-	// if check && rvpValid && scalarBitset.isset(byte(rv.Kind())) && d.d.Nil() {
-	// 	xdebug2f("setting %v (canset: %v) to %v, after updating %v to %v",
-	// 	rv0.Type(), rv0.CanSet(), reflect.Zero(rv0.Type()), rv.Type(), rv)
-	// 	rv0.Set(reflect.Zero(rv0.Type()))
-	// }
-	// return rv
 }
 
 func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
@@ -2119,6 +1821,7 @@ func (d *Decoder) interfaceExtConvertAndDecode(v interface{}, ext Ext) {
 	// - decode into it
 	// - return the interface for passing into UpdateExt.
 	// - interface should be a pointer if struct|array, else a value
+
 	var s interface{}
 	rv := rv4i(v)
 	rv2 := rv.Elem()
@@ -2189,15 +1892,6 @@ func (x decSliceHelper) End() {
 func (x decSliceHelper) ElemContainerState(index int) {
 	// Note: if isnil, clen=0, so we never call into ElemContainerState
 
-	// if x.IsNil {
-	// } else if x.Array {
-	// 	x.d.arrayElem()
-	// } else if index%2 == 0 {
-	// 	x.d.mapElemKey()
-	// } else {
-	// 	x.d.mapElemValue()
-	// }
-
 	if x.Array {
 		x.d.arrayElem()
 	} else {
@@ -2220,7 +1914,6 @@ func decByteSlice(r *decRd, clen, maxInitLen int, bs []byte) (bsOut []byte) {
 		bsOut = bs[:clen]
 		r.readb(bsOut)
 	} else {
-		// bsOut = make([]byte, clen)
 		len2 := decInferLen(clen, maxInitLen, 1)
 		bsOut = make([]byte, len2)
 		r.readb(bsOut)
@@ -2236,13 +1929,6 @@ func decByteSlice(r *decRd, clen, maxInitLen int, bs []byte) (bsOut []byte) {
 	return
 }
 
-// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
-// 	if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen {
-// 		return r.readx(clen)
-// 	}
-// 	return decByteSlice(r, clen, maxInitLen, bs)
-// }
-
 func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
 	if xlen := len(in); xlen > 0 {
 		if isBytesReader || xlen <= scratchByteArrayLen {
@@ -2308,33 +1994,6 @@ func decInferLen(clen, maxlen, unit int) (rvlen int) {
 	return
 }
 
-// func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) (
-// 	s2 reflect.Value, scap2 int, changed bool, err string) {
-// 	l1 := slen + num // new slice length
-// 	if l1 <= scap {
-// 		if s.CanSet() {
-// 			rvSetSliceLen(s, l1)
-// 		} else if canChange {
-// 			s2 = rvSlice(s, l1)
-// 			scap2 = scap
-// 			changed = true
-// 		} else {
-// 			err = errmsgExpandSliceCannotChange
-// 			return
-// 		}
-// 		return
-// 	}
-// 	if !canChange {
-// 		err = errmsgExpandSliceCannotChange
-// 		return
-// 	}
-// 	scap2 = growCap(scap, stElemSize, num)
-// 	s2 = reflect.MakeSlice(st, l1, scap2)
-// 	changed = true
-// 	rvCopySlice(s2, s)
-// 	return
-// }
-
 func decReadFull(r io.Reader, bs []byte) (n uint, err error) {
 	var nn int
 	for n < uint(len(bs)) && err == nil {
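
Aside (simplified sketch, not the real implementation): the decode paths above keep funnelling declared container lengths through decInferLen(clen, maxInitLen, unit) so a hostile stream cannot force a huge up-front allocation. The core idea, with a hypothetical helper name and the corner cases omitted:

package main

import "fmt"

// inferLen caps the initial allocation for a container claiming clen elements,
// where unit approximates the per-element byte size and maxBytes bounds the
// bytes we are willing to allocate before any data has been read.
func inferLen(clen, maxBytes, unit int) int {
	if clen <= 0 || maxBytes <= 0 || unit <= 0 {
		return 8 // small default; the slice/map grows as elements arrive
	}
	if max := maxBytes / unit; clen > max {
		return max
	}
	return clen
}

func main() {
	fmt.Println(inferLen(10, 1024, 8))    // 10: the declared length fits the budget
	fmt.Println(inferLen(1<<30, 1024, 8)) // 128: capped; grow later if needed
}
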

+ 11 - 266
codec/encode.go

@@ -28,17 +28,12 @@ type encDriver interface {
 	EncodeBool(b bool)
 	EncodeFloat32(f float32)
 	EncodeFloat64(f float64)
-	// encodeExtPreamble(xtag byte, length int)
 	EncodeRawExt(re *RawExt)
 	EncodeExt(v interface{}, xtag uint64, ext Ext)
 	// EncodeString using cUTF8, honor'ing StringToRaw flag
 	EncodeString(v string)
-	// EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW
-	// EncodeSymbol(v string)
 	EncodeStringBytesRaw(v []byte)
 	EncodeTime(time.Time)
-	//encBignum(f *big.Int)
-	//encStringRunes(c charEncoding, v []rune)
 	WriteArrayStart(length int)
 	WriteArrayEnd()
 	WriteMapStart(length int)
@@ -55,10 +50,6 @@ type encDriverContainerTracker interface {
 	WriteMapElemValue()
 }
 
-// type encDriverAsis interface {
-// 	EncodeAsis(v []byte)
-// }
-
 type encodeError struct {
 	codecError
 }
@@ -75,23 +66,6 @@ func (encDriverNoopContainerWriter) WriteMapStart(length int)   {}
 func (encDriverNoopContainerWriter) WriteMapEnd()               {}
 func (encDriverNoopContainerWriter) atEndOfEncode()             {}
 
-// func (encDriverNoopContainerWriter) WriteArrayElem()            {}
-// func (encDriverNoopContainerWriter) WriteMapElemKey()           {}
-// func (encDriverNoopContainerWriter) WriteMapElemValue()         {}
-
-// type encDriverTrackContainerWriter struct {
-// 	c containerState
-// }
-
-// func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart }
-// func (e *encDriverTrackContainerWriter) WriteArrayElem()            { e.c = containerArrayElem }
-// func (e *encDriverTrackContainerWriter) WriteArrayEnd()             { e.c = containerArrayEnd }
-// func (e *encDriverTrackContainerWriter) WriteMapStart(length int)   { e.c = containerMapStart }
-// func (e *encDriverTrackContainerWriter) WriteMapElemKey()           { e.c = containerMapKey }
-// func (e *encDriverTrackContainerWriter) WriteMapElemValue()         { e.c = containerMapValue }
-// func (e *encDriverTrackContainerWriter) WriteMapEnd()               { e.c = containerMapEnd }
-// func (e *encDriverTrackContainerWriter) atEndOfEncode()             {}
-
 // EncodeOptions captures configuration options during encode.
 type EncodeOptions struct {
 	// WriterBufferSize is the size of the buffer used when writing.
@@ -220,29 +194,8 @@ func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
 
 func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
 	e.e.EncodeString(rvGetString(rv))
-	// if e.h.StringToRaw {
-	// 	e.e.EncodeStringBytesRaw(bytesView(rvGetString(rv)))
-	// } else {
-	// 	e.e.EncodeStringEnc(cUTF8, rvGetString(rv))
-	// }
 }
 
-// func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
-// 	if e.h.StringToRaw {
-// 		e.kStringToRaw(f, rv)
-// 	} else {
-// 		e.kStringEnc(f, rv)
-// 	}
-// }
-
-// func (e *Encoder) kStringToRaw(f *codecFnInfo, rv reflect.Value) {
-// 	e.e.EncodeStringBytesRaw(bytesView(rvGetString(rv)))
-// }
-
-// func (e *Encoder) kStringEnc(f *codecFnInfo, rv reflect.Value) {
-// 	e.e.EncodeStringEnc(cUTF8, rvGetString(rv))
-// }
-
 func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
 	e.e.EncodeFloat64(rvGetFloat64(rv))
 }
@@ -421,14 +374,10 @@ func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
 func (e *Encoder) kSliceBytesChan(rv reflect.Value) {
 	// do not use range, so that the number of elements encoded
 	// does not change, and encoding does not hang waiting on someone to close chan.
+
 	// for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) }
 	// ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte.
 
-	// if rvIsNil(rv) {
-	// 	e.e.EncodeNil()
-	// 	return
-	// }
-
 	bs := e.b[:0]
 	irv := rv2i(rv)
 	ch, ok := irv.(<-chan byte)
@@ -514,20 +463,6 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 	}
 	newlen += len(f.ti.sfiSrc)
 
-	// Use sync.Pool to reduce allocating slices unnecessarily.
-	// The cost of sync.Pool is less than the cost of new allocation.
-	//
-	// Each element of the array pools one of encStructPool(8|16|32|64).
-	// It allows the re-use of slices up to 64 in length.
-	// A performance cost of encoding structs was collecting
-	// which values were empty and should be omitted.
-	// We needed slices of reflect.Value and string to collect them.
-	// This shared pool reduces the amount of unnecessary creation we do.
-	// The cost is that of locking sometimes, but sync.Pool is efficient
-	// enough to reduce thread contention.
-
-	// fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen)
-	// var spool sfiRvPooler
 	var fkvs = e.slist.get(newlen)
 
 	recur := e.h.RecursiveEmptyCheck
@@ -538,7 +473,6 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 	if toMap {
 		newlen = 0
 		for _, si := range f.ti.sfiSort { // use sorted array
-			// kv.r = si.field(rv, false)
 			kv.r = sfn.field(si)
 			if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
 				continue
@@ -578,9 +512,7 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
 		e.mapEnd()
 	} else {
 		newlen = len(f.ti.sfiSrc)
-		// kv.v = nil
 		for i, si := range f.ti.sfiSrc { // use unsorted array (to match sequence in struct)
-			// kv.r = si.field(rv, false)
 			kv.r = sfn.field(si)
 			// use the zero value.
 			// if a reference or struct, set to nil (so you do not output too much)
@@ -621,7 +553,6 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 		return
 	}
 
-	// var asSymbols bool
 	// determine the underlying key and val encFn's for the map.
 	// This eliminates some work which is done for each loop iteration i.e.
 	// rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
@@ -629,9 +560,9 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 	// However, if kind is reflect.Interface, do not pre-determine the
 	// encoding type, because preEncodeValue may break it down to
 	// a concrete type and kInterface will bomb.
+
 	var keyFn, valFn *codecFn
 
-	// rtkeyid := rt2id(f.ti.key)
 	ktypeKind := f.ti.key.Kind()
 	vtypeKind := f.ti.elem.Kind()
 
@@ -660,7 +591,6 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 			rtkey = rtkey.Elem()
 		}
 		if rtkey.Kind() != reflect.Interface {
-			// rtkeyid = rt2id(rtkey)
 			keyFn = e.h.fn(rtkey)
 		}
 	}
@@ -680,11 +610,6 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 		}
 		if keyTypeIsString {
 			e.e.EncodeString(vx.String())
-			// if e.h.StringToRaw {
-			// 	e.e.EncodeStringBytesRaw(bytesView(vx.String()))
-			// } else {
-			// 	e.e.EncodeStringEnc(cUTF8, vx.String())
-			// }
 		} else {
 			e.encodeValue(vx, keyFn)
 		}
@@ -719,7 +644,7 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemKey()
 			e.e.EncodeBool(mksv[i].v)
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.String:
 		mksv := make([]stringRv, len(mks))
@@ -732,13 +657,8 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 		for i := range mksv {
 			e.mapElemKey()
 			e.e.EncodeString(mksv[i].v)
-			// if e.h.StringToRaw {
-			// 	e.e.EncodeStringBytesRaw(bytesView(mksv[i].v))
-			// } else {
-			// 	e.e.EncodeStringEnc(cUTF8, mksv[i].v)
-			// }
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
 		mksv := make([]uint64Rv, len(mks))
@@ -752,7 +672,7 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemKey()
 			e.e.EncodeUint(mksv[i].v)
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
 		mksv := make([]int64Rv, len(mks))
@@ -766,7 +686,7 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemKey()
 			e.e.EncodeInt(mksv[i].v)
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.Float32:
 		mksv := make([]float64Rv, len(mks))
@@ -780,7 +700,7 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemKey()
 			e.e.EncodeFloat32(float32(mksv[i].v))
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.Float64:
 		mksv := make([]float64Rv, len(mks))
@@ -794,7 +714,7 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemKey()
 			e.e.EncodeFloat64(mksv[i].v)
 			e.mapElemValue()
-			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn) // e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
+			e.encodeValue(mapGet(rv, mksv[i].r, rvv), valFn)
 		}
 	case reflect.Struct:
 		if rtkey == timeTyp {
@@ -817,9 +737,6 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 	default:
 		// out-of-band
 		// first encode each key to a []byte first, then sort them, then record
-		// var bufp bytesBufPooler
-		// var mksv []byte = bufp.get(len(mks) * 16)[:0]
-		// var mksv []byte = make([]byte, 0, len(mks)*16)
 		var mksv []byte = e.blist.get(len(mks) * 16)[:0]
 		e2 := NewEncoderBytes(&mksv, e.hh)
 		mksbv := make([]bytesRv, len(mks))
@@ -837,7 +754,6 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 			e.mapElemValue()
 			e.encodeValue(mapGet(rv, mksbv[j].r, rvv), valFn)
 		}
-		// bufp.end()
 		e.blist.put(mksv)
 	}
 }
@@ -852,19 +768,12 @@ func (e *Encoder) kMapCanonical(rtkey, rtval reflect.Type, rv, rvv reflect.Value
 // This is the idiomatic way to use.
 type Encoder struct {
 	panicHdl
-	// hopefully, reduce derefencing cost by laying the encWriter inside the Encoder
-	e encDriver
 
-	// NOTE: Encoder shouldn't call it's write methods,
-	// as the handler MAY need to do some coordination.
-	// w *encWr
-
-	// bw *bufio.Writer
-	// as encDriverAsis
+	e encDriver
 
-	// jenc *jsonEncDriver
 	h *BasicHandle
 
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
 	encWr
 
 	// ---- cpu cache line boundary
@@ -877,17 +786,12 @@ type Encoder struct {
 
 	// ---- writable fields during execution --- *try* to keep in sep cache line
 	ci set // holds set of addresses found during an encoding (if CheckCircularRef=true)
-	// cidef [1]interface{} // default ci
 
 	slist sfiRvFreelist
 
 	b [(2 * 8)]byte // for encoding chan byte, (non-addressable) [N]byte, etc
 
 	// ---- cpu cache line boundary?
-
-	// b [scratchByteArrayLen]byte
-	// _ [cacheLineSize - scratchByteArrayLen]byte // padding
-	// b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes
 }
 
 // NewEncoder returns an Encoder for encoding into an io.Writer.
@@ -914,14 +818,8 @@ func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
 func (e *Encoder) init(h Handle) {
 	e.err = errEncoderNotInitialized
 	e.bytes = true
-	// if useFinalizers {
-	// 	runtime.SetFinalizer(e, (*Encoder).finalize)
-	// }
-	// e.w = &e.encWr
 	e.hh = h
 	e.h = basicHandle(h)
-	// e.esep = h.hasElemSeparators()
-	// e.as, e.isas = e.e.(encDriverAsis)
 	e.be = e.hh.isBinary()
 }
 
@@ -948,34 +846,11 @@ func (e *Encoder) Reset(w io.Writer) {
 	if w == nil {
 		return
 	}
-	// var ok bool
 	e.bytes = false
 	if e.wf == nil {
 		e.wf = new(bufioEncWriter)
 	}
-	// e.typ = entryTypeUnset
-	// if e.h.WriterBufferSize > 0 {
-	// 	// bw := bufio.NewWriterSize(w, e.h.WriterBufferSize)
-	// 	// e.wi.bw = bw
-	// 	// e.wi.sw = bw
-	// 	// e.wi.fw = bw
-	// 	// e.wi.ww = bw
-	// 	if e.wf == nil {
-	// 		e.wf = new(bufioEncWriter)
-	// 	}
-	// 	e.wf.reset(w, e.h.WriterBufferSize)
-	// 	e.typ = entryTypeBufio
-	// } else {
-	// 	if e.wi == nil {
-	// 		e.wi = new(ioEncWriter)
-	// 	}
-	// 	e.wi.reset(w)
-	// 	e.typ = entryTypeIo
-	// }
 	e.wf.reset(w, e.h.WriterBufferSize, &e.blist)
-	// e.typ = entryTypeBufio
-
-	// e.w = e.wi
 	e.resetCommon()
 }
 
@@ -989,9 +864,7 @@ func (e *Encoder) ResetBytes(out *[]byte) {
 		in = make([]byte, defEncByteBufSize)
 	}
 	e.bytes = true
-	// e.typ = entryTypeBytes
 	e.wb.reset(in, out)
-	// e.w = &e.wb
 	e.resetCommon()
 }
 
@@ -1084,6 +957,7 @@ func (e *Encoder) Encode(v interface{}) (err error) {
 	// Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
 	// defer func() { e.deferred(&err) }() }
 	// { x, y := e, &err; defer func() { x.deferred(y) }() }
+
 	if e.err != nil {
 		return e.err
 	}
@@ -1121,44 +995,15 @@ func (e *Encoder) MustEncode(v interface{}) {
 }
 
 func (e *Encoder) mustEncode(v interface{}) {
-	// if e.wf == nil {
-	// 	e.encode(v)
-	// 	e.e.atEndOfEncode()
-	// 	e.w().end()
-	// 	return
-	// }
-
-	// if e.wf.buf == nil {
-	// 	e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz)
-	// 	e.wf.buf = e.wf.buf[:cap(e.wf.buf)]
-	// }
 	e.calls++
 	e.encode(v)
 	e.calls--
 	if e.calls == 0 {
 		e.e.atEndOfEncode()
 		e.w().end()
-		// if !e.h.ExplicitRelease {
-		// 	e.Release()
-		// }
 	}
 }
 
-// func (e *Encoder) deferred(err1 *error) {
-// 	e.w().end()
-// 	if recoverPanicToErr {
-// 		if x := recover(); x != nil {
-// 			panicValToErr(e, x, err1)
-// 			panicValToErr(e, x, &e.err)
-// 		}
-// 	}
-// }
-
-// //go:noinline -- as it is run by finalizer
-// func (e *Encoder) finalize() {
-// 	e.Release()
-// }
-
 // Release releases shared (pooled) resources.
 //
 // It is important to call Release() when done with an Encoder, so those resources
@@ -1167,9 +1012,6 @@ func (e *Encoder) mustEncode(v interface{}) {
 // Deprecated: Release is a no-op as pooled resources are not used with an Encoder.
 // This method is kept for compatibility reasons only.
 func (e *Encoder) Release() {
-	// if e.wf != nil {
-	// 	e.wf.release()
-	// }
 }
 
 func (e *Encoder) encode(iv interface{}) {
@@ -1199,11 +1041,6 @@ func (e *Encoder) encode(iv interface{}) {
 
 	case string:
 		e.e.EncodeString(v)
-		// if e.h.StringToRaw {
-		// 	e.e.EncodeStringBytesRaw(bytesView(v))
-		// } else {
-		// 	e.e.EncodeStringEnc(cUTF8, v)
-		// }
 	case bool:
 		e.e.EncodeBool(v)
 	case int:
@@ -1240,11 +1077,6 @@ func (e *Encoder) encode(iv interface{}) {
 		e.rawBytes(*v)
 	case *string:
 		e.e.EncodeString(*v)
-		// if e.h.StringToRaw {
-		// 	e.e.EncodeStringBytesRaw(bytesView(*v))
-		// } else {
-		// 	e.e.EncodeStringEnc(cUTF8, *v)
-		// }
 	case *bool:
 		e.e.EncodeBool(*v)
 	case *int:
@@ -1337,7 +1169,6 @@ TOP:
 	}
 
 	if sptr != nil && (&e.ci).add(sptr) {
-		// e.errorf("circular reference found: # %d", sptr)
 		e.errorf("circular reference found: # %p, %T", sptr, sptr)
 	}
 
@@ -1367,19 +1198,6 @@ TOP:
 	}
 }
 
-// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
-// 	if fnerr != nil {
-// 		panic(fnerr)
-// 	}
-// 	if bs == nil {
-// 		e.e.EncodeNil()
-// 	} else if asis {
-// 		e.asis(bs)
-// 	} else {
-// 		e.e.EncodeStringBytesRaw(bs)
-// 	}
-// }
-
 func (e *Encoder) marshalUtf8(bs []byte, fnerr error) {
 	if fnerr != nil {
 		panic(fnerr)
@@ -1414,14 +1232,6 @@ func (e *Encoder) marshalRaw(bs []byte, fnerr error) {
 	}
 }
 
-// func (e *Encoder) asis(v []byte) {
-// 	if e.isas {
-// 		e.as.EncodeAsis(v)
-// 	} else {
-// 		e.w().writeb(v)
-// 	}
-// }
-
 func (e *Encoder) rawBytes(vv Raw) {
 	v := []byte(vv)
 	if !e.h.Raw {
@@ -1457,21 +1267,6 @@ func (e *Encoder) mapElemValue() {
 	e.c = containerMapValue
 }
 
-// // Note: This is harder to inline, as there are 2 function calls inside.
-// func (e *Encoder) mapElemKeyOrValue(j uint8) {
-// 	if j == 0 {
-// 		if e.js {
-// 			e.jenc.WriteMapElemKey()
-// 		}
-// 		e.c = containerMapKey
-// 	} else {
-// 		if e.js {
-// 			e.jenc.WriteMapElemValue()
-// 		}
-// 		e.c = containerMapValue
-// 	}
-// }
-
 func (e *Encoder) mapEnd() {
 	e.e.WriteMapEnd()
 	// e.c = containerMapEnd
@@ -1514,19 +1309,6 @@ func encStructFieldKey(encName string, ee encDriver, w *encWr,
 	if keyType == valueTypeString {
 		if js && encNameAsciiAlphaNum { // keyType == valueTypeString
 			w.writeqstr(encName)
-			// ----
-			// w.writen1('"')
-			// w.writestr(encName)
-			// w.writen1('"')
-			// ----
-			// w.writestr(`"` + encName + `"`)
-			// ----
-			// // do concat myself, so it is faster than the generic string concat
-			// b := make([]byte, len(encName)+2)
-			// copy(b[1:], encName)
-			// b[0] = '"'
-			// b[len(b)-1] = '"'
-			// w.writeb(b)
 		} else { // keyType == valueTypeString
 			ee.EncodeString(encName)
 		}
@@ -1538,40 +1320,3 @@ func encStructFieldKey(encName string, ee encDriver, w *encWr,
 		ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64)))
 	}
 }
-
-// type encExtPreambler interface {
-// 	encodeExtPreamble(tag uint8, length int)
-// }
-
-// func encBytesExt(rv interface{}, xtag uint64, ext Ext, h Handle, e encDriver) {
-// 	var bs []byte
-// 	var bufp bytesBufPooler
-// 	if ext == SelfExt {
-// 		bs = bufp.get(1024)[:0]
-// 		rv2 := rv4i(v)
-// 		NewEncoderBytes(&bs, h).encodeValue(rv2, h.fnNoExt(rv2.Type()))
-// 	} else {
-// 		bs = ext.WriteExt(v)
-// 	}
-// 	if bs == nil {
-// 		e.EncodeNil()
-// 		return
-// 	}
-// 	if e.h.WriteExt {
-// 		e.encodeExtPreamble(uint8(xtag), len(bs))
-// 		e.w.writeb(bs)
-// 	} else {
-// 		e.EncodeStringBytesRaw(bs)
-// 	}
-// 	if ext == SelfExt {
-// 		bufp.end()
-// 	}
-// }
-
-// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) {
-// 	if stringToRaw {
-// 		ee.EncodeStringBytesRaw(bytesView(s))
-// 	} else {
-// 		ee.EncodeStringEnc(cUTF8, s)
-// 	}
-// }
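
Aside (illustration only): kMapCanonical above exists so that, when the Canonical option is set, map keys are collected and sorted before their entries are written, making equal maps encode to identical bytes. The pattern in miniature, using plain string keys:

package main

import (
	"fmt"
	"sort"
)

// encodeCanonical visits a map in sorted key order; sorting first is what
// makes the emitted sequence deterministic ("canonical").
func encodeCanonical(m map[string]int, emit func(k string, v int)) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		emit(k, m[k])
	}
}

func main() {
	encodeCanonical(map[string]int{"b": 2, "a": 1, "c": 3}, func(k string, v int) {
		fmt.Printf("%s=%d\n", k, v)
	})
}
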

+ 0 - 6
codec/fast-path.not.go

@@ -34,12 +34,6 @@ type fastpathA [0]fastpathE
 
 func (x fastpathA) index(rtid uintptr) int { return -1 }
 
-// func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
-// 	fn := d.h.fn(uint8SliceTyp)
-// 	d.kSlice(&fn.i, rv4i(&v).Elem())
-// 	return v, true
-// }
-
 var fastpathAV fastpathA
 var fastpathTV fastpathT
 

+ 1 - 1
codec/gen-helper.generated.go

@@ -10,7 +10,7 @@ package codec
 import "encoding"
 
 // GenVersion is the current version of codecgen.
-const GenVersion = 15
+const GenVersion = 16
 
 // This file is used to generate helper code for codecgen.
 // The values here i.e. genHelper(En|De)coder are not to be used directly by
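
Aside (assumed shape, not actual codecgen output): bumping GenVersion here is paired with the genVersion bump in codec/gen.go below, because every generated file carries an init-time guard that compares the version it was generated against with the library's GenVersion and panics, naming the stale file. Roughly:

package main

import (
	"runtime"
	"strconv"
)

// generatedGenVersion is baked in when codecgen runs; libraryGenVersion
// stands in for codec1978.GenVersion seen at build time.
const generatedGenVersion = 16
const libraryGenVersion = 16 // change either constant to see the guard fire

func init() {
	if generatedGenVersion != libraryGenVersion {
		_, file, _, _ := runtime.Caller(0)
		ver := strconv.FormatInt(int64(libraryGenVersion), 10)
		panic("codecgen version mismatch: current: " +
			strconv.Itoa(generatedGenVersion) + ", need " + ver + ". Re-generate file: " + file)
	}
}

func main() {}
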

+ 7 - 83
codec/gen.go

@@ -111,7 +111,8 @@ import (
 // v13: 20190603 removed DecodeString - use DecodeStringAsBytes instead
 // v14: 20190611 refactored nil handling: TryDecodeAsNil -> selective TryNil, etc
 // v15: 20190626 encDriver.EncodeString handles StringToRaw flag inside handle
-const genVersion = 15
+// v16: 20190629 refactoring for v1.1.6
+const genVersion = 16
 
 const (
 	genCodecPkg        = "codec1978"
@@ -348,31 +349,13 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
 	x.line("_, file, _, _ := runtime.Caller(0)")
 	x.linef("ver := strconv.FormatInt(int64(%sGenVersion), 10)", x.cpfx)
 	x.outf(`panic("codecgen version mismatch: current: %v, need " + ver + ". Re-generate file: " + file)`, genVersion)
-	// x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
-	// x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx)
 	x.linef("}")
 	if len(imKeys) > 0 {
 		x.line("if false { // reference the types, but skip this branch at build/run time")
-		// x.line("var _ byte")
-		// x.line("_ = strconv.ParseInt")
-		// var n int
-		// for k, t := range x.im {
 		for _, k := range imKeys {
 			t := x.im[k]
-			// x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
-			// n++
 			x.linef("var _ %s.%s", x.imn[k], t.Name())
 		}
-		// if n > 0 {
-		// 	x.out("_")
-		// 	for i := 1; i < n; i++ {
-		// 		x.out(", _")
-		// 	}
-		// 	x.out(" = v0")
-		// 	for i := 1; i < n; i++ {
-		// 		x.outf(", v%v", i)
-		// 	}
-		// }
 		x.line("} ") // close if false
 	}
 	x.line("}") // close init
@@ -501,13 +484,6 @@ func (x *genRunner) out(s string) {
 	}
 }
 
-// func (x *genRunner) outb(s []byte) {
-// 	_, err := x.w.Write(s)
-// 	if err != nil {
-// 		panic(err)
-// 	}
-// }
-
 func (x *genRunner) outf(s string, params ...interface{}) {
 	_, err := fmt.Fprintf(x.w, s, params...)
 	if err != nil {
@@ -515,13 +491,6 @@ func (x *genRunner) outf(s string, params ...interface{}) {
 	}
 }
 
-// func (x *genRunner) lineb(s []byte) {
-// 	x.outb(s)
-// 	if len(s) == 0 || s[len(s)-1] != '\n' {
-// 		x.out("\n")
-// 	}
-// }
-
 func (x *genRunner) line(s string) {
 	x.out(s)
 	if len(s) == 0 || s[len(s)-1] != '\n' {
@@ -910,7 +879,6 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
 			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
 		} else {
 			x.xtraSM(varname, t, true, false)
-			// x.encListFallback(varname, rtid, t)
 		}
 		x.linef("} // end block: if %s slice == nil", varname)
 	case reflect.Map:
@@ -925,7 +893,6 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
 			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
 		} else {
 			x.xtraSM(varname, t, true, false)
-			// x.encMapFallback(varname, rtid, t)
 		}
 		x.linef("} // end block: if %s map == nil", varname)
 	case reflect.Struct:
@@ -1019,13 +986,11 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 	numfieldsvar := genTempVarPfx + "q" + i
 	ti2arrayvar := genTempVarPfx + "r" + i
 	struct2arrvar := genTempVarPfx + "2arr" + i
-	// firstvar := genTempVarPfx + "2first" + i
 
 	x.line(sepVarname + " := !z.EncBinary()")
 	x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
 	x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
 	x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray)
-	// x.linef("var %s bool = true", firstvar)
 
 	tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
 
@@ -1040,9 +1005,7 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 		for j, si := range tisfi {
 			_ = j
 			if !si.omitEmpty() {
-				// x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName)
 				x.linef("true, // %s", si.fieldName)
-				// nn++
 				continue
 			}
 			var t2 reflect.StructField
@@ -1075,7 +1038,6 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 		x.line("}")
 		x.linef("_ = %s", numfieldsvar)
 	}
-	// x.linef("var %snn%s int", genTempVarPfx, i)
 
 	type genFQN struct {
 		i       string
@@ -1112,11 +1074,8 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 					} else {
 						q.nilLine.f(" || %s == nil", q.fqname)
 					}
-					// x.linef("if %s == nil { %s = true; goto LABEL%d }", varname3, isNilVarName, i)
-					// "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
 				}
 			}
-			// t2 = t.FieldByIndex(si.is)
 		}
 	}
 
@@ -1124,7 +1083,6 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 		q := &genFQNs[j]
 		if q.canNil {
 			x.linef("var %s bool = %s", q.nilVar, q.nilLine.v())
-			// x.linef("if %s { %s = true }", q.nilLine.v(), q.nilVar)
 		}
 	}
 
@@ -1155,8 +1113,6 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 	x.line("z.EncWriteArrayEnd()")
 	x.linef("} else {") // if not ti.toArray
 	if ti.anyOmitEmpty {
-		// nn = 0
-		// x.linef("var %snn%s = %v", genTempVarPfx, i, nn)
 		x.linef("var %snn%s int", genTempVarPfx, i)
 		x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
 		x.linef("z.EncWriteMapStart(%snn%s)", genTempVarPfx, i)
@@ -1189,7 +1145,6 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
 				x.linef("}")
 			}
 		}
-		// x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName)
 		x.line("z.EncWriteMapElemValue()")
 		if q.canNil {
 			x.line("if " + q.nilVar + " { r.EncodeNil() } else { ")
@@ -1232,7 +1187,6 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) {
 		if err != nil {
 			panic(err)
 		}
-		// x.linef("%s = sch%s", varname, i)
 		if elemBytes {
 			x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i)
 			x.line("}")
@@ -1258,10 +1212,8 @@ func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
 	// NOTE: Canonical Option is not honored
 	i := x.varsfx()
 	x.line("z.EncWriteMapStart(len(" + varname + "))")
-	// x.linef("var %sfirst%s = true", genTempVarPfx, i)
 	x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
 	x.linef("z.EncWriteMapElemKey()")
-	// x.linef("%sfirst%s = false", genTempVarPfx, i)
 	x.encVar(genTempVarPfx+"k"+i, t.Key())
 	x.line("z.EncWriteMapElemValue()")
 	x.encVar(genTempVarPfx+"v"+i, t.Elem())
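For illustration, a minimal sketch of the Canonical behaviour referenced in the NOTE above, assuming this package's public JsonHandle/NewEncoderBytes API: the generated map fallback ranges over the map directly, so only the reflection-based path below emits sorted keys.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	var h codec.JsonHandle
	h.Canonical = true // honored by the reflection-based encoder, not by generated map fallbacks

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected: {"a":1,"b":2,"c":3}
}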
@@ -1309,9 +1261,6 @@ func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *st
 			}
 		}
 	}
-	// if t2typ.Kind() == reflect.Ptr {
-	// 	varname3 = varname3 + t2.Name
-	// }
 	if nilbuf != nil {
 		if nilbufed {
 			nilbuf.s(" { ").s("// remove the if-true\n")
@@ -1382,25 +1331,6 @@ func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, che
 	// This could happen when decoding from a struct encoded as an array.
 	// For that, decVar should be called with canNil=true, to force true as its value.
 
-	// i := x.varsfx()
-	// if !canBeNil {
-	// 	canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
-	// }
-	//
-	// if canBeNil {
-	// 	var buf genBuf
-	// 	x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf)
-	// 	x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf)
-	// } else {
-	// 	x.line("// cannot be nil")
-	// }
-	// x.decVarMain(varname, i, t, checkNotNil)
-	// if canBeNil {
-	// 	x.line("} ")
-	// }
-
-	// x.decVarMain(varname, i, t, checkNotNil)
-
 	i := x.varsfx()
 	if t.Kind() == reflect.Ptr {
 		var buf genBuf
@@ -1421,7 +1351,6 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
 	//   - t is always a baseType T (not a *T, etc).
 	rtid := rt2id(t)
 	ti2 := x.ti.get(rtid, t)
-	// tptr := reflect.PtrTo(t)
 	if x.checkForSelfer(t, varname) {
 		if ti2.isFlag(tiflagSelfer) || ti2.isFlag(tiflagSelferPtr) {
 			x.line(varname + ".CodecDecodeSelf(d)")
@@ -1456,8 +1385,6 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
 	//   - the type implements (Text|JSON|Binary)(Unm|M)arshal
 
 	mi := x.varsfx()
-	// x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
-	// x.linef("_ = %sm%s", genTempVarPfx, mi)
 
 	var hasIf genIfClause
 	defer hasIf.end(x)
@@ -1487,7 +1414,6 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
 	// and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
 	if !x.nx && varname != genTopLevelVarName && genImportPath(t) != "" && t.Name() != "" {
 		// first check if extensions are configured, before doing the interface conversion
-		// x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
 		yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
 		x.linef("%s %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", hasIf.c(false), yy, varname, yy, varname, yy)
 	}
@@ -1536,7 +1462,6 @@ func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
 			x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
 		} else {
 			x.xtraSM(varname, t, false, isptr)
-			// x.decMapFallback(varname, rtid, t)
 		}
 	case reflect.Struct:
 		if inlist {
@@ -1772,13 +1697,11 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
 	default: // string
 		x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName)
 	}
-	// x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs)
 
 	x.line("z.DecReadMapElemValue()")
 	x.decStructMapSwitch(kName, varname, rtid, t)
 
 	x.line("} // end for " + tpfx + "j" + i)
-	// x.line("z.DecReadMapEnd()")
 }
 
 func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
@@ -1828,11 +1751,9 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
 	x.line("z.DecReadArrayElem()")
 	x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
 	x.line("}")
-	// x.line("z.DecReadArrayEnd()")
 }
 
 func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
-	// xdebugf("decStruct: t: %v", t)
 	// varname MUST be a ptr, or a struct field or a slice element.
 	i := x.varsfx()
 	x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
@@ -2175,9 +2096,12 @@ func genInternalDecCommandAsString(s string) string {
 
 func genInternalSortType(s string, elem bool) string {
 	for _, v := range [...]string{
-		"int", "uint", "float",
+		"int",
+		"uint",
+		"float",
 		"bool",
-		"string", "bytes", "[]uint8", "[]byte",
+		"string",
+		"bytes", "[]uint8", "[]byte",
 	} {
 		if v == "[]byte" || v == "[]uint8" {
 			v = "bytes"

+ 21 - 629
codec/helper.go

@@ -175,23 +175,6 @@ const (
 	// so structFieldInfo fits into 8 bytes
 	maxLevelsEmbedding = 14
 
-	// // useFinalizers=true configures finalizers to release pool'ed resources
-	// // acquired by Encoder/Decoder during their GC.
-	// //
-	// // Note that calling SetFinalizer is always expensive,
-	// // as code must be run on the systemstack even for SetFinalizer(t, nil).
-	// //
-	// // We document that folks SHOULD call Release() when done, or they can
-	// // explicitly call SetFinalizer themselves e.g.
-	// //    runtime.SetFinalizer(e, (*Encoder).Release)
-	// //    runtime.SetFinalizer(d, (*Decoder).Release)
-	// useFinalizers = false
-
-	// // usePool controls whether we use sync.Pool or not.
-	// //
-	// // sync.Pool can help manage memory use, but it may come at a performance cost.
-	// usePool = false
-
 	// xdebug controls whether xdebugf prints any output
 	xdebug = true
 )
@@ -202,7 +185,6 @@ var (
 
 	codecgen bool
 
-	// defPooler pooler
 	panicv panicHdl
 
 	refBitset    bitset32
@@ -215,34 +197,9 @@ var (
 	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
 )
 
-var (
-	pool4tiload = sync.Pool{New: func() interface{} { return new(typeInfoLoadArray) }}
-
-	// pool4sfiRv8   = sync.Pool{New: func() interface{} { return new([8]sfiRv) }}
-	// pool4sfiRv16  = sync.Pool{New: func() interface{} { return new([16]sfiRv) }}
-	// pool4sfiRv32  = sync.Pool{New: func() interface{} { return new([32]sfiRv) }}
-	// pool4sfiRv64  = sync.Pool{New: func() interface{} { return new([64]sfiRv) }}
-	// pool4sfiRv128 = sync.Pool{New: func() interface{} { return new([128]sfiRv) }}
-
-	// // dn = sync.Pool{ New: func() interface{} { x := new(decNaked); x.init(); return x } }
-
-	// pool4buf256 = sync.Pool{New: func() interface{} { return new([256]byte) }}
-	// pool4buf1k  = sync.Pool{New: func() interface{} { return new([1 * 1024]byte) }}
-	// pool4buf2k  = sync.Pool{New: func() interface{} { return new([2 * 1024]byte) }}
-	// pool4buf4k  = sync.Pool{New: func() interface{} { return new([4 * 1024]byte) }}
-	// pool4buf8k  = sync.Pool{New: func() interface{} { return new([8 * 1024]byte) }}
-	// pool4buf16k = sync.Pool{New: func() interface{} { return new([16 * 1024]byte) }}
-	// pool4buf32k = sync.Pool{New: func() interface{} { return new([32 * 1024]byte) }}
-	// pool4buf64k = sync.Pool{New: func() interface{} { return new([64 * 1024]byte) }}
-
-	// pool4mapStrU16 = sync.Pool{New: func() interface{} { return make(map[string]uint16, 16) }}
-	// pool4mapU16Str   = sync.Pool{New: func() interface{} { return make(map[uint16]string, 16) }}
-	// pool4mapU16Bytes = sync.Pool{New: func() interface{} { return make(map[uint16][]byte, 16) }}
-)
+var pool4tiload = sync.Pool{New: func() interface{} { return new(typeInfoLoadArray) }}
 
 func init() {
-	// defPooler.init()
-
 	refBitset = refBitset.
 		set(byte(reflect.Map)).
 		set(byte(reflect.Ptr)).
@@ -278,7 +235,6 @@ func init() {
 		set(byte(reflect.Complex128)).
 		set(byte(reflect.String))
 
-	// xdebugf("bitsets: ref: %b, isnil: %b, scalar: %b", refBitset, isnilBitset, scalarBitset)
 }
 
 type handleFlag uint8
@@ -294,15 +250,6 @@ type clsErr struct {
 	errClosed error // error on closing
 }
 
-// type entryType uint8
-
-// const (
-// 	entryTypeBytes entryType = iota // make this 0, so a comparison is cheap
-// 	entryTypeIo
-// 	entryTypeBufio
-// 	entryTypeUnset = 255
-// )
-
 type charEncoding uint8
 
 const (
@@ -385,12 +332,6 @@ const (
 	containerArrayEnd
 )
 
-// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
-// type sfiIdx struct {
-// 	name  string
-// 	index int
-// }
-
 // do not recurse if a containing type refers to an embedded type
 // which refers back to its containing type (via a pointer).
 // The second time this back-reference happens, break out,
@@ -414,8 +355,6 @@ const (
 
 // typeInfoLoad is a transient object used while loading up a typeInfo.
 type typeInfoLoad struct {
-	// fNames   []string
-	// encNames []string
 	etypes []uintptr
 	sfis   []structFieldInfo
 }
@@ -423,20 +362,12 @@ type typeInfoLoad struct {
 // typeInfoLoadArray is a cache object used to efficiently load up a typeInfo without
 // much allocation.
 type typeInfoLoadArray struct {
-	// fNames   [typeInfoLoadArrayLen]string
-	// encNames [typeInfoLoadArrayLen]string
 	sfis   [typeInfoLoadArraySfisLen]structFieldInfo
 	sfiidx [typeInfoLoadArraySfiidxLen]byte
 	etypes [typeInfoLoadArrayEtypesLen]uintptr
 	b      [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
 }
 
-// // cacheLineSafer denotes that a type is safe for cache-line access.
-// // This could mean that
-// type cacheLineSafer interface {
-// 	cacheLineSafe()
-// }
-
 // mirror json.Marshaler and json.Unmarshaler here,
 // so we don't import the encoding/json package
 
@@ -475,8 +406,6 @@ func (e codecError) Error() string {
 	return fmt.Sprintf("%s error: %v", e.name, e.err)
 }
 
-// type byteAccepter func(byte) bool
-
 var (
 	bigen               = binary.BigEndian
 	structInfoFieldName = "_struct"
@@ -715,18 +644,7 @@ type BasicHandle struct {
 	// Setting this value has no effect. It is maintained for backward compatibility.
 	ExplicitRelease bool
 
-	// flags handleFlag // holds flag for if binaryEncoding, jsonHandler, etc
-	// be    bool       // is handle a binary encoding?
-	// js    bool       // is handle javascript handler?
-	// n  byte // first letter of handle name
-	// _  uint16 // padding
-
 	// ---- cache line
-
-	// noBuiltInTypeChecker
-
-	// _      uint32 // padding
-	// r []uintptr     // rtids mapped to s above
 }
 
 // basicHandle returns an initialized BasicHandle from the Handle.
@@ -769,8 +687,6 @@ func (x *BasicHandle) init(hh Handle) {
 		if _, b := hh.(*JsonHandle); b {
 			f |= jsonHandleFlag
 		}
-		// _, x.js = hh.(*JsonHandle)
-		// x.n = hh.Name()[0]
 		atomic.StoreUint32(&x.inited, uint32(f))
 		// ensure MapType and SliceType are of correct type
 		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
@@ -945,7 +861,6 @@ func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *
 					xfnf2 := fastpathAV[idx].decfn
 					xptr2rt := reflect.PtrTo(xrt)
 					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
-						// xdebug2f("fd: convert from %v to %v", xrv.Type(), xrt)
 						if xrv.Kind() == reflect.Ptr {
 							xfnf2(d, xf, rvConvert(xrv, xptr2rt))
 						} else {
@@ -961,14 +876,16 @@ func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *
 				fn.fe = (*Encoder).kBool
 				fn.fd = (*Decoder).kBool
 			case reflect.String:
-				// Do not check this here, as it will statically set the function for a string
-				// type, and if the Handle is modified thereafter, behaviour is non-deterministic.
-				//
-				// if x.StringToRaw {
-				// 	fn.fe = (*Encoder).kStringToRaw
-				// } else {
-				// 	fn.fe = (*Encoder).kStringEnc
-				// }
+				// Do not use different functions based on StringToRaw option,
+				// as that will statically set the function for a string type,
+				// and if the Handle is modified thereafter, behaviour is non-deterministic.
+				// i.e. DO NOT DO:
+				//   if x.StringToRaw {
+				//   	fn.fe = (*Encoder).kStringToRaw
+				//   } else {
+				//   	fn.fe = (*Encoder).kStringEnc
+				//   }
+
 				fn.fe = (*Encoder).kString
 				fn.fd = (*Decoder).kString
 			case reflect.Int:
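The comment above is the pitfall that the following standalone sketch demonstrates (illustrative only; the handle/encFn types are stand-ins, not the package's internals): a function chosen while caching freezes the StringToRaw value, whereas a single function that reads the option per call tracks later Handle changes.

package main

import "fmt"

type handle struct{ StringToRaw bool }

type encFn func(h *handle, s string) string

// loadStatic freezes the choice at cache time - the problem described above.
func loadStatic(h *handle) encFn {
	if h.StringToRaw {
		return func(h *handle, s string) string { return "raw:" + s }
	}
	return func(h *handle, s string) string { return "str:" + s }
}

// encodeDynamic consults the option on every call, so behaviour stays
// deterministic even if the Handle is modified after functions were cached.
func encodeDynamic(h *handle, s string) string {
	if h.StringToRaw {
		return "raw:" + s
	}
	return "str:" + s
}

func main() {
	h := &handle{}
	cached := loadStatic(h) // cached while StringToRaw == false
	h.StringToRaw = true    // Handle modified afterwards
	fmt.Println(cached(h, "x"))        // str:x - stale, frozen behaviour
	fmt.Println(encodeDynamic(h, "x")) // raw:x - reflects the current option
}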
@@ -1028,11 +945,9 @@ func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *
 				fi.addrD = false
 				rt2 := reflect.SliceOf(ti.elem)
 				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+					// call fnVia directly, so fn(...) is not recursive, and can be inlined
 					d.h.fnVia(rt2, &x.rtidFns, true).fd(d, xf, rvGetSlice4Array(xrv, rt2))
-					// call fnVia directly, so it's not recursive, and fn(...) can be inlined
-					// d.h.fn(rt2).fd(d, xf, rvGetSlice4Array(xrv, rt2))
 				}
-				// fn.fd = (*Decoder).kArray
 			case reflect.Struct:
 				if ti.anyOmitEmpty ||
 					ti.isFlag(tiflagMissingFielder) ||
@@ -1079,12 +994,9 @@ type Handle interface {
 	// return the basic handle. It may not have been inited.
 	// Prefer to use basicHandle() helper function that ensures it has been inited.
 	getBasicHandle() *BasicHandle
-	// recreateEncDriver(encDriver) bool
 	newEncDriver() encDriver
 	newDecDriver() decDriver
 	isBinary() bool
-	// hasElemSeparators() bool
-	// IsBuiltinType(rtid uintptr) bool
 }
 
 // Raw represents raw formatted bytes.
@@ -1196,11 +1108,6 @@ func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
 	panicv.errorstr("InterfaceExt.UpdateExt is not supported")
 }
 
-// type extWrapper struct {
-// 	BytesExt
-// 	InterfaceExt
-// }
-
 type bytesExtWrapper struct {
 	interfaceExtFailer
 	BytesExt
@@ -1227,24 +1134,11 @@ func (textEncodingType) isBinary() bool { return false }
 // noBuiltInTypes is embedded into many types which do not support builtins
 // e.g. msgpack, simple, cbor.
 
-// type noBuiltInTypeChecker struct{}
-// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
-// type noBuiltInTypes struct{ noBuiltInTypeChecker }
-
 type noBuiltInTypes struct{}
 
 func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
 func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
 
-// type noStreamingCodec struct{}
-// func (noStreamingCodec) CheckBreak() bool { return false }
-// func (noStreamingCodec) hasElemSeparators() bool { return false }
-
-// type noElemSeparators struct{}
-
-// func (noElemSeparators) hasElemSeparators() (v bool)            { return }
-// func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }
-
 // bigenHelper.
 // Users must already slice the x completely, because we will not reslice.
 type bigenHelper struct {
@@ -1299,6 +1193,8 @@ func (o *extHandle) AddExt(rt reflect.Type, tag byte,
 // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
 func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
 	// o is a pointer, because we may need to initialize it
+	// We EXPECT o to be a non-nil pointer to an extHandle.
+
 	rk := rt.Kind()
 	for rk == reflect.Ptr {
 		rt = rt.Elem()
@@ -1317,13 +1213,7 @@ func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
 		// Instead, we silently treat as a no-op, and return.
 		return
 	}
-	// if o == nil {
-	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
-	// }
 	o2 := *o
-	// if o2 == nil {
-	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
-	// }
 	for i := range o2 {
 		v := &o2[i]
 		if v.rtid == rtid {
@@ -1470,11 +1360,6 @@ func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Valu
 	return v, true
 }
 
-// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
-// 	v, _ = si.field(v, update)
-// 	return v
-// }
-
 func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
 	keytype = valueTypeString // default
 	if stag == "" {
@@ -1521,9 +1406,6 @@ func (si *structFieldInfo) parseTag(stag string) {
 			switch s {
 			case "omitempty":
 				si.flagSet(structFieldInfoFlagOmitEmpty)
-				// si.omitEmpty = true
-				// case "toarray":
-				// 	si.toArray = true
 			}
 		}
 	}
@@ -1573,6 +1455,7 @@ type structFieldNode struct {
 
 func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
 	// return si.fieldval(x.v, x.update)
+
 	// Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
 	// This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
 	var valid bool
@@ -1658,13 +1541,6 @@ const (
 
 	tiflagMissingFielder
 	tiflagMissingFielderPtr
-
-	// tiflag
-	// tiflag
-	// tiflag
-	// tiflag
-	// tiflag
-	// tiflag
 )
 
 // typeInfo keeps static (non-changing readonly) information
@@ -1682,7 +1558,6 @@ type typeInfo struct {
 	pkgpath string
 
 	rtid uintptr
-	// rv0  reflect.Value // saved zero value, used if immutableKind
 
 	numMeth uint16 // number of methods
 	kind    uint8
@@ -1709,26 +1584,6 @@ type typeInfo struct {
 	// so beneficial for intXX, bool, slices, structs, etc
 	rv0 reflect.Value
 
-	// format of marshal type fields below: [btj][mu]p? OR csp?
-
-	// bm  bool // T is a binaryMarshaler
-	// bmp bool // *T is a binaryMarshaler
-	// bu  bool // T is a binaryUnmarshaler
-	// bup bool // *T is a binaryUnmarshaler
-	// tm  bool // T is a textMarshaler
-	// tmp bool // *T is a textMarshaler
-	// tu  bool // T is a textUnmarshaler
-	// tup bool // *T is a textUnmarshaler
-
-	// jm  bool // T is a jsonMarshaler
-	// jmp bool // *T is a jsonMarshaler
-	// ju  bool // T is a jsonUnmarshaler
-	// jup bool // *T is a jsonUnmarshaler
-	// cs  bool // T is a Selfer
-	// csp bool // *T is a Selfer
-	// mf  bool // T is a MissingFielder
-	// mfp bool // *T is a MissingFielder
-
 	// other flags, with individual bits representing if set.
 	flags tiflag
 
@@ -1809,11 +1664,6 @@ func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
 	// binary search. adapted from sort/search.go.
 	// Note: we use goto (instead of for loop) so this can be inlined.
 
-	// if sp == nil {
-	// 	return -1, nil
-	// }
-	// s := *sp
-
 	// h, i, j := 0, 0, len(s)
 	var h uint // var h, i uint
 	var j = uint(len(s))
@@ -1859,7 +1709,6 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 	}
 	ti.rv0 = reflect.Zero(rt)
 
-	// ti.comparable = rt.Comparable()
 	ti.numMeth = uint16(rt.NumMethod())
 
 	var b1, b2 bool
@@ -1899,7 +1748,6 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
 		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
 		x.rget(rt, rtid, omitEmpty, nil, &vv)
-		// ti.sfis = vv.sfis
 		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
 		pp.Put(pi)
 	case reflect.Map:
@@ -1914,7 +1762,6 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
 	case reflect.Array, reflect.Ptr:
 		ti.elem = rt.Elem()
 	}
-	// sfi = sfiSrc
 
 	x.mu.Lock()
 	sp = x.infos.load()
@@ -2061,9 +1908,6 @@ LOOP:
 		si.fieldName = f.Name
 		si.flagSet(structFieldInfoFlagReady)
 
-		// pv.encNames = append(pv.encNames, si.encName)
-
-		// si.ikind = int(f.Type.Kind())
 		if len(indexstack) > maxLevelsEmbedding-1 {
 			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
 				maxLevelsEmbedding-1, len(indexstack))
@@ -2081,10 +1925,12 @@ LOOP:
 
 func tiSep(name string) uint8 {
 	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
-	// return 0xfe - (name[0] & 63)
-	// return 0xfe - (name[0] & 63) - uint8(len(name))
-	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
-	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+	// Tried the following before settling on correct implementation:
+	//   return 0xfe - (name[0] & 63)
+	//   return 0xfe - (name[0] & 63) - uint8(len(name))
+	//   return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+	//   return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+
 	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
 }
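A small standalone check of the formula kept above (an illustration, not part of the package): for any non-empty name the result is at least 0xfe - 63 - 63 = 128, so the separator byte always falls outside the 7-bit ASCII range used by field-name characters.

package main

import "fmt"

func tiSep(name string) uint8 {
	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
}

func main() {
	for _, n := range []string{"A", "Name", "aVeryLongFieldName"} {
		fmt.Printf("%-20s -> %d\n", n, tiSep(n)) // every value is >= 128
	}
}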
 
@@ -2364,17 +2210,6 @@ func baseRV(v interface{}) (rv reflect.Value) {
 	return
 }
 
-// func newAddressableRV(t reflect.Type, k reflect.Kind) reflect.Value {
-// 	if k == reflect.Ptr {
-// 		return reflect.New(t.Elem()) // this is not addressable???
-// 	}
-// 	return reflect.New(t).Elem()
-// }
-
-// func newAddressableRV(t reflect.Type) reflect.Value {
-// 	return reflect.New(t).Elem()
-// }
-
 // ----
 
 // these "checkOverflow" functions must be inlinable, and not call anybody.
@@ -2662,119 +2497,6 @@ func (x bitset32) isset(pos byte) bool {
 
 // ------------
 
-// type strBytes struct {
-// 	s string
-// 	b []byte
-// 	// i uint16
-// }
-
-// ------------
-
-// type pooler struct {
-// 	// function-scoped pooled resources
-// 	tiload                                      sync.Pool // for type info loading
-// 	sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding
-
-// 	// lifetime-scoped pooled resources
-// 	// dn                                 sync.Pool // for decNaked
-// 	buf256, buf1k, buf2k, buf4k, buf8k, buf16k, buf32k sync.Pool // for [N]byte
-
-// 	mapStrU16, mapU16Str, mapU16Bytes sync.Pool // for Binc
-// 	// mapU16StrBytes sync.Pool // for Binc
-// }
-
-// func (p *pooler) init() {
-// 	p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
-
-// 	p.sfiRv8.New = func() interface{} { return new([8]sfiRv) }
-// 	p.sfiRv16.New = func() interface{} { return new([16]sfiRv) }
-// 	p.sfiRv32.New = func() interface{} { return new([32]sfiRv) }
-// 	p.sfiRv64.New = func() interface{} { return new([64]sfiRv) }
-// 	p.sfiRv128.New = func() interface{} { return new([128]sfiRv) }
-
-// 	// p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
-
-// 	p.buf256.New = func() interface{} { return new([256]byte) }
-// 	p.buf1k.New = func() interface{} { return new([1 * 1024]byte) }
-// 	p.buf2k.New = func() interface{} { return new([2 * 1024]byte) }
-// 	p.buf4k.New = func() interface{} { return new([4 * 1024]byte) }
-// 	p.buf8k.New = func() interface{} { return new([8 * 1024]byte) }
-// 	p.buf16k.New = func() interface{} { return new([16 * 1024]byte) }
-// 	p.buf32k.New = func() interface{} { return new([32 * 1024]byte) }
-// 	// p.buf64k.New = func() interface{} { return new([64 * 1024]byte) }
-
-// 	p.mapStrU16.New = func() interface{} { return make(map[string]uint16, 16) }
-// 	p.mapU16Str.New = func() interface{} { return make(map[uint16]string, 16) }
-// 	p.mapU16Bytes.New = func() interface{} { return make(map[uint16][]byte, 16) }
-// 	// p.mapU16StrBytes.New = func() interface{} { return make(map[uint16]strBytes, 16) }
-// }
-
-// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) {
-// 	return &p.strRv8, p.strRv8.Get()
-// }
-// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) {
-// 	return &p.strRv16, p.strRv16.Get()
-// }
-// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) {
-// 	return &p.strRv32, p.strRv32.Get()
-// }
-// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) {
-// 	return &p.strRv64, p.strRv64.Get()
-// }
-// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) {
-// 	return &p.strRv128, p.strRv128.Get()
-// }
-
-// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf1k, p.buf1k.Get()
-// }
-// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf2k, p.buf2k.Get()
-// }
-// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf4k, p.buf4k.Get()
-// }
-// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf8k, p.buf8k.Get()
-// }
-// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf16k, p.buf16k.Get()
-// }
-// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf32k, p.buf32k.Get()
-// }
-// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) {
-// 	return &p.buf64k, p.buf64k.Get()
-// }
-
-// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
-// 	return &p.tiload, p.tiload.Get()
-// }
-
-// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
-// 	return &p.dn, p.dn.Get()
-// }
-
-// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) {
-// 	sp := &(p.dn)
-// 	vv := sp.Get()
-// 	return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
-// }
-// func (p *pooler) decNakedGet() (v interface{}) {
-// 	return p.dn.Get()
-// }
-// func (p *pooler) tiLoadGet() (v interface{}) {
-// 	return p.tiload.Get()
-// }
-// func (p *pooler) decNakedPut(v interface{}) {
-// 	p.dn.Put(v)
-// }
-// func (p *pooler) tiLoadPut(v interface{}) {
-// 	p.tiload.Put(v)
-// }
-
-// ----------------------------------------------------
-
 type panicHdl struct{}
 
 func (panicHdl) errorv(err error) {
@@ -2840,295 +2562,6 @@ func (must) Float(s float64, err error) float64 {
 
 // -------------------
 
-/*
-
-type pooler struct {
-	pool  *sync.Pool
-	poolv interface{}
-}
-
-func (z *pooler) end() {
-	if z.pool != nil {
-		z.pool.Put(z.poolv)
-		z.pool, z.poolv = nil, nil
-	}
-}
-
-// -------------------
-
-const bytesBufPoolerMaxSize = 32 * 1024
-
-type bytesBufPooler struct {
-	pooler
-}
-
-func (z *bytesBufPooler) capacity() (c int) {
-	switch z.pool {
-	case nil:
-	case &pool4buf256:
-		c = 256
-	case &pool4buf1k:
-		c = 1024
-	case &pool4buf2k:
-		c = 2 * 1024
-	case &pool4buf4k:
-		c = 4 * 1024
-	case &pool4buf8k:
-		c = 8 * 1024
-	case &pool4buf16k:
-		c = 16 * 1024
-	case &pool4buf32k:
-		c = 32 * 1024
-	}
-	return
-}
-
-// func (z *bytesBufPooler) ensureCap(newcap int, bs []byte) (bs2 []byte) {
-// 	if z.pool == nil {
-// 		bs2 = z.get(newcap)[:len(bs)]
-// 		copy(bs2, bs)
-// 		return
-// 	}
-// 	var bp2 bytesBufPooler
-// 	bs2 = bp2.get(newcap)[:len(bs)]
-// 	copy(bs2, bs)
-// 	z.end()
-// 	*z = bp2
-// 	return
-// }
-
-// func (z *bytesBufPooler) buf() (buf []byte) {
-// 	switch z.pool {
-// 	case nil:
-// 	case &pool.buf256:
-// 		buf = z.poolv.(*[256]byte)[:]
-// 	case &pool.buf1k:
-// 		buf = z.poolv.(*[1 * 1024]byte)[:]
-// 	case &pool.buf2k:
-// 		buf = z.poolv.(*[2 * 1024]byte)[:]
-// 	case &pool.buf4k:
-// 		buf = z.poolv.(*[4 * 1024]byte)[:]
-// 	case &pool.buf8k:
-// 		buf = z.poolv.(*[8 * 1024]byte)[:]
-// 	case &pool.buf16k:
-// 		buf = z.poolv.(*[16 * 1024]byte)[:]
-// 	case &pool.buf32k:
-// 		buf = z.poolv.(*[32 * 1024]byte)[:]
-// 	}
-// 	return
-// }
-
-func (z *bytesBufPooler) get(bufsize int) (buf []byte) {
-	if !usePool {
-		return make([]byte, bufsize)
-	}
-
-	if bufsize > bytesBufPoolerMaxSize {
-		z.end()
-		return make([]byte, bufsize)
-	}
-
-	switch z.pool {
-	case nil:
-		goto NEW
-	case &pool4buf256:
-		if bufsize <= 256 {
-			buf = z.poolv.(*[256]byte)[:bufsize]
-		}
-	case &pool4buf1k:
-		if bufsize <= 1*1024 {
-			buf = z.poolv.(*[1 * 1024]byte)[:bufsize]
-		}
-	case &pool4buf2k:
-		if bufsize <= 2*1024 {
-			buf = z.poolv.(*[2 * 1024]byte)[:bufsize]
-		}
-	case &pool4buf4k:
-		if bufsize <= 4*1024 {
-			buf = z.poolv.(*[4 * 1024]byte)[:bufsize]
-		}
-	case &pool4buf8k:
-		if bufsize <= 8*1024 {
-			buf = z.poolv.(*[8 * 1024]byte)[:bufsize]
-		}
-	case &pool4buf16k:
-		if bufsize <= 16*1024 {
-			buf = z.poolv.(*[16 * 1024]byte)[:bufsize]
-		}
-	case &pool4buf32k:
-		if bufsize <= 32*1024 {
-			buf = z.poolv.(*[32 * 1024]byte)[:bufsize]
-		}
-	}
-	if buf != nil {
-		return
-	}
-	z.end()
-
-NEW:
-
-	// // Try to use binary search.
-	// // This is not optimal, as most folks select 1k or 2k buffers
-	// // so a linear search is better (sequence of if/else blocks)
-	// if bufsize < 1 {
-	// 	bufsize = 0
-	// } else {
-	// 	bufsize--
-	// 	bufsize /= 1024
-	// }
-	// switch bufsize {
-	// case 0:
-	// 	z.pool, z.poolv = pool.bytes1k()
-	// 	buf = z.poolv.(*[1 * 1024]byte)[:]
-	// case 1:
-	// 	z.pool, z.poolv = pool.bytes2k()
-	// 	buf = z.poolv.(*[2 * 1024]byte)[:]
-	// case 2, 3:
-	// 	z.pool, z.poolv = pool.bytes4k()
-	// 	buf = z.poolv.(*[4 * 1024]byte)[:]
-	// case 4, 5, 6, 7:
-	// 	z.pool, z.poolv = pool.bytes8k()
-	// 	buf = z.poolv.(*[8 * 1024]byte)[:]
-	// case 8, 9, 10, 11, 12, 13, 14, 15:
-	// 	z.pool, z.poolv = pool.bytes16k()
-	// 	buf = z.poolv.(*[16 * 1024]byte)[:]
-	// case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31:
-	// 	z.pool, z.poolv = pool.bytes32k()
-	// 	buf = z.poolv.(*[32 * 1024]byte)[:]
-	// default:
-	// 	z.pool, z.poolv = pool.bytes64k()
-	// 	buf = z.poolv.(*[64 * 1024]byte)[:]
-	// }
-	// return
-
-	if bufsize <= 256 {
-		z.pool, z.poolv = &pool4buf256, pool4buf256.Get() // pool.bytes1k()
-		buf = z.poolv.(*[256]byte)[:bufsize]
-	} else if bufsize <= 1*1024 {
-		z.pool, z.poolv = &pool4buf1k, pool4buf1k.Get() // pool.bytes1k()
-		buf = z.poolv.(*[1 * 1024]byte)[:bufsize]
-	} else if bufsize <= 2*1024 {
-		z.pool, z.poolv = &pool4buf2k, pool4buf2k.Get() // pool.bytes2k()
-		buf = z.poolv.(*[2 * 1024]byte)[:bufsize]
-	} else if bufsize <= 4*1024 {
-		z.pool, z.poolv = &pool4buf4k, pool4buf4k.Get() // pool.bytes4k()
-		buf = z.poolv.(*[4 * 1024]byte)[:bufsize]
-	} else if bufsize <= 8*1024 {
-		z.pool, z.poolv = &pool4buf8k, pool4buf8k.Get() // pool.bytes8k()
-		buf = z.poolv.(*[8 * 1024]byte)[:bufsize]
-	} else if bufsize <= 16*1024 {
-		z.pool, z.poolv = &pool4buf16k, pool4buf16k.Get() // pool.bytes16k()
-		buf = z.poolv.(*[16 * 1024]byte)[:bufsize]
-	} else if bufsize <= 32*1024 {
-		z.pool, z.poolv = &pool4buf32k, pool4buf32k.Get() // pool.bytes32k()
-		buf = z.poolv.(*[32 * 1024]byte)[:bufsize]
-		// } else {
-		// 	z.pool, z.poolv = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k()
-		// 	buf = z.poolv.(*[64 * 1024]byte)[:]
-	}
-	return
-}
-
-// ----------------
-
-type bytesBufSlicePooler struct {
-	bytesBufPooler
-	buf []byte
-}
-
-func (z *bytesBufSlicePooler) ensureExtraCap(num int) {
-	if cap(z.buf) < len(z.buf)+num {
-		z.ensureCap(len(z.buf) + num)
-	}
-}
-
-func (z *bytesBufSlicePooler) ensureCap(newcap int) {
-	if cap(z.buf) >= newcap {
-		return
-	}
-	var bs2 []byte
-	if z.pool == nil {
-		bs2 = z.bytesBufPooler.get(newcap)[:len(z.buf)]
-		if z.buf == nil {
-			z.buf = bs2
-		} else {
-			copy(bs2, z.buf)
-			z.buf = bs2
-		}
-		return
-	}
-	var bp2 bytesBufPooler
-	if newcap > bytesBufPoolerMaxSize {
-		bs2 = make([]byte, newcap)
-	} else {
-		bs2 = bp2.get(newcap)
-	}
-	bs2 = bs2[:len(z.buf)]
-	copy(bs2, z.buf)
-	z.end()
-	z.buf = bs2
-	z.bytesBufPooler = bp2
-}
-
-func (z *bytesBufSlicePooler) get(length int) {
-	z.buf = z.bytesBufPooler.get(length)
-}
-
-func (z *bytesBufSlicePooler) append(b byte) {
-	z.ensureExtraCap(1)
-	z.buf = append(z.buf, b)
-}
-
-func (z *bytesBufSlicePooler) appends(b []byte) {
-	z.ensureExtraCap(len(b))
-	z.buf = append(z.buf, b...)
-}
-
-func (z *bytesBufSlicePooler) end() {
-	z.bytesBufPooler.end()
-	z.buf = nil
-}
-
-func (z *bytesBufSlicePooler) resetBuf() {
-	if z.buf != nil {
-		z.buf = z.buf[:0]
-	}
-}
-
-// ----------------
-
-type sfiRvPooler struct {
-	pooler
-}
-
-func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) {
-	if newlen < 0 { // bounds-check-elimination
-		// cannot happen // here for bounds-check-elimination
-	} else if newlen <= 8 {
-		z.pool, z.poolv = &pool4sfiRv8, pool4sfiRv8.Get() // pool.sfiRv8()
-		fkvs = z.poolv.(*[8]sfiRv)[:newlen]
-	} else if newlen <= 16 {
-		z.pool, z.poolv = &pool4sfiRv16, pool4sfiRv16.Get() // pool.sfiRv16()
-		fkvs = z.poolv.(*[16]sfiRv)[:newlen]
-	} else if newlen <= 32 {
-		z.pool, z.poolv = &pool4sfiRv32, pool4sfiRv32.Get() // pool.sfiRv32()
-		fkvs = z.poolv.(*[32]sfiRv)[:newlen]
-	} else if newlen <= 64 {
-		z.pool, z.poolv = &pool4sfiRv64, pool4sfiRv64.Get() // pool.sfiRv64()
-		fkvs = z.poolv.(*[64]sfiRv)[:newlen]
-	} else if newlen <= 128 {
-		z.pool, z.poolv = &pool4sfiRv128, pool4sfiRv128.Get() // pool.sfiRv128()
-		fkvs = z.poolv.(*[128]sfiRv)[:newlen]
-	} else {
-		fkvs = make([]sfiRv, newlen)
-	}
-	return
-}
-
-*/
-
-// ----------------
-
 func freelistCapacity(length int) (capacity int) {
 	for capacity = 8; capacity < length; capacity *= 2 {
 	}
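A quick usage sketch of freelistCapacity as shown above (assuming its named return is simply returned after the loop): it rounds a requested length up to the next power of two, never below 8.

package main

import "fmt"

func freelistCapacity(length int) (capacity int) {
	for capacity = 8; capacity < length; capacity *= 2 {
	}
	return
}

func main() {
	for _, n := range []int{1, 8, 9, 600, 4096, 4097} {
		fmt.Println(n, "->", freelistCapacity(n)) // 8, 8, 16, 1024, 4096, 8192
	}
}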
@@ -3238,44 +2671,3 @@ func xdebugAnyf(colorcode, pattern string, args ...interface{}) {
 var _ = xdebug2f
 var _ = xdebugf
 var _ = isNaN32
-
-// func isImmutableKind(k reflect.Kind) (v bool) {
-// 	return false ||
-// 		k == reflect.Int ||
-// 		k == reflect.Int8 ||
-// 		k == reflect.Int16 ||
-// 		k == reflect.Int32 ||
-// 		k == reflect.Int64 ||
-// 		k == reflect.Uint ||
-// 		k == reflect.Uint8 ||
-// 		k == reflect.Uint16 ||
-// 		k == reflect.Uint32 ||
-// 		k == reflect.Uint64 ||
-// 		k == reflect.Uintptr ||
-// 		k == reflect.Float32 ||
-// 		k == reflect.Float64 ||
-// 		k == reflect.Bool ||
-// 		k == reflect.String
-// }
-
-// func timeLocUTCName(tzint int16) string {
-// 	if tzint == 0 {
-// 		return "UTC"
-// 	}
-// 	var tzname = []byte("UTC+00:00")
-// 	//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below.
-// 	//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
-// 	var tzhr, tzmin int16
-// 	if tzint < 0 {
-// 		tzname[3] = '-'
-// 		tzhr, tzmin = -tzint/60, (-tzint)%60
-// 	} else {
-// 		tzhr, tzmin = tzint/60, tzint%60
-// 	}
-// 	tzname[4] = timeDigits[tzhr/10]
-// 	tzname[5] = timeDigits[tzhr%10]
-// 	tzname[7] = timeDigits[tzmin/10]
-// 	tzname[8] = timeDigits[tzmin%10]
-// 	return string(tzname)
-// 	//return time.FixedZone(string(tzname), int(tzint)*60)
-// }

+ 0 - 34
codec/helper_not_unsafe.go

@@ -60,10 +60,6 @@ func rvSetSliceLen(rv reflect.Value, length int) {
 	rv.SetLen(length)
 }
 
-// func rvzeroaddr(t reflect.Type) reflect.Value {
-// 	return reflect.New(t).Elem()
-// }
-
 func rvZeroAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
 	return reflect.New(t).Elem()
 }
@@ -72,18 +68,6 @@ func rvConvert(v reflect.Value, t reflect.Type) (rv reflect.Value) {
 	return v.Convert(t)
 }
 
-// func rvisnilref(rv reflect.Value) bool {
-// 	return rv.IsNil()
-// }
-
-// func rvslen(rv reflect.Value) int {
-// 	return rv.Len()
-// }
-
-// func rv2rtid(rv reflect.Value) uintptr {
-// 	return rv4i(rv.Type()).Pointer()
-// }
-
 func rt2id(rt reflect.Type) uintptr {
 	return rv4i(rt).Pointer()
 }
@@ -122,14 +106,6 @@ func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) b
 	return false
 }
 
-// --------------------------
-// type ptrToRvMap struct{}
-
-// func (*ptrToRvMap) init() {}
-// func (*ptrToRvMap) get(i interface{}) reflect.Value {
-// 	return rv4i(i).Elem()
-// }
-
 // --------------------------
 type atomicClsErr struct {
 	v atomic.Value
@@ -309,9 +285,7 @@ func rvGetArrayBytesRO(rv reflect.Value, scratch []byte) (bs []byte) {
 
 func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
 	v = rvZeroAddrK(reflectArrayOf(rvGetSliceLen(rv), rv.Type().Elem()), reflect.Array)
-	// xdebugf("rvGetArray4Slice: b4 copy: rv: %#v, v: %#v", rv, v)
 	reflect.Copy(v, rv)
-	// xdebugf("rvGetArray4Slice: after copy: v: %#v", v)
 	return
 }
 
@@ -341,14 +315,6 @@ func rvGetString(rv reflect.Value) string {
 	return rv.String()
 }
 
-// func rvGetStringToRaw(rv reflect.Value) {
-// 	e.e.EncodeStringBytesRaw(bytesView(rv.String()))
-// }
-
-// func rvGetStringEnc(rv reflect.Value) {
-// 	e.e.EncodeStringEnc(cUTF8, rv.String())
-// }
-
 func rvGetFloat64(rv reflect.Value) float64 {
 	return rv.Float()
 }

+ 9 - 80
codec/helper_unsafe.go

@@ -24,10 +24,6 @@ import (
 //
 // We can also optimize
 //      - IsNil
-//
-// We cannot do the same for Cap, Len if we still have to do conditional.
-
-// var zeroRTv [4]uintptr
 
 const safeMode = false
 
@@ -102,13 +98,10 @@ func isNil(v interface{}) (rv reflect.Value, isnil bool) {
 	tk := rv.Kind()
 	isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
 	return
-	// fmt.Printf(">>>> isNil: isnil: %v, TYPE: %T, word: %v, *word: %v, type: %v, nil: %v\n",
-	// 	v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
 }
 
 func rv2ptr(urv *unsafeReflectValue) (ptr unsafe.Pointer) {
 	// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
-	// rv := *((*reflect.Value)(unsafe.Pointer(urv)))
 	if refBitset.isset(byte(urv.flag&unsafeFlagKindMask)) && urv.flag&unsafeFlagIndir != 0 {
 		ptr = *(*unsafe.Pointer)(urv.ptr)
 	} else {
@@ -120,11 +113,9 @@ func rv2ptr(urv *unsafeReflectValue) (ptr unsafe.Pointer) {
 func rv4i(i interface{}) (rv reflect.Value) {
 	// Unfortunately, we cannot get the "kind" of the interface directly here.
 	// We need the 'rtype', whose structure changes in different go versions.
-	// Finally, it's not clear that there is benefit to reimplementin it,
-	// as the "escapes(i)" is not clearly expensive.
-	//
-	// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-	// ui := (*unsafeIntf)(unsafe.Pointer(&i))
+	// Finally, it's not clear that there is benefit to reimplementing it,
+	// as the "escapes(i)" is not clearly expensive since we want i to exist on the heap.
+
 	return reflect.ValueOf(i)
 }
 
@@ -148,18 +139,7 @@ func rvSetSliceLen(rv reflect.Value, length int) {
 	(*unsafeString)(urv.ptr).Len = length
 }
 
-// func rvzeroaddr(t reflect.Type) (rv reflect.Value) {
-// 	// return reflect.New(t).Elem()
-// 	var ui = (*unsafeIntf)(unsafe.Pointer(&t))
-// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// 	urv.typ = ui.word
-// 	urv.flag = uintptr(t.Kind()) | unsafeFlagIndir | unsafeFlagAddr
-// 	urv.ptr = unsafe_New(ui.word)
-// 	return
-// }
-
 func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
-	// return reflect.New(t).Elem()
 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
 	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
 	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).word
@@ -175,19 +155,6 @@ func rvConvert(v reflect.Value, t reflect.Type) (rv reflect.Value) {
 	return
 }
 
-// func rvisnilref(rv reflect.Value) bool {
-// 	return (*unsafeReflectValue)(unsafe.Pointer(&rv)).ptr == nil
-// }
-
-// func rvslen(rv reflect.Value) int {
-// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// 	return (*unsafeString)(urv.ptr).Len
-// }
-
-// func rv2rtid(rv reflect.Value) uintptr {
-// 	return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
-// }
-
 func rt2id(rt reflect.Type) uintptr {
 	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
 }
@@ -248,7 +215,7 @@ func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) b
 		}
 		return isnil
 	case reflect.Ptr:
-		// isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type)
+		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
 		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
 		if deref {
 			if isnil {
@@ -503,8 +470,6 @@ func rvSlice(rv reflect.Value, length int) (v reflect.Value) {
 	uv.ptr = unsafe.Pointer(&x)
 	*(*unsafeSlice)(uv.ptr) = *(*unsafeSlice)(urv.ptr)
 	(*unsafeSlice)(uv.ptr).Len = length
-	// xdebugf("length: %d, slice: from: %#v, to: %#v",
-	// 	length, *(*unsafeSlice)(urv.ptr), *(*unsafeSlice)(uv.ptr))
 	return
 }
 
@@ -530,7 +495,6 @@ func rvGetArrayBytesRO(rv reflect.Value, scratch []byte) (bs []byte) {
 }
 
 func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
-	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
 	// It is possible that this slice is based off an array with a larger
 	// len that we want (where array len == slice cap).
 	// However, it is ok to create an array type that is a subset of the full
@@ -538,19 +502,16 @@ func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
 	// off of it. That is ok.
 	//
 	// Consequently, we use rvGetSliceLen, not rvGetSliceCap.
+
 	t := reflectArrayOf(rvGetSliceLen(rv), rv.Type().Elem())
+	// v = rvZeroAddrK(t, reflect.Array)
+
 	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
 	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
 	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).word
-	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
 
-	// t := reflectArrayOf(rvGetSliceLen(rv), rv.Type().Elem())
-	// v = rvZeroAddrK(t, reflect.Array)
-	// // xdebugf("rvGetArray4Slice: b4 copy: rv: %#v, v: %#v", rv, v)
-	// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-	// uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
-	// uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
-	// // xdebugf("rvGetArray4Slice: after copy: v: %#v", v)
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
 
 	return
 }
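The safe-reflection analogue of the comment above, as a standalone sketch (it mirrors the helper_not_unsafe.go version rather than this unsafe fast path): the array type is built from the slice's len, not its cap.

package main

import (
	"fmt"
	"reflect"
)

func arrayFromSlice(rv reflect.Value) reflect.Value {
	t := reflect.ArrayOf(rv.Len(), rv.Type().Elem()) // len, not cap
	v := reflect.New(t).Elem()                       // addressable zero array
	reflect.Copy(v, rv)
	return v
}

func main() {
	s := make([]int, 3, 8)
	s[0], s[1], s[2] = 1, 2, 3
	fmt.Println(arrayFromSlice(reflect.ValueOf(s)).Interface()) // [1 2 3]
}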
@@ -690,11 +651,6 @@ type unsafeMapHashIter struct {
 	// other fields are ignored
 }
 
-// type unsafeReflectMapIter struct {
-// 	m  unsafeReflectValue
-// 	it unsafe.Pointer
-// }
-
 type mapIter struct {
 	unsafeMapIter
 }
@@ -711,16 +667,6 @@ type unsafeMapIter struct {
 	// _ [2]uint64 // padding (cache-aligned)
 }
 
-// // pprof show that 13% of cbor encode time taken in
-// // allocation of unsafeMapIter.
-// // Options are to try to alloc on stack, or pool it.
-// // Easiest to pool it.
-// const unsafeMapIterUsePool = false
-
-// var unsafeMapIterPool = sync.Pool{
-// 	New: func() interface{} { return new(unsafeMapIter) },
-// }
-
 func (t *unsafeMapIter) ValidKV() (r bool) {
 	return false
 }
@@ -747,22 +693,14 @@ func (t *unsafeMapIter) Next() (r bool) {
 }
 
 func (t *unsafeMapIter) Key() (r reflect.Value) {
-	// return t.k
 	return
 }
 
 func (t *unsafeMapIter) Value() (r reflect.Value) {
-	// if t.mapvalues {
-	// 	return t.v
-	// }
 	return
 }
 
 func (t *unsafeMapIter) Done() {
-	// if unsafeMapIterUsePool && t != nil {
-	// 	*t = unsafeMapIter{}
-	// 	unsafeMapIterPool.Put(t)
-	// }
 }
 
 func unsafeMapSet(p, ptyp, p2 unsafe.Pointer, isref bool) {
@@ -782,20 +720,11 @@ func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
 
 func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
 	if rvIsNil(m) {
-		// return &unsafeMapIter{done: true}
 		t.done = true
 		return
 	}
 	t.done = false
 	t.started = false
-	// if unsafeMapIterUsePool {
-	// 	t = unsafeMapIterPool.Get().(*unsafeMapIter)
-	// } else {
-	//	t = new(unsafeMapIter)
-	// }
-	// t = new(unsafeMapIter)
-	// t.k = k
-	// t.v = v
 	t.mapvalues = mapvalues
 
 	var urv *unsafeReflectValue

+ 86 - 298
codec/json.go

@@ -18,9 +18,6 @@ package codec
 //   - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
 //     We implement it here.
 
-// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver
-// MUST not call one-another.
-
 import (
 	"bytes"
 	"encoding/base64"
@@ -130,17 +127,13 @@ func init() {
 
 type jsonEncDriver struct {
 	noBuiltInTypes
-	// w *encWr
 	h *JsonHandle
 
-	// bs []byte // for encoding strings
 	se interfaceExtWrapper
 
 	// ---- cpu cache line boundary?
-	// ds string // indent string
-	di int8 // indent per: if negative, use tabs
-	d  bool // indenting?
-	// dt bool   // indent using tabs
+	di int8   // indent per: if negative, use tabs
+	d  bool   // indenting?
 	dl uint16 // indent level
 	ks bool   // map key as string
 	is byte   // integer as string
@@ -211,27 +204,20 @@ func (e *jsonEncDriver) EncodeNil() {
 
 	// e.e.encWr.writeb(jsonLiteralNull)
 	e.e.encWr.writen([rwNLen]byte{'n', 'u', 'l', 'l'}, 4)
-
-	// if e.h.MapKeyAsString && e.e.c == containerMapKey {
-	// 	e.e.encWr.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6])
-	// } else {
-	// 	e.e.encWr.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
-	// }
 }
 
 func (e *jsonEncDriver) EncodeTime(t time.Time) {
 	// Do NOT use MarshalJSON, as it allocates internally.
 	// Instead, we call AppendFormat directly, using our scratch buffer (e.b)
+
 	if t.IsZero() {
 		e.EncodeNil()
 	} else {
 		e.b[0] = '"'
-		// b := t.AppendFormat(e.b[1:1], time.RFC3339Nano)
 		b := fmtTime(t, e.b[1:1])
 		e.b[len(b)+1] = '"'
 		e.e.encWr.writeb(e.b[:len(b)+2])
 	}
-	// v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.e.encWr.writeb(v)
 }
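A standalone illustration of the allocation point in the comment above, using only the standard library (fmtTime and e.b are the codec package's own scratch machinery): AppendFormat writes into a caller-owned buffer, while MarshalJSON allocates a fresh slice on every call.

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 7, 1, 12, 0, 0, 0, time.UTC)

	var scratch [48]byte // caller-owned buffer, reusable across calls
	b := scratch[:0]
	b = append(b, '"')
	b = t.AppendFormat(b, time.RFC3339Nano)
	b = append(b, '"')

	j, _ := t.MarshalJSON() // allocates a new []byte each time

	fmt.Println(string(b)) // "2019-07-01T12:00:00Z"
	fmt.Println(string(j)) // same text, freshly allocated
}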
 
 func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext) {
@@ -255,20 +241,20 @@ func (e *jsonEncDriver) EncodeRawExt(re *RawExt) {
 }
 
 func (e *jsonEncDriver) EncodeBool(b bool) {
+	// Use writen with an array instead of writeb with a slice
+	// i.e. in place of e.e.encWr.writeb(jsonLiteralTrueQ)
+	//      OR jsonLiteralTrue, jsonLiteralFalse, jsonLiteralFalseQ, etc
+
 	if e.ks && e.e.c == containerMapKey {
 		if b {
-			// e.e.encWr.writeb(jsonLiteralTrueQ)
 			e.e.encWr.writen([rwNLen]byte{'"', 't', 'r', 'u', 'e', '"'}, 6)
 		} else {
-			// e.e.encWr.writeb(jsonLiteralFalseQ)
 			e.e.encWr.writen([rwNLen]byte{'"', 'f', 'a', 'l', 's', 'e', '"'}, 7)
 		}
 	} else {
 		if b {
-			// e.e.encWr.writeb(jsonLiteralTrue)
 			e.e.encWr.writen([rwNLen]byte{'t', 'r', 'u', 'e'}, 4)
 		} else {
-			// e.e.encWr.writeb(jsonLiteralFalse)
 			e.e.encWr.writen([rwNLen]byte{'f', 'a', 'l', 's', 'e'}, 5)
 		}
 	}
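A hedged sketch of the writen-vs-writeb point in the comment above (the writer type and the [8]byte size are stand-ins, not the real encWr/rwNLen): passing a fixed-size array by value plus an explicit length avoids materialising a slice header for these tiny literals.

package main

import "fmt"

type writer struct{ b []byte }

// writen takes a fixed-size array by value plus the count of valid bytes.
func (w *writer) writen(v [8]byte, n int) { w.b = append(w.b, v[:n]...) }

// writeb takes an ordinary slice.
func (w *writer) writeb(v []byte) { w.b = append(w.b, v...) }

func main() {
	var w writer
	w.writen([8]byte{'t', 'r', 'u', 'e'}, 4) // array literal stays on the stack
	w.writeb([]byte("false"))                // slice literal, extra indirection
	fmt.Printf("%s\n", w.b)                  // truefalse
}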
@@ -320,14 +306,6 @@ func (e *jsonEncDriver) EncodeUint(v uint64) {
 	e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10))
 }
 
-// func (e *jsonEncDriver) EncodeFloat32(f float32) {
-// 	// e.encodeFloat(float64(f), 32)
-// 	// always encode all floats as IEEE 64-bit floating point.
-// 	// It also ensures that we can decode in full precision even if into a float32,
-// 	// as what is written is always to float64 precision.
-// 	e.EncodeFloat64(float64(f))
-// }
-
 func (e *jsonEncDriver) EncodeString(v string) {
 	if e.h.StringToRaw {
 		e.EncodeStringBytesRaw(bytesView(v))
@@ -354,11 +332,6 @@ func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) {
 	} else {
 		bs = e.b[:slen]
 	}
-	// if cap(e.bs) >= slen {
-	// 	e.bs = e.bs[:slen]
-	// } else {
-	// 	e.bs = make([]byte, slen)
-	// }
 	bs[0] = '"'
 	base64.StdEncoding.Encode(bs[1:], v)
 	bs[len(bs)-1] = '"'
@@ -368,10 +341,6 @@ func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) {
 	}
 }
 
-// func (e *jsonEncDriver) EncodeAsis(v []byte) {
-// 	e.e.encWr.writeb(v)
-// }
-
 // indent is done as below:
 //   - newline and indent are added before each mapKey or arrayElem
 //   - newline and indent are added before each ending,
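A small usage sketch of the indent rules described above, via the public JsonHandle API (the output shape is indicative only; the exact whitespace follows the rules listed in the comment).

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.Indent = 2 // indent by two spaces per level; a negative value uses tabs

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(map[string][]int{"a": {1, 2}}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // each map key and array element starts on a new, indented line
}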
@@ -487,11 +456,6 @@ func (e *jsonEncDriver) quoteStr(s string) {
 }
 
 func (e *jsonEncDriver) atEndOfEncode() {
-	// if e.e.c == 0 { // scalar written, output space
-	// 	e.e.encWr.writen1(' ')
-	// } else if e.h.TermWhitespace { // container written, output new-line
-	// 	e.e.encWr.writen1('\n')
-	// }
 	if e.h.TermWhitespace {
 		if e.e.c == 0 { // scalar written, output space
 			e.e.encWr.writen1(' ')
@@ -501,103 +465,16 @@ func (e *jsonEncDriver) atEndOfEncode() {
 	}
 }
 
-// ----------------
-
-/*
-type jsonEncDriverTypical jsonEncDriver
-
-func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
-	e.e.encWr.writen1('[')
-}
-
-func (e *jsonEncDriverTypical) WriteArrayElem() {
-	if e.e.c != containerArrayStart {
-		e.e.encWr.writen1(',')
-	}
-}
-
-func (e *jsonEncDriverTypical) WriteArrayEnd() {
-	e.e.encWr.writen1(']')
-}
-
-func (e *jsonEncDriverTypical) WriteMapStart(length int) {
-	e.e.encWr.writen1('{')
-}
-
-func (e *jsonEncDriverTypical) WriteMapElemKey() {
-	if e.e.c != containerMapStart {
-		e.e.encWr.writen1(',')
-	}
-}
-
-func (e *jsonEncDriverTypical) WriteMapElemValue() {
-	e.e.encWr.writen1(':')
-}
-
-func (e *jsonEncDriverTypical) WriteMapEnd() {
-	e.e.encWr.writen1('}')
-}
-
-func (e *jsonEncDriverTypical) EncodeBool(b bool) {
-	if b {
-		// e.e.encWr.writeb(jsonLiteralTrue)
-		e.e.encWr.writen([rwNLen]byte{'t', 'r', 'u', 'e'}, 4)
-	} else {
-		// e.e.encWr.writeb(jsonLiteralFalse)
-		e.e.encWr.writen([rwNLen]byte{'f', 'a', 'l', 's', 'e'}, 5)
-	}
-}
-
-func (e *jsonEncDriverTypical) EncodeInt(v int64) {
-	e.e.encWr.writeb(strconv.AppendInt(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
-	e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
-	fmt, prec := jsonFloatStrconvFmtPrec64(f)
-	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), 64))
-	// e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, jsonFloatStrconvFmtPrec64(f), 64))
-}
-
-func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
-	fmt, prec := jsonFloatStrconvFmtPrec32(f)
-	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], float64(f), fmt, int(prec), 32))
-}
-
-// func (e *jsonEncDriverTypical) encodeFloat(f float64, bitsize uint8) {
-// 	fmt, prec := jsonFloatStrconvFmtPrec(f, bitsize == 32)
-// 	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, int(bitsize)))
-// }
-
-// func (e *jsonEncDriverTypical) atEndOfEncode() {
-// 	if e.tw {
-// 		e.e.encWr.writen1(' ')
-// 	}
-// }
-
-*/
-
 // ----------
 
 type jsonDecDriver struct {
 	noBuiltInTypes
 	h *JsonHandle
-	// r *decRd
 
 	tok  uint8   // used to store the token read right after skipWhiteSpace
 	fnil bool    // found null
 	_    [2]byte // padding
 	bstr [4]byte // scratch used for string \UXXX parsing
-	// c     containerState
-
-	// ---- cpu cache line boundary (half - way)
-	// b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time
-	// ---- cpu cache line boundary?
-	// ---- writable fields during execution --- *try* to keep in sep cache line
-	// bs []byte // scratch - for parsing strings, bytes
 
 	buf []byte
 	se  interfaceExtWrapper
@@ -606,11 +483,6 @@ type jsonDecDriver struct {
 
 	// ---- cpu cache line boundary?
 
-	// b2 [cacheLineSize + 32]byte // scratch 2, used only for readUntil, decNumBytes
-
-	// n jsonNum
-
-	// ---- cpu cache line boundary?
 	d Decoder
 }
 
@@ -710,22 +582,6 @@ func (d *jsonDecDriver) ReadMapEnd() {
 	d.tok = 0
 }
 
-// func (d *jsonDecDriver) readDelim(xc uint8) {
-// 	d.advance()
-// 	if d.tok != xc {
-// 		d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok)
-// 	}
-// 	d.tok = 0
-// }
-
-// func (d *jsonDecDriver) readDelim(xc uint8) {
-// 	d.advance()
-// 	if d.tok != xc {
-// 		d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok)
-// 	}
-// 	d.tok = 0
-// }
-
 // func (d *jsonDecDriver) readDelim(xc uint8) {
 // 	d.advance()
 // 	if d.tok != xc {
@@ -734,27 +590,10 @@ func (d *jsonDecDriver) ReadMapEnd() {
 // 	d.tok = 0
 // }
 
-// func (d *jsonDecDriver) readDelim(xc uint8) {
-// 	if d.tok != xc {
-// 		d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok)
-// 	}
-// 	d.tok = 0
-// }
-
-// //go:noinline
 func (d *jsonDecDriver) readDelimError(xc uint8) {
 	d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok)
 }
 
-// func (d *jsonDecDriver) readLit(length, fromIdx uint8) {
-// 	// length here is always less than 8 (literals are: null, true, false)
-// 	bs := d.d.decRd.readx(int(length))
-// 	d.tok = 0
-// 	if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) {
-// 		d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs)
-// 	}
-// }
-
 func (d *jsonDecDriver) readLit4True() {
 	bs := d.d.decRd.readn(3)
 	d.tok = 0
@@ -876,7 +715,6 @@ func (d *jsonDecDriver) decNumBytes() (bs []byte) {
 		d.d.decRd.unreadn1()
 		bs = d.d.decRd.readTo(&jsonNumSet)
 	}
-	// xdebugf("decNumBytes: %s", bs)
 	d.tok = 0
 	return
 }
@@ -937,7 +775,6 @@ func (d *jsonDecDriver) decUint64ViaFloat(s []byte) (u uint64) {
 	f, err := parseFloat64(s)
 	if err != nil {
 		d.d.errorf("invalid syntax for integer: %s", s)
-		// d.d.errorv(err)
 	}
 	fi, ff := math.Modf(f)
 	if ff > 0 {
@@ -948,20 +785,7 @@ func (d *jsonDecDriver) decUint64ViaFloat(s []byte) (u uint64) {
 	return uint64(fi)
 }
 
-// func (d *jsonDecDriver) decodeFloat(bitsize int) (f float64) {
-// 	bs := d.decNumBytes()
-// 	if len(bs) == 0 {
-// 		return
-// 	}
-// 	f, err := parseFloat(bs, bitsize)
-// 	if err != nil {
-// 		d.d.errorv(err)
-// 	}
-// 	return
-// }
-
 func (d *jsonDecDriver) DecodeFloat64() (f float64) {
-	// return d.decodeFloat(64)
 	var err error
 	if bs := d.decNumBytes(); len(bs) > 0 {
 		if f, err = parseFloat64(bs); err != nil {
@@ -1042,11 +866,6 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 	// appendStringAsBytes returns a zero-len slice for both, so as not to reset d.buf.
 	// However, it sets a fnil field to true, so we can check if a null was found.
 
-	// d.appendStringAsBytes()
-	// if d.fnil {
-	// 	return nil
-	// }
-
 	if d.tok == 'n' {
 		d.readLit4Null()
 		return nil
@@ -1059,18 +878,8 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 	} else if slen <= cap(bs) {
 		bsOut = bs[:slen]
 	} else if zerocopy {
-		// if d.buf == nil {
-		// 	d.buf = d.bp.get(slen)
-		// }
 		d.buf = d.d.blist.check(d.buf, slen)
 		bsOut = d.buf
-		// if slen <= cap(d.buf) {
-		// 	bsOut = d.buf[:slen]
-		// } else {
-		// 	d.bp.get(slen)
-		// 	bsOut = d.buf
-		// 	// bsOut = make([]byte, slen)
-		// }
 	} else {
 		bsOut = make([]byte, slen)
 	}
@@ -1085,13 +894,7 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
 	return
 }
 
-// func (d *jsonDecDriver) DecodeString() (s string) {
-// 	d.appendStringAsBytes()
-// 	return d.sliceToString()
-// }
-
 func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
-	// defer func() { xdebug2f("DecodeStringAsBytes: %s", s) }()
 	d.advance()
 	if d.tok != '"' {
 		// d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
@@ -1129,8 +932,6 @@ func (d *jsonDecDriver) readString() (bs []byte) {
 }
 
 func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
-	// xdebug2f("appendStringAsBytes: found: '%c'", d.tok)
-
 	if d.buf != nil {
 		d.buf = d.buf[:0]
 	}
@@ -1143,30 +944,18 @@ func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
 	// e.g. end-of-slice, " or \,
 	// we will append the full range into the v slice before proceeding
 
-	// xdebug2f("start")
 	var cs = d.d.decRd.readUntil('"', true)
-	// xdebugf("appendStringAsBytes: len: %d, cs: %s", len(cs), cs)
-	// var cslen = uint(len(cs))
 	var c uint8
 	var i, cursor uint
 	for {
 		if i >= uint(len(cs)) {
-			// d.bp.appends(cs[cursor:])
-			// d.bp.ensureExtraCap(int(cslen - cursor))
 			d.buf = append(d.buf, cs[cursor:]...)
 			cs = d.d.decRd.readUntil('"', true)
-			// xdebugf("appendStringAsBytes: len: %d, cs: %s", len(cs), cs)
-			// cslen = uint(len(cs))
 			i, cursor = 0, 0
 			continue // this continue helps elide the cs[i] below
 		}
 		c = cs[i]
 		if c == '"' {
-			// if len(d.buf) > 0 {
-			// 	// d.bp.appends(cs[cursor:i])
-			// 	// d.bp.ensureExtraCap(int(i - cursor))
-			// 	d.buf = append(d.buf, cs[cursor:i]...)
-			// }
 			break
 		}
 		if c != '\\' {
@@ -1174,10 +963,7 @@ func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
 			continue
 		}
 
-		// d.bp.appends(cs[cursor:i])
-		// d.bp.ensureExtraCap(int(i - cursor))
 		d.buf = append(d.buf, cs[cursor:i]...)
-		// d.bp.ensureExtraCap(4) // NOTE: 1 is sufficient, but say 4 for now
 		i++
 		if i >= uint(len(cs)) {
 			d.d.errorf("need at least 1 more bytes for \\ escape sequence")
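The copy-in-spans idea described above, reduced to a standalone sketch (not the codec reader itself; it assumes well-formed input with a closing quote and handles only a couple of escapes): scan to the next quote or backslash, flush the whole clean span at once, then process the escape.

package main

import "fmt"

// unquoteSimple copies unescaped spans in bulk instead of byte-by-byte.
func unquoteSimple(cs []byte) []byte {
	var out []byte
	var i, cursor int
	for i < len(cs) {
		c := cs[i]
		if c == '"' { // end of string: flush the final span
			return append(out, cs[cursor:i]...)
		}
		if c != '\\' {
			i++
			continue
		}
		out = append(out, cs[cursor:i]...) // flush the span before the escape
		i++
		switch cs[i] {
		case 'n':
			out = append(out, '\n')
		case 't':
			out = append(out, '\t')
		default: // quote, backslash, etc: take the escaped byte literally
			out = append(out, cs[i])
		}
		i++
		cursor = i
	}
	return append(out, cs[cursor:]...)
}

func main() {
	fmt.Printf("%q\n", unquoteSimple([]byte(`ab\ncd"rest`))) // "ab\ncd"
}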
@@ -1215,28 +1001,11 @@ func (d *jsonDecDriver) appendStringAsBytes() (bs []byte) {
 			if d.d.bytes {
 				return cs
 			}
-			// d.bp.ensureExtraCap(len(cs))
 			d.buf = d.d.blist.check(d.buf, len(cs))
 			copy(d.buf, cs)
 		}
 	}
 	return d.buf
-	// if len(d.buf) == 0 && len(cs) > 0 {
-	// 	// return cs[:len(cs)-1]
-	// 	// returning cs was failing for bufio, as it seems bufio needs the buffer for other things.
-	// 	// only return cs if bytesDecReader
-	// 	cs = cs[:len(cs)-1]
-	// 	if d.d.bytes {
-	// 		return cs
-	// 	}
-	// 	// d.bp.ensureExtraCap(len(cs))
-	// 	d.buf = d.d.blist.check(d.buf, len(cs))
-	// 	copy(d.buf, cs)
-	// 	// xdebugf("cs: '%s', d.buf: '%s'", cs, d.buf)
-	// 	return d.buf
-	// }
-	// // xdebug2f("returning d.buf: %s", d.buf)
-	// return d.buf
 }
 
 func (d *jsonDecDriver) appendStringAsBytesSlashU(cs []byte, i uint) uint {
@@ -1350,7 +1119,6 @@ F:
 }
 
 func (d *jsonDecDriver) sliceToString(bs []byte) string {
-	// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
 	if d.d.is != nil && (jsonAlwaysReturnInternString || d.d.c == containerMapKey) {
 		return d.d.string(bs)
 	}
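For context, a standalone sketch of the interning idea behind d.d.string above (the cache shape is an assumption, not the decoder's actual structure): repeated map keys share one string instead of allocating a fresh one from []byte each time.

package main

import "fmt"

type interner map[string]string

func (m interner) str(b []byte) string {
	if s, ok := m[string(b)]; ok { // lookup with a converted key; the compiler elides this allocation
		return s
	}
	s := string(b) // first sighting: allocate once and remember it
	m[s] = s
	return s
}

func main() {
	in := interner{}
	a := in.str([]byte("name"))
	b := in.str([]byte("name"))
	fmt.Println(a == b, len(in)) // true 1
}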
@@ -1359,7 +1127,6 @@ func (d *jsonDecDriver) sliceToString(bs []byte) string {
 
 func (d *jsonDecDriver) DecodeNaked() {
 	z := d.d.naked()
-	// var decodeFurther bool
 
 	d.advance()
 	var bs []byte
@@ -1413,9 +1180,6 @@ func (d *jsonDecDriver) DecodeNaked() {
 			return
 		}
 	}
-	// if decodeFurther {
-	// 	d.s.sc.retryRead()
-	// }
 }
 
 //----------------------
@@ -1507,32 +1271,9 @@ func (h *JsonHandle) typical() bool {
 	return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
 }
 
-// func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) {
-// 	_, v = ed.(*jsonEncDriverTypical)
-// 	return v != h.typical()
-// }
-
-// func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) {
-// 	const allowTypical = true
-// 	var hd *jsonEncDriver
-// 	if allowTypical && h.typical() {
-// 		var v jsonEncDriverTypical
-// 		ee = &v
-// 		hd = &v.jsonEncDriver
-// 	} else {
-// 		var v jsonEncDriverGeneric
-// 		ee = &v
-// 		hd = &v.jsonEncDriver
-// 	}
-// 	hd.e, hd.h = e, h
-// 	ee.reset()
-// 	return
-// }
-
 func (h *JsonHandle) newEncDriver() encDriver {
 	var e = &jsonEncDriver{h: h}
 	e.e.e = e
-	// e.e.jenc = e
 	e.e.js = true
 	e.e.init(h)
 	e.reset()
@@ -1542,7 +1283,6 @@ func (h *JsonHandle) newEncDriver() encDriver {
 func (h *JsonHandle) newDecDriver() decDriver {
 	var d = &jsonDecDriver{h: h}
 	d.d.d = d
-	// d.d.jdec = d
 	d.d.js = true
 	d.d.jsms = h.MapKeyAsString
 	d.d.init(h)
@@ -1551,7 +1291,6 @@ func (h *JsonHandle) newDecDriver() decDriver {
 }
 
 func (e *jsonEncDriver) reset() {
-	// e.w = e.e.w()
 	// (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b)
 	e.typical = e.h.typical()
 	if e.h.HTMLCharsAsIs {
@@ -1560,48 +1299,24 @@ func (e *jsonEncDriver) reset() {
 		e.s = &jsonCharHtmlSafeSet
 	}
 	e.se.InterfaceExt = e.h.RawBytesExt
-	// if e.bs == nil {
-	// 	e.bs = e.b[:0]
-	// } else {
-	// 	e.bs = e.bs[:0]
-	// }
 	e.d, e.dl, e.di = false, 0, 0
 	if e.h.Indent != 0 {
 		e.d = true
 		e.di = int8(e.h.Indent)
 	}
-	// if e.h.Indent > 0 {
-	// 	e.d = true
-	// 	e.di = int8(e.h.Indent)
-	// } else if e.h.Indent < 0 {
-	// 	e.d = true
-	// 	// e.dt = true
-	// 	e.di = int8(-e.h.Indent)
-	// }
 	e.ks = e.h.MapKeyAsString
 	e.is = e.h.IntegerAsString
 }
 
 func (d *jsonDecDriver) reset() {
-	// d.r = d.d.r()
 	d.se.InterfaceExt = d.h.RawBytesExt
 	d.buf = d.d.blist.check(d.buf, 256)[:0]
-	// if d.buf != nil {
-	// 	d.buf = d.buf[:0]
-	// }
 	d.tok = 0
 	d.fnil = false
 }
 
 func (d *jsonDecDriver) atEndOfDecode() {}
 
-// func (d *jsonDecDriver) release() {
-// 	l := d.bp.capacity()
-// 	if l > 0 {
-// 		d.bp.end()
-// 	}
-// }
-
 // jsonFloatStrconvFmtPrec ...
 //
 // ensure that every float has an 'e' or '.' in it, for easy differentiation from integers.
@@ -1696,10 +1411,83 @@ var _ decDriverContainerTracker = (*jsonDecDriver)(nil)
 var _ encDriverContainerTracker = (*jsonEncDriver)(nil)
 var _ decDriver = (*jsonDecDriver)(nil)
 
-// var _ encDriver = (*jsonEncDriverGeneric)(nil)
-// var _ encDriver = (*jsonEncDriverTypical)(nil)
-// var _ (interface{ getJsonEncDriver() *jsonEncDriver }) = (*jsonEncDriverTypical)(nil)
-// var _ (interface{ getJsonEncDriver() *jsonEncDriver }) = (*jsonEncDriverGeneric)(nil)
-// var _ (interface{ getJsonEncDriver() *jsonEncDriver }) = (*jsonEncDriver)(nil)
-
 var _ encDriver = (*jsonEncDriver)(nil)
+
+// ----------------
+
+/*
+type jsonEncDriverTypical jsonEncDriver
+
+func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
+	e.e.encWr.writen1('[')
+}
+
+func (e *jsonEncDriverTypical) WriteArrayElem() {
+	if e.e.c != containerArrayStart {
+		e.e.encWr.writen1(',')
+	}
+}
+
+func (e *jsonEncDriverTypical) WriteArrayEnd() {
+	e.e.encWr.writen1(']')
+}
+
+func (e *jsonEncDriverTypical) WriteMapStart(length int) {
+	e.e.encWr.writen1('{')
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemKey() {
+	if e.e.c != containerMapStart {
+		e.e.encWr.writen1(',')
+	}
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemValue() {
+	e.e.encWr.writen1(':')
+}
+
+func (e *jsonEncDriverTypical) WriteMapEnd() {
+	e.e.encWr.writen1('}')
+}
+
+func (e *jsonEncDriverTypical) EncodeBool(b bool) {
+	if b {
+		// e.e.encWr.writeb(jsonLiteralTrue)
+		e.e.encWr.writen([rwNLen]byte{'t', 'r', 'u', 'e'}, 4)
+	} else {
+		// e.e.encWr.writeb(jsonLiteralFalse)
+		e.e.encWr.writen([rwNLen]byte{'f', 'a', 'l', 's', 'e'}, 5)
+	}
+}
+
+func (e *jsonEncDriverTypical) EncodeInt(v int64) {
+	e.e.encWr.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
+	e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
+	fmt, prec := jsonFloatStrconvFmtPrec64(f)
+	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), 64))
+	// e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, jsonFloatStrconvFmtPrec64(f), 64))
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
+	fmt, prec := jsonFloatStrconvFmtPrec32(f)
+	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], float64(f), fmt, int(prec), 32))
+}
+
+// func (e *jsonEncDriverTypical) encodeFloat(f float64, bitsize uint8) {
+// 	fmt, prec := jsonFloatStrconvFmtPrec(f, bitsize == 32)
+// 	e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, int(bitsize)))
+// }
+
+// func (e *jsonEncDriverTypical) atEndOfEncode() {
+// 	if e.tw {
+// 		e.e.encWr.writen1(' ')
+// 	}
+// }
+
+*/

+ 2 - 2
codec/mammoth2_codecgen_generated_test.go

@@ -36,10 +36,10 @@ type codecSelfer19781 struct{}
 func codecSelfer19781False() bool { return false }
 
 func init() {
-	if GenVersion != 15 {
+	if GenVersion != 16 {
 		_, file, _, _ := runtime.Caller(0)
 		ver := strconv.FormatInt(int64(GenVersion), 10)
-		panic("codecgen version mismatch: current: 15, need " + ver + ". Re-generate file: " + file)
+		panic("codecgen version mismatch: current: 16, need " + ver + ". Re-generate file: " + file)
 	}
 }
 

+ 3 - 30
codec/msgpack.go

@@ -2,8 +2,6 @@
 // Use of this source code is governed by a MIT license found in the LICENSE file.
 
 /*
-MSGPACK
-
 The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
 We need to maintain compatibility with it, and with how it encodes integer values
 without caring about the type.
@@ -199,7 +197,6 @@ var (
 type msgpackEncDriver struct {
 	noBuiltInTypes
 	encDriverNoopContainerWriter
-	// encNoSeparator
 	h *MsgpackHandle
 	x [8]byte
 	_ [6]uint64 // padding
@@ -322,9 +319,8 @@ func (e *msgpackEncDriver) EncodeTime(t time.Time) {
 
 func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext) {
 	var bs []byte
-	// var bufp bytesBufPooler
 	if ext == SelfExt {
-		bs = e.e.blist.get(1024)[:0] // bufp.get(1024)[:0]
+		bs = e.e.blist.get(1024)[:0]
 		e.e.sideEncode(v, &bs)
 	} else {
 		bs = ext.WriteExt(v)
@@ -340,7 +336,7 @@ func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext) {
 		e.EncodeStringBytesRaw(bs)
 	}
 	if ext == SelfExt {
-		e.e.blist.put(bs) // bufp.end()
+		e.e.blist.put(bs)
 	}
 }
 
@@ -393,19 +389,6 @@ func (e *msgpackEncDriver) EncodeString(s string) {
 	} else {
 		ct = msgpackContainerRawLegacy
 	}
-	// if e.h.StringToRaw {
-	// 	if e.h.WriteExt {
-	// 		ct = msgpackContainerBin
-	// 	} else {
-	// 		ct = msgpackContainerRawLegacy
-	// 	}
-	// } else {
-	// 	if e.h.WriteExt {
-	// 		ct = msgpackContainerStr
-	// 	} else {
-	// 		ct = msgpackContainerRawLegacy
-	// 	}
-	// }
 	e.writeContainerLen(ct, len(s))
 	if len(s) > 0 {
 		e.e.encWr.writestr(s)
@@ -451,8 +434,6 @@ type msgpackDecDriver struct {
 	bdRead bool
 	fnil   bool
 	noBuiltInTypes
-	// noStreamingCodec
-	// decNoSeparator
 	_ [6]uint64 // padding
 	d Decoder
 }
@@ -796,9 +777,6 @@ func (d *msgpackDecDriver) ContainerType() (vt valueType) {
 	} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
 		return valueTypeMap
 	}
-	// else {
-	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
-	// }
 	return valueTypeUnset
 }
 
@@ -963,7 +941,6 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
 // MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
 type MsgpackHandle struct {
 	binaryEncodingType
-	// noElemSeparators
 	BasicHandle
 
 	// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
@@ -1074,11 +1051,7 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint
 	// so that the body can be decoded on its own from the stream at a later time.
 
 	const fia byte = 0x94 // four-item array descriptor value
-	// Not sure why the panic of EOF is swallowed above.
-	// if bs1 := c.dec.r.readn1(); bs1 != fia {
-	// 	err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
-	// 	return
-	// }
+
 	var ba [1]byte
 	var n int
 	for {

+ 5 - 392
codec/reader.go

@@ -41,22 +41,6 @@ const (
 	unreadByteCanUnread
 )
 
-// func appendPool(bs []byte, b byte, bufp *bytesBufPooler) []byte {
-// 	if cap(bs)-len(bs) < 1 {
-// 		bs = bufp.ensureCap(len(bs)+1, bs)
-// 	}
-// 	bs = append(bs, b)
-// 	return bs
-// }
-
-// func appendPoolMany(bs []byte, b []byte, bufp *bytesBufPooler) []byte {
-// 	if cap(bs)-len(bs) < 1 {
-// 		bs = bufp.ensureCap(len(bs)+1, bs)
-// 	}
-// 	bs = append(bs, b...)
-// 	return bs
-// }
-
 // --------------------
 
 type ioDecReaderCommon struct {
@@ -98,19 +82,6 @@ func (z *ioDecReaderCommon) stopTrack() (bs []byte) {
 	return z.tr
 }
 
-// func (z *ioDecReaderCommon) resetBufr() {
-// 	if cap(z.bufr) < 128 {
-// 		blist.put(z.bufr)
-// 		z.bufr = blist.get(128)
-// 	}
-// 	z.bufr = z.bufr[:0]
-// }
-
-// func (z *ioDecReaderCommon) release() {
-// 	z.tr.end()
-// 	z.bufr.end()
-// }
-
 // ------------------------------------------
 
 // ioDecReader is a decReader that reads off an io.Reader.
@@ -311,16 +282,6 @@ LOOP:
 }
 
 func (z *ioDecReader) readUntil(stop byte, includeLast bool) []byte {
-	// for {
-	// 	token, eof := z.readn1eof()
-	// 	if eof {
-	// 		panic(io.EOF)
-	// 	}
-	// 	out = append(out, token)
-	// 	if token == stop {
-	// 		return
-	// 	}
-	// }
 	z.bufr = z.blist.check(z.bufr, 256)[:0]
 LOOP:
 	token, eof := z.readn1eof()
@@ -359,28 +320,17 @@ type bufioDecReader struct {
 
 	c   uint // cursor
 	buf []byte
-
-	// bp bytesBufSlicePooler
-
-	// err error
 }
 
 func (z *bufioDecReader) reset(r io.Reader, bufsize int, blist *bytesFreelist) {
 	z.ioDecReaderCommon.reset(r, blist)
 	z.c = 0
 	if cap(z.buf) < bufsize {
-		// z.bp.get(bufsize)
-		// z.buf = make([]byte, 0, bufsize)
 		z.buf = blist.get(bufsize)
 	}
 	z.buf = z.buf[:0]
 }
 
-// func (z *bufioDecReader) release() {
-// 	z.ioDecReaderCommon.release()
-// 	// z.bp.end()
-// }
-
 func (z *bufioDecReader) readb(p []byte) {
 	var n = uint(copy(p, z.buf[z.c:]))
 	z.n += n
@@ -498,30 +448,7 @@ func (z *bufioDecReader) readx(n uint) (bs []byte) {
 	return
 }
 
-// func (z *bufioDecReader) doTrack(y uint) {
-// 	z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14???
-// }
-
-// func (z *bufioDecReader) skipLoopFn(i uint) {
-// 	z.n += (i - z.c) - 1
-// 	i++
-// 	if z.trb {
-// 		// z.tr = append(z.tr, z.buf[z.c:i]...)
-// 		z.doTrack(i)
-// 	}
-// 	z.c = i
-// }
-
 func (z *bufioDecReader) skip(accept *bitset256) (token byte) {
-	// token, _ = z.search(nil, accept, 0, 1); return
-
-	// for i := z.c; i < len(z.buf); i++ {
-	// 	if token = z.buf[i]; !accept.isset(token) {
-	// 		z.skipLoopFn(i)
-	// 		return
-	// 	}
-	// }
-
 	i := z.c
 LOOP:
 	if i < uint(len(z.buf)) {
@@ -542,7 +469,6 @@ LOOP:
 }
 
 func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) {
-	// defer func() { xdebugf("skipFill '%c'", token) }()
 	z.n += uint(len(z.buf)) - z.c
 	if z.trb {
 		z.tr = append(z.tr, z.buf[z.c:]...)
@@ -565,12 +491,6 @@ func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) {
 				return
 			}
 		}
-		// for i := 0; i < n2; i++ {
-		// 	if token = z.buf[i]; !accept.isset(token) {
-		// 		z.skipLoopFn(i)
-		// 		return
-		// 	}
-		// }
 		z.n += uint(n2)
 		if z.trb {
 			z.tr = append(z.tr, z.buf...)
@@ -578,12 +498,6 @@ func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) {
 	}
 }
 
-// func (z *bufioDecReader) readLoopFn(i uint, out0 []byte) (out []byte) {
-// 	out = appendPool(out0, z.buf[z.c:i]...)
-// 	z.loopFn(i)
-// 	return
-// }
-
 func (z *bufioDecReader) loopFn(i uint) {
 	if z.trb {
 		z.tr = append(z.tr, z.buf[z.c:i]...) // z.doTrack(i)
@@ -591,28 +505,12 @@ func (z *bufioDecReader) loopFn(i uint) {
 	z.c = i
 }
 
-// func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) {
-// 	// out0 is never nil
-// 	z.n += (i - z.c) - 1
-// 	return z.readLoopFn(i, out0)
-// }
-
 func (z *bufioDecReader) readTo(accept *bitset256) (out []byte) {
-	// defer func() { xdebug2f("bufio: readTo: %s", out) }()
-	// _, out = z.search(in, accept, 0, 2); return
-
-	// for i := z.c; i < len(z.buf); i++ {
-	// 	if !accept.isset(z.buf[i]) {
-	// 		return z.readToLoopFn(i, nil)
-	// 	}
-	// }
-
 	i := z.c
 LOOP:
 	if i < uint(len(z.buf)) {
 		// if !accept.isset(z.buf[i]) {
 		if accept.check(z.buf[i]) == 0 {
-			// return z.readToLoopFn(i, nil)
 			// inline readToLoopFn here (for performance)
 			z.n += (i - z.c) - 1
 			out = z.buf[z.c:i]
@@ -657,11 +555,6 @@ func (z *bufioDecReader) readToFill(accept *bitset256) []byte {
 				return z.bufr
 			}
 		}
-		// for i := 0; i < n2; i++ {
-		// 	if !accept.isset(z.buf[i]) {
-		// 		return z.readToLoopFn(i, out)
-		// 	}
-		// }
 		z.bufr = append(z.bufr, z.buf...)
 		z.n += uint(n2)
 		if z.trb {
@@ -670,27 +563,11 @@ func (z *bufioDecReader) readToFill(accept *bitset256) []byte {
 	}
 }
 
-// func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) {
-// 	z.n += (i - z.c) - 1
-// 	return z.readLoopFn(i+1, out0)
-// }
-
 func (z *bufioDecReader) readUntil(stop byte, includeLast bool) (out []byte) {
-	// defer func() { xdebug2f("bufio: readUntil: %s", out) }()
-	// _, out = z.search(in, nil, stop, 4); return
-
-	// for i := z.c; i < len(z.buf); i++ {
-	// 	if z.buf[i] == stop {
-	// 		return z.readUntilLoopFn(i, nil)
-	// 	}
-	// }
-
 	i := z.c
 LOOP:
 	if i < uint(len(z.buf)) {
 		if z.buf[i] == stop {
-			// inline readUntilLoopFn
-			// return z.readUntilLoopFn(i, nil)
 			z.n += (i - z.c) - 1
 			i++
 			out = z.buf[z.c:i]
@@ -735,11 +612,6 @@ func (z *bufioDecReader) readUntilFill(stop byte) []byte {
 				return z.bufr
 			}
 		}
-		// for i := 0; i < n2; i++ {
-		// 	if z.buf[i] == stop {
-		// 		return z.readUntilLoopFn(i, out)
-		// 	}
-		// }
 		z.bufr = append(z.bufr, z.buf...)
 		z.n += n2
 		if z.trb {
@@ -782,54 +654,8 @@ func (z *bytesDecReader) readx(n uint) (bs []byte) {
 	// as more computation is required to decipher the pointer start position.
 	// However, we do it only once, and it's better than reslicing both z.b and return value.
 
-	// if n <= 0 {
-	// } else if z.a == 0 {
-	// 	panic(io.EOF)
-	// } else if n > z.a {
-	// 	panic(io.ErrUnexpectedEOF)
-	// } else {
-	// 	c0 := z.c
-	// 	z.c = c0 + n
-	// 	z.a = z.a - n
-	// 	bs = z.b[c0:z.c]
-	// }
-	// return
-
-	// if n == 0 {
-	// 	return
-	// }
 	z.c += n
-	// if z.c > uint(len(z.b)) {
-	// 	z.c = uint(len(z.b))
-	// 	panic(io.EOF)
-	// }
-
-	// bs = z.b[z.c-n : z.c]
-	// return
 	return z.b[z.c-n : z.c]
-
-	// if n == 0 {
-	// } else if z.c+n > uint(len(z.b)) {
-	// 	z.c = uint(len(z.b))
-	// 	panic(io.EOF)
-	// } else {
-	// 	z.c += n
-	// 	bs = z.b[z.c-n : z.c]
-	// }
-	// return
-
-	// if n == 0 {
-	// 	return
-	// }
-	// if z.c == uint(len(z.b)) {
-	// 	panic(io.EOF)
-	// }
-	// if z.c+n > uint(len(z.b)) {
-	// 	panic(io.ErrUnexpectedEOF)
-	// }
-	// // z.a -= n
-	// z.c += n
-	// return z.b[z.c-n : z.c]
 }
 
 func (z *bytesDecReader) readb(bs []byte) {
@@ -846,9 +672,6 @@ func (z *bytesDecReader) readn1() (v uint8) {
 
 	v = z.b[z.c]
 	z.c++
-	// v = z.b[z.c] // cost = 7
-	// z.c++ // cost = 4
-	// z.a--
 	return
 }
 
@@ -857,60 +680,19 @@ func (z *bytesDecReader) readn(num uint8) (bs [rwNLen]byte) {
 	// 	panic(io.EOF)
 	// }
 
-	// _ = z.b[z.c:z.c+uint(num)]
-	// _ = bs[0]
-	// _ = bs[num-1]
-	// _ = z.b[z.c]
-	// _ = z.b[z.c+uint(num-1)]
-
 	// for bounds-check elimination, reslice z.b and ensure bs is within len
 	// bb := z.b[z.c:][:num]
 	bb := z.b[z.c : z.c+uint(num)]
 	_ = bs[len(bb)-1]
+	// for i := uint(0); i < uint(len(bb)); i++ {
 	for i := 0; i < len(bb); i++ {
-		// for i := uint(0); i < uint(len(bb)); i++ {
 		bs[i] = bb[i]
 	}
 
-	// for i := uint8(0); i < num; i++ {
-	// 	bs[i] = z.b[z.c+uint(i)]
-	// }
-
-	// for i := num; i > 0; i-- {
-	// 	// xdebugf("i: %d", i)
-	// 	bs[i-1] = z.b[z.c+uint(i-1)]
-	// }
-
-	// copy(bs[:], z.b[z.c:z.c+uint(num)])
 	z.c += uint(num)
 	return
 }
 
-// func (z *bytesDecReader) readn4() (bs [4]byte) {
-// 	// if z.c+3 >= uint(len(z.b)) {
-// 	// 	panic(io.EOF)
-// 	// }
-
-// 	// copy(bs[:], z.b[z.c:z.c+4])
-// 	bs[3] = z.b[z.c+3]
-// 	bs[2] = z.b[z.c+2]
-// 	bs[1] = z.b[z.c+1]
-// 	bs[0] = z.b[z.c]
-// 	z.c += 4
-// 	return
-// }
-
-// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
-// 	if z.a == 0 {
-// 		eof = true
-// 		return
-// 	}
-// 	v = z.b[z.c]
-// 	z.c++
-// 	z.a--
-// 	return
-// }
-
 func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
 	i := z.c
 	// if i == len(z.b) {
@@ -951,42 +733,6 @@ LOOP:
 
 func (z *bytesDecReader) readTo(accept *bitset256) (out []byte) {
 	i := z.c
-	// if i == uint(len(z.b)) {
-	// 	panic(io.EOF)
-	// }
-
-	// Replace loop with goto construct, so that this can be inlined
-	// for i := z.c; i < blen; i++ {
-	// 	if !accept.isset(z.b[i]) {
-	// 		out = z.b[z.c:i]
-	// 		z.a -= (i - z.c)
-	// 		z.c = i
-	// 		return
-	// 	}
-	// }
-	// out = z.b[z.c:]
-	// z.a, z.c = 0, blen
-	// return
-
-	// 	i := z.c
-	// LOOP:
-	// 	if i < blen {
-	// 		if accept.isset(z.b[i]) {
-	// 			i++
-	// 			goto LOOP
-	// 		}
-	// 		out = z.b[z.c:i]
-	// 		z.a -= (i - z.c)
-	// 		z.c = i
-	// 		return
-	// 	}
-	// 	out = z.b[z.c:]
-	// 	// z.a, z.c = 0, blen
-	// 	z.a = 0
-	// 	z.c = blen
-	// 	return
-
-	// c := i
 LOOP:
 	if i < uint(len(z.b)) {
 		if accept.isset(z.b[i]) {
@@ -996,29 +742,12 @@ LOOP:
 	}
 
 	out = z.b[z.c:i]
-	// z.a -= (i - z.c)
 	z.c = i
 	return // z.b[c:i]
-	// z.c, i = i, z.c
-	// return z.b[i:z.c]
 }
 
 func (z *bytesDecReader) readUntil(stop byte, includeLast bool) (out []byte) {
 	i := z.c
-	// if i == len(z.b) {
-	// 	panic(io.EOF)
-	// }
-
-	// Replace loop with goto construct, so that this can be inlined
-	// for i := z.c; i < blen; i++ {
-	// 	if z.b[i] == stop {
-	// 		i++
-	// 		out = z.b[z.c:i]
-	// 		z.a -= (i - z.c)
-	// 		z.c = i
-	// 		return
-	// 	}
-	// }
 LOOP:
 	// if i < uint(len(z.b)) {
 	if z.b[i] == stop {
@@ -1036,8 +765,6 @@ LOOP:
 	goto LOOP
 	// }
 	// panic(io.EOF)
-	// z.a = 0
-	// z.c = blen
 }
 
 func (z *bytesDecReader) track() {
@@ -1051,7 +778,6 @@ func (z *bytesDecReader) stopTrack() (bs []byte) {
 // --------------
 
 type decRd struct {
-	// esep     bool // has elem separators
 	mtr bool // is maptype a known type?
 	str bool // is slicetype a known type?
 
@@ -1060,7 +786,6 @@ type decRd struct {
 	jsms bool // is json handle, and MapKeyAsString
 	cbor bool // is cbor handle
 
-	// typ   entryType
 	bytes bool // is bytes reader
 	bufio bool // is this a bufioDecReader?
 
@@ -1085,14 +810,6 @@ type decRd struct {
 // Instead, we have an if/else-if/else block so that IO calls do not have to jump through
 // a second unnecessary function call.
 
-// func (z *decRd) release() {
-// 	if z.bytes {
-// 	} else if z.bufio {
-// 		z.bi.release()
-// 	} else {
-// 		z.ri.release()
-// 	}
-// }
 func (z *decRd) numread() uint {
 	if z.bytes {
 		return z.rb.numread()
@@ -1202,6 +919,10 @@ func (z *decRd) readUntil(stop byte, includeLast bool) (out []byte) {
 }
 
 /*
+// If Go's inlining gets better and the bytesDecReader methods can be inlined,
+// then we can revert to using these 2 functions, so that the bytesDecReader
+// methods are inlined and the IO paths call out to a function.
+
 func (z *decRd) unreadn1() {
 	if z.bytes {
 		z.rb.unreadn1()
@@ -1313,112 +1034,4 @@ func (z *decRd) readUntilIO(stop byte) (out []byte) {
 }
 */
 
-/*
-func (z *decRd) numread() int {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.numread()
-	case entryTypeIo:
-		return z.ri.numread()
-	default:
-		return z.bi.numread()
-	}
-}
-func (z *decRd) track() {
-	switch z.typ {
-	case entryTypeBytes:
-		z.rb.track()
-	case entryTypeIo:
-		z.ri.track()
-	default:
-		z.bi.track()
-	}
-}
-func (z *decRd) stopTrack() []byte {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.stopTrack()
-	case entryTypeIo:
-		return z.ri.stopTrack()
-	default:
-		return z.bi.stopTrack()
-	}
-}
-
-func (z *decRd) unreadn1() {
-	switch z.typ {
-	case entryTypeBytes:
-		z.rb.unreadn1()
-	case entryTypeIo:
-		z.ri.unreadn1()
-	default:
-		z.bi.unreadn1()
-	}
-}
-func (z *decRd) readx(n int) []byte {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.readx(n)
-	case entryTypeIo:
-		return z.ri.readx(n)
-	default:
-		return z.bi.readx(n)
-	}
-}
-func (z *decRd) readb(s []byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		z.rb.readb(s)
-	case entryTypeIo:
-		z.ri.readb(s)
-	default:
-		z.bi.readb(s)
-	}
-}
-func (z *decRd) readn1() uint8 {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.readn1()
-	case entryTypeIo:
-		return z.ri.readn1()
-	default:
-		return z.bi.readn1()
-	}
-}
-func (z *decRd) skip(accept *bitset256) (token byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.skip(accept)
-	case entryTypeIo:
-		return z.ri.skip(accept)
-	default:
-		return z.bi.skip(accept)
-	}
-}
-func (z *decRd) readTo(accept *bitset256) (out []byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.readTo(accept)
-	case entryTypeIo:
-		return z.ri.readTo(accept)
-	default:
-		return z.bi.readTo(accept)
-	}
-}
-func (z *decRd) readUntil(stop byte) (out []byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		return z.rb.readUntil(stop)
-	case entryTypeIo:
-		return z.ri.readUntil(stop)
-	default:
-		return z.bi.readUntil(stop)
-	}
-}
-
-*/
-
 var _ decReader = (*decRd)(nil)
-
-// // register these here, so that staticcheck stops barfing
-// var _ = (*bytesDecReader).readUntil

+ 0 - 3
codec/rpc.go

@@ -97,9 +97,6 @@ func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
 		if writeObj2 {
 			err = c.enc.Encode(obj2)
 		}
-		// if err == nil && c.f != nil {
-		// 	err = c.f.Flush()
-		// }
 	}
 	if c.f != nil {
 		if err == nil {

+ 2 - 19
codec/simple.go

@@ -33,11 +33,8 @@ const (
 type simpleEncDriver struct {
 	noBuiltInTypes
 	encDriverNoopContainerWriter
-	// encNoSeparator
 	h *SimpleHandle
 	b [8]byte
-	// c containerState
-	// encDriverTrackContainerWriter
 	_ [6]uint64 // padding (cache-aligned)
 	e Encoder
 }
@@ -166,10 +163,6 @@ func (e *simpleEncDriver) WriteMapStart(length int) {
 	e.encLen(simpleVdMap, length)
 }
 
-// func (e *simpleEncDriver) EncodeSymbol(v string) {
-// 	e.EncodeStringEnc(cUTF8, v)
-// }
-
 func (e *simpleEncDriver) EncodeString(v string) {
 	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" {
 		e.EncodeNil()
@@ -216,10 +209,7 @@ type simpleDecDriver struct {
 	bdRead bool
 	bd     byte
 	fnil   bool
-	// c      containerState
-	// b      [scratchByteArrayLen]byte
 	noBuiltInTypes
-	// noStreamingCodec
 	decDriverNoopContainerReader
 	_ [6]uint64 // padding
 	d Decoder
@@ -280,12 +270,7 @@ func (d *simpleDecDriver) ContainerType() (vt valueType) {
 	case simpleVdMap, simpleVdMap + 1,
 		simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
 		return valueTypeMap
-		// case simpleVdTime:
-		// 	return valueTypeTime
 	}
-	// else {
-	// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
-	// }
 	return valueTypeUnset
 }
 
@@ -319,7 +304,8 @@ func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
 		d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
 		return
 	}
-	// don't do this check, because callers may only want the unsigned value.
+	// DO NOT do this check below, because callers may only want the unsigned value:
+	//
 	// if ui > math.MaxInt64 {
 	// 	d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
 	//		return
@@ -623,7 +609,6 @@ func (d *simpleDecDriver) DecodeNaked() {
 // The full spec will be published soon.
 type SimpleHandle struct {
 	binaryEncodingType
-	// noElemSeparators
 	BasicHandle
 	// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
 	EncZeroValuesAsNil bool
@@ -634,8 +619,6 @@ type SimpleHandle struct {
 // Name returns the name of the handle: simple
 func (h *SimpleHandle) Name() string { return "simple" }
 
-// func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX
-
 func (h *SimpleHandle) newEncDriver() encDriver {
 	var e = &simpleEncDriver{h: h}
 	e.e.e = e

+ 2 - 2
codec/values_codecgen_generated_test.go

@@ -37,10 +37,10 @@ type codecSelfer19780 struct{}
 func codecSelfer19780False() bool { return false }
 
 func init() {
-	if GenVersion != 15 {
+	if GenVersion != 16 {
 		_, file, _, _ := runtime.Caller(0)
 		ver := strconv.FormatInt(int64(GenVersion), 10)
-		panic("codecgen version mismatch: current: 15, need " + ver + ". Re-generate file: " + file)
+		panic("codecgen version mismatch: current: 16, need " + ver + ". Re-generate file: " + file)
 	}
 	if false { // reference the types, but skip this branch at build/run time
 		var _ time.Time

+ 2 - 247
codec/writer.go

@@ -17,103 +17,6 @@ type encWriter interface {
 	end()
 }
 
-// type ioEncWriterWriter interface {
-// 	WriteByte(c byte) error
-// 	WriteString(s string) (n int, err error)
-// 	Write(p []byte) (n int, err error)
-// }
-
-// ---------------------------------------------
-
-/*
-
-type ioEncStringWriter interface {
-	WriteString(s string) (n int, err error)
-}
-
-// ioEncWriter implements encWriter and can write to an io.Writer implementation
-type ioEncWriter struct {
-	w  io.Writer
-	ww io.Writer
-	bw io.ByteWriter
-	sw ioEncStringWriter
-	fw ioFlusher
-	b  [8]byte
-}
-
-func (z *ioEncWriter) reset(w io.Writer) {
-	z.w = w
-	var ok bool
-	if z.bw, ok = w.(io.ByteWriter); !ok {
-		z.bw = z
-	}
-	if z.sw, ok = w.(ioEncStringWriter); !ok {
-		z.sw = z
-	}
-	z.fw, _ = w.(ioFlusher)
-	z.ww = w
-}
-
-func (z *ioEncWriter) WriteByte(b byte) (err error) {
-	z.b[0] = b
-	_, err = z.w.Write(z.b[:1])
-	return
-}
-
-func (z *ioEncWriter) WriteString(s string) (n int, err error) {
-	return z.w.Write(bytesView(s))
-}
-
-func (z *ioEncWriter) writeb(bs []byte) {
-	if _, err := z.ww.Write(bs); err != nil {
-		panic(err)
-	}
-}
-
-func (z *ioEncWriter) writestr(s string) {
-	if _, err := z.sw.WriteString(s); err != nil {
-		panic(err)
-	}
-}
-
-func (z *ioEncWriter) writeqstr(s string) {
-	writestr("\"" + s + "\"")
-}
-
-func (z *ioEncWriter) writen1(b byte) {
-	if err := z.bw.WriteByte(b); err != nil {
-		panic(err)
-	}
-}
-
-func (z *ioEncWriter) writen2(b1, b2 byte) {
-	var err error
-	if err = z.bw.WriteByte(b1); err == nil {
-		if err = z.bw.WriteByte(b2); err == nil {
-			return
-		}
-	}
-	panic(err)
-}
-
-// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
-// 	z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
-// 	if _, err := z.ww.Write(z.b[:5]); err != nil {
-// 		panic(err)
-// 	}
-// }
-
-//go:noinline - so *encWr.XXX has the bytesEncAppender.XXX inlined
-func (z *ioEncWriter) end() {
-	if z.fw != nil {
-		if err := z.fw.Flush(); err != nil {
-			panic(err)
-		}
-	}
-}
-
-*/
-
 // ---------------------------------------------
 
 // bufioEncWriter
@@ -124,31 +27,12 @@ type bufioEncWriter struct {
 
 	n int
 
-	// // Extensions can call Encode() within a current Encode() call.
-	// // We need to know when the top level Encode() call returns,
-	// // so we can decide whether to Release() or not.
-	// calls uint16 // what depth in mustDecode are we in now.
-
-	// sz int // buf size
-	// _ uint64 // padding (cache-aligned)
-
-	// ---- cache line
-
-	// write-most fields below
-
-	// // less used fields
-	// bytesBufPooler
-
 	b [16]byte // scratch buffer and padding (cache-aligned)
-	// a int
-	// b   [4]byte
-	// err
 }
 
 func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
 	z.w = w
 	z.n = 0
-	// z.calls = 0
 	if bufsize <= 0 {
 		bufsize = defEncByteBufSize
 	}
@@ -156,7 +40,6 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
 	if bufsize <= 8 {
 		bufsize = 8
 	}
-	// z.sz = bufsize
 	if cap(z.buf) < bufsize {
 		if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
 			blist.put(z.buf)
@@ -168,29 +51,8 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
 		}
 	}
 	z.buf = z.buf[:cap(z.buf)]
-	// if bufsize <= cap(z.buf) {
-	// 	z.buf = z.buf[:cap(z.buf)]
-	// } else {
-	// } else if bufsize <= len(z.b) {
-	// 	if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
-	// 		blist.put(z.buf)
-	// 	}
-	// 	z.buf = z.b[:]
-	// } else {
-	// 	// z.buf = z.bytesBufPooler.get(bufsize)
-	// 	// z.buf = z.buf[:cap(z.buf)]
-	// 	if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
-	// 		blist.put(z.buf)
-	// 	}
-	// 	z.buf = blist.get(bufsize)
-	// }
 }
 
-// func (z *bufioEncWriter) release() {
-// 	z.buf = nil
-// 	z.bytesBufPooler.end()
-// }
-
 //go:noinline - flush only called intermittently
 func (z *bufioEncWriter) flushErr() (err error) {
 	n, err := z.w.Write(z.buf[:z.n])
@@ -305,10 +167,6 @@ func (z *bytesEncAppender) writestr(s string) {
 	z.b = append(z.b, s...)
 }
 func (z *bytesEncAppender) writeqstr(s string) {
-	// z.writen1('"')
-	// z.writestr(s)
-	// z.writen1('"')
-
 	z.b = append(append(append(z.b, '"'), s...), '"')
 
 	// z.b = append(z.b, '"')
@@ -320,9 +178,6 @@ func (z *bytesEncAppender) writen1(b1 byte) {
 }
 func (z *bytesEncAppender) writen2(b1, b2 byte) {
 	z.b = append(z.b, b1, b2) // cost: 81
-	// z.b = append(z.b, b1, b2, b1, b2, b1, b2) // cost: 85
-	// z.b = append(z.b, []byte{b1, b2}...) // cost: 83
-	// z.b = append(append(z.b, b1), b2) // cost 82
 }
 func (z *bytesEncAppender) writen(s [rwNLen]byte, num uint8) {
 	// if num <= rwNLen {
@@ -342,23 +197,16 @@ func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
 // --------------------------------------------------
 
 type encWr struct {
-	// esep  bool // whether it has elem separators
 	bytes bool // encoding to []byte
-	// isas  bool // whether e.as != nil
-	js bool // is json encoder?
-	be bool // is binary encoder?
+	js    bool // is json encoder?
+	be    bool // is binary encoder?
 
 	c containerState
 
 	calls uint16
 
-	// _    [3]byte // padding
-	// _    [2]uint64 // padding
-	// _    uint64    // padding
-	// wi   *ioEncWriter
 	wb bytesEncAppender
 	wf *bufioEncWriter
-	// typ  entryType
 }
 
 func (z *encWr) writeb(s []byte) {
@@ -416,97 +264,4 @@ func (z *encWr) end() {
 	}
 }
 
-/*
-
-// ------------------------------------------
-func (z *encWr) writeb(s []byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		z.wb.writeb(s)
-	case entryTypeIo:
-		z.wi.writeb(s)
-	default:
-		z.wf.writeb(s)
-	}
-}
-func (z *encWr) writestr(s string) {
-	switch z.typ {
-	case entryTypeBytes:
-		z.wb.writestr(s)
-	case entryTypeIo:
-		z.wi.writestr(s)
-	default:
-		z.wf.writestr(s)
-	}
-}
-func (z *encWr) writen1(b1 byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		z.wb.writen1(b1)
-	case entryTypeIo:
-		z.wi.writen1(b1)
-	default:
-		z.wf.writen1(b1)
-	}
-}
-func (z *encWr) writen2(b1, b2 byte) {
-	switch z.typ {
-	case entryTypeBytes:
-		z.wb.writen2(b1, b2)
-	case entryTypeIo:
-		z.wi.writen2(b1, b2)
-	default:
-		z.wf.writen2(b1, b2)
-	}
-}
-func (z *encWr) end() {
-	switch z.typ {
-	case entryTypeBytes:
-		z.wb.end()
-	case entryTypeIo:
-		z.wi.end()
-	default:
-		z.wf.end()
-	}
-}
-
-// ------------------------------------------
-func (z *encWr) writeb(s []byte) {
-	if z.bytes {
-		z.wb.writeb(s)
-	} else {
-		z.wi.writeb(s)
-	}
-}
-func (z *encWr) writestr(s string) {
-	if z.bytes {
-		z.wb.writestr(s)
-	} else {
-		z.wi.writestr(s)
-	}
-}
-func (z *encWr) writen1(b1 byte) {
-	if z.bytes {
-		z.wb.writen1(b1)
-	} else {
-		z.wi.writen1(b1)
-	}
-}
-func (z *encWr) writen2(b1, b2 byte) {
-	if z.bytes {
-		z.wb.writen2(b1, b2)
-	} else {
-		z.wi.writen2(b1, b2)
-	}
-}
-func (z *encWr) end() {
-	if z.bytes {
-		z.wb.end()
-	} else {
-		z.wi.end()
-	}
-}
-
-*/
-
 var _ encWriter = (*encWr)(nil)