// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
	"bytes"
	"encoding"
	"errors"
	"fmt"
	"io"
	"reflect"
	"sort"
	"sync"
)

const (
	defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
)

// AsSymbolFlag defines what should be encoded as symbols.
type AsSymbolFlag uint8

const (
	// AsSymbolDefault is the default.
	// Currently, this means only encode struct field names as symbols.
	// The default is subject to change.
	AsSymbolDefault AsSymbolFlag = iota

	// AsSymbolAll means encode anything which could be a symbol as a symbol.
	AsSymbolAll = 0xfe

	// AsSymbolNone means do not encode anything as a symbol.
	AsSymbolNone = 1 << iota

	// AsSymbolMapStringKeysFlag means encode keys in map[string]XXX as symbols.
	AsSymbolMapStringKeysFlag

	// AsSymbolStructFieldNameFlag means encode struct field names as symbols.
	AsSymbolStructFieldNameFlag
)

// encWriter abstracts writing to a byte array or to an io.Writer.
type encWriter interface {
	writeb([]byte)
	writestr(string)
	writen1(byte)
	writen2(byte, byte)
	atEndOfEncode()
}

// encDriver abstracts the actual codec (binc vs msgpack, etc.)
type encDriver interface {
	IsBuiltinType(rt uintptr) bool
	EncodeBuiltin(rt uintptr, v interface{})
	EncodeNil()
	EncodeInt(i int64)
	EncodeUint(i uint64)
	EncodeBool(b bool)
	EncodeFloat32(f float32)
	EncodeFloat64(f float64)
	// encodeExtPreamble(xtag byte, length int)
	EncodeRawExt(re *RawExt, e *Encoder)
	EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
	EncodeArrayStart(length int)
	EncodeArrayEnd()
	EncodeArrayEntrySeparator()
	EncodeMapStart(length int)
	EncodeMapEnd()
	EncodeMapEntrySeparator()
	EncodeMapKVSeparator()
	EncodeString(c charEncoding, v string)
	EncodeSymbol(v string)
	EncodeStringBytes(c charEncoding, v []byte)

	// TODO:
	// encBignum(f *big.Int)
	// encStringRunes(c charEncoding, v []rune)
}

type encNoSeparator struct{}

func (_ encNoSeparator) EncodeMapEnd()              {}
func (_ encNoSeparator) EncodeArrayEnd()            {}
func (_ encNoSeparator) EncodeArrayEntrySeparator() {}
func (_ encNoSeparator) EncodeMapEntrySeparator()   {}
func (_ encNoSeparator) EncodeMapKVSeparator()      {}

type encStructFieldBytesV struct {
	b []byte
	v reflect.Value
}

type encStructFieldBytesVslice []encStructFieldBytesV

func (p encStructFieldBytesVslice) Len() int           { return len(p) }
func (p encStructFieldBytesVslice) Less(i, j int) bool { return bytes.Compare(p[i].b, p[j].b) == -1 }
func (p encStructFieldBytesVslice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type ioEncWriterWriter interface {
	WriteByte(c byte) error
	WriteString(s string) (n int, err error)
	Write(p []byte) (n int, err error)
}

type ioEncStringWriter interface {
	WriteString(s string) (n int, err error)
}

type EncodeOptions struct {
	// StructToArray specifies to encode a struct as an array, and not as a map.
	StructToArray bool

	// Canonical representation means that encoding a value will always result in the same
	// sequence of bytes.
	//
	// This only affects maps, as the iteration order for maps is random.
	// In this case, the map keys will first be encoded into []byte, and then sorted,
	// before writing the sorted keys and the corresponding map values to the stream.
	Canonical bool

	// AsSymbols defines what should be encoded as symbols.
	//
	// Encoding as symbols can reduce the encoded size significantly.
	//
	// However, during encoding, each string to be encoded as a symbol must
	// be checked to see if it has been seen before. Consequently, encoding time
	// will increase when using symbols, because string comparisons have a cost.
	//
	// Sample values:
	//	AsSymbolNone
	//	AsSymbolAll
	//	AsSymbolMapStringKeysFlag
	//	AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
	AsSymbols AsSymbolFlag
}
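
// Configuration sketch (illustrative, not part of this file): concrete handles
// in this package (e.g. MsgpackHandle, BincHandle) are assumed to embed
// EncodeOptions via BasicHandle, so these fields are set directly on the handle:
//
//	var mh MsgpackHandle
//	mh.Canonical = true                        // deterministic map-key order
//	mh.StructToArray = true                    // encode structs as arrays
//	mh.AsSymbols = AsSymbolStructFieldNameFlag // see AsSymbolFlag above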

// ---------------------------------------------

type simpleIoEncWriterWriter struct {
	w  io.Writer
	bw io.ByteWriter
	sw ioEncStringWriter
}

func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
	if o.bw != nil {
		return o.bw.WriteByte(c)
	}
	_, err = o.w.Write([]byte{c})
	return
}

func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
	if o.sw != nil {
		return o.sw.WriteString(s)
	}
	// return o.w.Write([]byte(s))
	return o.w.Write(bytesView(s))
}

func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
	return o.w.Write(p)
}

// ----------------------------------------

// ioEncWriter implements encWriter and can write to an io.Writer implementation
type ioEncWriter struct {
	w ioEncWriterWriter
	// x [8]byte // temp byte array re-used internally for efficiency
}

func (z *ioEncWriter) writeb(bs []byte) {
	if len(bs) == 0 {
		return
	}
	n, err := z.w.Write(bs)
	if err != nil {
		panic(err)
	}
	if n != len(bs) {
		panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n))
	}
}

func (z *ioEncWriter) writestr(s string) {
	n, err := z.w.WriteString(s)
	if err != nil {
		panic(err)
	}
	if n != len(s) {
		panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n))
	}
}

func (z *ioEncWriter) writen1(b byte) {
	if err := z.w.WriteByte(b); err != nil {
		panic(err)
	}
}

func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
	z.writen1(b1)
	z.writen1(b2)
}

func (z *ioEncWriter) atEndOfEncode() {}

// ----------------------------------------

// bytesEncWriter implements encWriter and can write to a byte slice.
// It is used by NewEncoderBytes.
type bytesEncWriter struct {
	b   []byte
	c   int     // cursor
	out *[]byte // write out on atEndOfEncode
}

func (z *bytesEncWriter) writeb(s []byte) {
	if len(s) > 0 {
		c := z.grow(len(s))
		copy(z.b[c:], s)
	}
}

func (z *bytesEncWriter) writestr(s string) {
	if len(s) > 0 {
		c := z.grow(len(s))
		copy(z.b[c:], s)
	}
}

func (z *bytesEncWriter) writen1(b1 byte) {
	c := z.grow(1)
	z.b[c] = b1
}

func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
	c := z.grow(2)
	z.b[c] = b1
	z.b[c+1] = b2
}

func (z *bytesEncWriter) atEndOfEncode() {
	*(z.out) = z.b[:z.c]
}

func (z *bytesEncWriter) grow(n int) (oldcursor int) {
	oldcursor = z.c
	z.c = oldcursor + n
	if z.c > len(z.b) {
		if z.c > cap(z.b) {
			// Tried using appendslice logic: (if cap < 1024, *2, else *1.25).
			// However, it was too expensive, causing too many iterations of copy.
			// Using the bytes.Buffer model was much better (2*cap + n).
			bs := make([]byte, 2*cap(z.b)+n)
			copy(bs, z.b[:oldcursor])
			z.b = bs
		} else {
			z.b = z.b[:cap(z.b)]
		}
	}
	return
}
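
// grow example (arithmetic only): growing a buffer whose backing array has
// cap 64 by n = 100 allocates a new array of 2*64+100 = 228 bytes, so
// subsequent small writes are absorbed without another allocation.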

// ---------------------------------------------

type encFnInfoX struct {
	e     *Encoder
	ti    *typeInfo
	xfFn  Ext
	xfTag uint64
	seq   seqType
}

type encFnInfo struct {
	// Use encFnInfo as a value receiver.
	// Keep most of its less-used variables accessible via a pointer (*encFnInfoX).
	// As the sweet spot for a value receiver is 3 words, keep everything except
	// encDriver (which everyone needs) directly accessible.
	// Ensure encFnInfoX is set for everyone who needs it, i.e.
	// rawExt, ext, builtin, (selfer|binary|text)Marshal, kSlice, kStruct, kMap, kInterface, fastpath
	ee encDriver
	*encFnInfoX
}

func (f encFnInfo) builtin(rv reflect.Value) {
	f.ee.EncodeBuiltin(f.ti.rtid, rv.Interface())
}

func (f encFnInfo) rawExt(rv reflect.Value) {
	// rev := rv.Interface().(RawExt)
	// f.ee.EncodeRawExt(&rev, f.e)
	var re *RawExt
	if rv.CanAddr() {
		re = rv.Addr().Interface().(*RawExt)
	} else {
		rev := rv.Interface().(RawExt)
		re = &rev
	}
	f.ee.EncodeRawExt(re, f.e)
}

func (f encFnInfo) ext(rv reflect.Value) {
	// if this is a struct and it was addressable, then pass the address directly (not the value)
	if rv.CanAddr() && rv.Kind() == reflect.Struct {
		rv = rv.Addr()
	}
	f.ee.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e)
}

func (f encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) {
	if indir == 0 {
		v = rv.Interface()
	} else if indir == -1 {
		v = rv.Addr().Interface()
	} else {
		for j := int8(0); j < indir; j++ {
			if rv.IsNil() {
				f.ee.EncodeNil()
				return
			}
			rv = rv.Elem()
		}
		v = rv.Interface()
	}
	return v, true
}

func (f encFnInfo) selferMarshal(rv reflect.Value) {
	if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed {
		v.(Selfer).CodecEncodeSelf(f.e)
	}
}

func (f encFnInfo) binaryMarshal(rv reflect.Value) {
	if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed {
		bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary()
		if fnerr != nil {
			panic(fnerr)
		}
		if bs == nil {
			f.ee.EncodeNil()
		} else {
			f.ee.EncodeStringBytes(c_RAW, bs)
		}
	}
}

func (f encFnInfo) textMarshal(rv reflect.Value) {
	if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed {
		// debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
		bs, fnerr := v.(encoding.TextMarshaler).MarshalText()
		if fnerr != nil {
			panic(fnerr)
		}
		if bs == nil {
			f.ee.EncodeNil()
		} else {
			f.ee.EncodeStringBytes(c_UTF8, bs)
		}
	}
}

func (f encFnInfo) kBool(rv reflect.Value) {
	f.ee.EncodeBool(rv.Bool())
}

func (f encFnInfo) kString(rv reflect.Value) {
	f.ee.EncodeString(c_UTF8, rv.String())
}

func (f encFnInfo) kFloat64(rv reflect.Value) {
	f.ee.EncodeFloat64(rv.Float())
}

func (f encFnInfo) kFloat32(rv reflect.Value) {
	f.ee.EncodeFloat32(float32(rv.Float()))
}

func (f encFnInfo) kInt(rv reflect.Value) {
	f.ee.EncodeInt(rv.Int())
}

func (f encFnInfo) kUint(rv reflect.Value) {
	f.ee.EncodeUint(rv.Uint())
}

func (f encFnInfo) kInvalid(rv reflect.Value) {
	f.ee.EncodeNil()
}

func (f encFnInfo) kErr(rv reflect.Value) {
	f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
}

func (f encFnInfo) kSlice(rv reflect.Value) {
	ti := f.ti
	// array may be non-addressable, so we have to manage with care
	// (don't call rv.Bytes, rv.Slice, etc).
	// E.g. with type S struct{ B [2]byte },
	// Encode(S{}) will bomb on "panic: slice of unaddressable array".
	if f.seq != seqTypeArray {
		if rv.IsNil() {
			f.ee.EncodeNil()
			return
		}
		// If in this method, then there was no extension function defined.
		// So it's okay to treat as []byte.
		if ti.rtid == uint8SliceTypId {
			f.ee.EncodeStringBytes(c_RAW, rv.Bytes())
			return
		}
	}
	rtelem := ti.rt.Elem()
	l := rv.Len()
	if rtelem.Kind() == reflect.Uint8 {
		switch f.seq {
		case seqTypeArray:
			// if l == 0 { f.ee.encodeStringBytes(c_RAW, nil) } else
			if rv.CanAddr() {
				f.ee.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes())
			} else {
				var bs []byte
				if l <= cap(f.e.b) {
					bs = f.e.b[:l]
				} else {
					bs = make([]byte, l)
				}
				reflect.Copy(reflect.ValueOf(bs), rv)
				// TODO: Test that reflect.Copy works instead of manual one-by-one
				// for i := 0; i < l; i++ {
				// 	bs[i] = byte(rv.Index(i).Uint())
				// }
				f.ee.EncodeStringBytes(c_RAW, bs)
			}
		case seqTypeSlice:
			f.ee.EncodeStringBytes(c_RAW, rv.Bytes())
		case seqTypeChan:
			bs := f.e.b[:0]
			// do not use range, so that the number of elements encoded
			// does not change, and encoding does not hang waiting on someone to close the chan.
			// for b := range rv.Interface().(<-chan byte) {
			// 	bs = append(bs, b)
			// }
			ch := rv.Interface().(<-chan byte)
			for i := 0; i < l; i++ {
				bs = append(bs, <-ch)
			}
			f.ee.EncodeStringBytes(c_RAW, bs)
		}
		return
	}
	if ti.mbs {
		if l%2 == 1 {
			f.e.errorf("mapBySlice requires even slice length, but got %v", l)
			return
		}
		f.ee.EncodeMapStart(l / 2)
	} else {
		f.ee.EncodeArrayStart(l)
	}
	e := f.e
	sep := !e.be
	if l > 0 {
		for rtelem.Kind() == reflect.Ptr {
			rtelem = rtelem.Elem()
		}
		// if kind is reflect.Interface, do not pre-determine the
		// encoding type, because preEncodeValue may break it down to
		// a concrete type and kInterface will bomb.
		var fn encFn
		if rtelem.Kind() != reflect.Interface {
			rtelemid := reflect.ValueOf(rtelem).Pointer()
			fn = e.getEncFn(rtelemid, rtelem, true, true)
		}
		// TODO: Consider perf implication of encoding odd index values as symbols if type is string
		if sep {
			for j := 0; j < l; j++ {
				if j > 0 {
					if ti.mbs {
						if j%2 == 0 {
							f.ee.EncodeMapEntrySeparator()
						} else {
							f.ee.EncodeMapKVSeparator()
						}
					} else {
						f.ee.EncodeArrayEntrySeparator()
					}
				}
				if f.seq == seqTypeChan {
					if rv2, ok2 := rv.Recv(); ok2 {
						e.encodeValue(rv2, fn)
					}
				} else {
					e.encodeValue(rv.Index(j), fn)
				}
			}
		} else {
			for j := 0; j < l; j++ {
				if f.seq == seqTypeChan {
					if rv2, ok2 := rv.Recv(); ok2 {
						e.encodeValue(rv2, fn)
					}
				} else {
					e.encodeValue(rv.Index(j), fn)
				}
			}
		}
	}
	if sep {
		if ti.mbs {
			f.ee.EncodeMapEnd()
		} else {
			f.ee.EncodeArrayEnd()
		}
	}
}
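
// MapBySlice sketch (illustrative; MapBySlice is declared elsewhere in this
// package): a slice type carrying the MapBySlice marker method is encoded as a
// map of alternating key/value entries, which is why kSlice above requires an
// even length:
//
//	type pairs []interface{}
//	func (pairs) MapBySlice() {}
//	// pairs{"a", 1, "b", 2} encodes like map[string]interface{}{"a": 1, "b": 2}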

func (f encFnInfo) kStruct(rv reflect.Value) {
	fti := f.ti
	e := f.e
	tisfi := fti.sfip
	toMap := !(fti.toArray || e.h.StructToArray)
	newlen := len(fti.sfi)
	// Use sync.Pool to reduce allocating slices unnecessarily.
	// The cost of the occasional locking is less than the cost of the extra allocations.
	var fkvs []encStructFieldKV
	var pool *sync.Pool
	var poolv interface{}
	idxpool := newlen / 8
	if encStructPoolLen != 4 {
		panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed
	}
	if idxpool < encStructPoolLen {
		pool = &encStructPool[idxpool]
		poolv = pool.Get()
		switch vv := poolv.(type) {
		case *[8]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[16]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[32]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[64]encStructFieldKV:
			fkvs = vv[:newlen]
		}
	}
	if fkvs == nil {
		fkvs = make([]encStructFieldKV, newlen)
	}
	// if toMap, use the sorted array. If toArray, use the unsorted array (to match the sequence in the struct)
	if toMap {
		tisfi = fti.sfi
	}
	newlen = 0
	var kv encStructFieldKV
	for _, si := range tisfi {
		kv.v = si.field(rv, false)
		// if si.i != -1 {
		// 	rvals[newlen] = rv.Field(int(si.i))
		// } else {
		// 	rvals[newlen] = rv.FieldByIndex(si.is)
		// }
		if toMap {
			if si.omitEmpty && isEmptyValue(kv.v) {
				continue
			}
			kv.k = si.encName
		} else {
			// use the zero value.
			// if a reference or struct, set to nil (so you do not output too much)
			if si.omitEmpty && isEmptyValue(kv.v) {
				switch kv.v.Kind() {
				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
					reflect.Map, reflect.Slice:
					kv.v = reflect.Value{} // encode as nil
				}
			}
		}
		fkvs[newlen] = kv
		newlen++
	}
	// debugf(">>>> kStruct: newlen: %v", newlen)
	sep := !e.be
	ee := f.ee // don't dereference every time
	if sep {
		if toMap {
			ee.EncodeMapStart(newlen)
			// asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
			asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
			for j := 0; j < newlen; j++ {
				kv = fkvs[j]
				if j > 0 {
					ee.EncodeMapEntrySeparator()
				}
				if asSymbols {
					ee.EncodeSymbol(kv.k)
				} else {
					ee.EncodeString(c_UTF8, kv.k)
				}
				ee.EncodeMapKVSeparator()
				e.encodeValue(kv.v, encFn{})
			}
			ee.EncodeMapEnd()
		} else {
			ee.EncodeArrayStart(newlen)
			for j := 0; j < newlen; j++ {
				kv = fkvs[j]
				if j > 0 {
					ee.EncodeArrayEntrySeparator()
				}
				e.encodeValue(kv.v, encFn{})
			}
			ee.EncodeArrayEnd()
		}
	} else {
		if toMap {
			ee.EncodeMapStart(newlen)
			// asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
			asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
			for j := 0; j < newlen; j++ {
				kv = fkvs[j]
				if asSymbols {
					ee.EncodeSymbol(kv.k)
				} else {
					ee.EncodeString(c_UTF8, kv.k)
				}
				e.encodeValue(kv.v, encFn{})
			}
		} else {
			ee.EncodeArrayStart(newlen)
			for j := 0; j < newlen; j++ {
				kv = fkvs[j]
				e.encodeValue(kv.v, encFn{})
			}
		}
	}
	// do not use defer. Instead, use explicit pool return at end of function.
	// defer has a cost we are trying to avoid.
	// If there is a panic and these slices are not returned, it is ok.
	if pool != nil {
		pool.Put(poolv)
	}
}

// func (f encFnInfo) kPtr(rv reflect.Value) {
// 	debugf(">>>>>>> ??? encode kPtr called - shouldn't get called")
// 	if rv.IsNil() {
// 		f.ee.encodeNil()
// 		return
// 	}
// 	f.e.encodeValue(rv.Elem())
// }

func (f encFnInfo) kInterface(rv reflect.Value) {
	if rv.IsNil() {
		f.ee.EncodeNil()
		return
	}
	f.e.encodeValue(rv.Elem(), encFn{})
}

func (f encFnInfo) kMap(rv reflect.Value) {
	if rv.IsNil() {
		f.ee.EncodeNil()
		return
	}
	l := rv.Len()
	f.ee.EncodeMapStart(l)
	e := f.e
	sep := !e.be
	if l == 0 {
		if sep {
			f.ee.EncodeMapEnd()
		}
		return
	}
	var asSymbols bool
	// determine the underlying key and val encFn's for the map.
	// This eliminates some work which is done for each loop iteration i.e.
	// rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
	//
	// However, if kind is reflect.Interface, do not pre-determine the
	// encoding type, because preEncodeValue may break it down to
	// a concrete type and kInterface will bomb.
	var keyFn, valFn encFn
	ti := f.ti
	rtkey := ti.rt.Key()
	rtval := ti.rt.Elem()
	rtkeyid := reflect.ValueOf(rtkey).Pointer()
	// keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
	var keyTypeIsString = rtkeyid == stringTypId
	if keyTypeIsString {
		asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
	} else {
		for rtkey.Kind() == reflect.Ptr {
			rtkey = rtkey.Elem()
		}
		if rtkey.Kind() != reflect.Interface {
			rtkeyid = reflect.ValueOf(rtkey).Pointer()
			keyFn = e.getEncFn(rtkeyid, rtkey, true, true)
		}
	}
	for rtval.Kind() == reflect.Ptr {
		rtval = rtval.Elem()
	}
	if rtval.Kind() != reflect.Interface {
		rtvalid := reflect.ValueOf(rtval).Pointer()
		valFn = e.getEncFn(rtvalid, rtval, true, true)
	}
	mks := rv.MapKeys()
	// for j, lmks := 0, len(mks); j < lmks; j++ {
	ee := f.ee // don't dereference every time
	if e.h.Canonical {
		// encode each key to a []byte, sort the encoded keys, then write them out in that order
		// println(">>>>>>>> CANONICAL <<<<<<<<")
		var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
		e2 := NewEncoderBytes(&mksv, e.hh)
		mksbv := make([]encStructFieldBytesV, len(mks))
		for i, k := range mks {
			l := len(mksv)
			e2.MustEncode(k)
			mksbv[i].v = k
			mksbv[i].b = mksv[l:]
		}
		sort.Sort(encStructFieldBytesVslice(mksbv))
		for j := range mksbv {
			if j > 0 {
				ee.EncodeMapEntrySeparator()
			}
			e.w.writeb(mksbv[j].b)
			ee.EncodeMapKVSeparator()
			e.encodeValue(rv.MapIndex(mksbv[j].v), valFn)
		}
		ee.EncodeMapEnd()
	} else if sep {
		for j := range mks {
			if j > 0 {
				ee.EncodeMapEntrySeparator()
			}
			if keyTypeIsString {
				if asSymbols {
					ee.EncodeSymbol(mks[j].String())
				} else {
					ee.EncodeString(c_UTF8, mks[j].String())
				}
			} else {
				e.encodeValue(mks[j], keyFn)
			}
			ee.EncodeMapKVSeparator()
			e.encodeValue(rv.MapIndex(mks[j]), valFn)
		}
		ee.EncodeMapEnd()
	} else {
		for j := range mks {
			if keyTypeIsString {
				if asSymbols {
					ee.EncodeSymbol(mks[j].String())
				} else {
					ee.EncodeString(c_UTF8, mks[j].String())
				}
			} else {
				e.encodeValue(mks[j], keyFn)
			}
			e.encodeValue(rv.MapIndex(mks[j]), valFn)
		}
	}
}

// --------------------------------------------------

// encFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations once, and pass to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type encFn struct {
	i encFnInfo
	f func(encFnInfo, reflect.Value)
}

// --------------------------------------------------

type rtidEncFn struct {
	rtid uintptr
	fn   encFn
}

// An Encoder writes an object to an output stream in the codec format.
type Encoder struct {
	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
	e  encDriver
	w  encWriter
	s  []rtidEncFn
	be bool // is binary encoding
	wi ioEncWriter
	wb bytesEncWriter
	h  *BasicHandle
	hh Handle
	f  map[uintptr]encFn
	b  [scratchByteArrayLen]byte
}

// NewEncoder returns an Encoder for encoding into an io.Writer.
//
// For efficiency, users are encouraged to pass in a memory buffered writer
// (eg bufio.Writer, bytes.Buffer).
func NewEncoder(w io.Writer, h Handle) *Encoder {
	e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
	ww, ok := w.(ioEncWriterWriter)
	if !ok {
		sww := simpleIoEncWriterWriter{w: w}
		sww.bw, _ = w.(io.ByteWriter)
		sww.sw, _ = w.(ioEncStringWriter)
		ww = &sww
		// ww = bufio.NewWriterSize(w, defEncByteBufSize)
	}
	e.wi.w = ww
	e.w = &e.wi
	e.e = h.newEncDriver(e)
	return e
}
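
// Usage sketch (illustrative; assumes a concrete Handle such as MsgpackHandle):
//
//	var buf bytes.Buffer // any io.Writer works; buffered writers are preferred
//	var mh MsgpackHandle
//	enc := NewEncoder(&buf, &mh)
//	if err := enc.Encode(value); err != nil {
//		// handle error
//	}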

// NewEncoderBytes returns an encoder for encoding directly and efficiently
// into a byte slice, using zero-copying to temporary slices.
//
// It will potentially replace the output byte slice pointed to.
// After encoding, the out parameter contains the encoded contents.
func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
	e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
	in := *out
	if in == nil {
		in = make([]byte, defEncByteBufSize)
	}
	e.wb.b, e.wb.out = in, out
	e.w = &e.wb
	e.e = h.newEncDriver(e)
	return e
}
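
// Usage sketch (illustrative; assumes a concrete Handle such as BincHandle):
//
//	var out []byte
//	var bh BincHandle
//	if err := NewEncoderBytes(&out, &bh).Encode(value); err == nil {
//		// out now holds the encoded bytes
//	}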

// Encode writes an object into a stream.
//
// Encoding can be configured via the struct tag for the fields.
// The "codec" key in the struct field's tag value is the key name,
// followed by an optional comma and options.
// Note that the "json" key is used in the absence of the "codec" key.
//
// To set an option on all fields (e.g. omitempty on all fields), you
// can create a field called _struct, and set flags on it.
//
// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
//   - the field's tag is "-", OR
//   - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
//
// When encoding as a map, the first string in the tag (before the comma)
// is the map key string to use when encoding.
//
// However, struct values may encode as arrays. This happens when:
//   - the StructToArray Encode option is set, OR
//   - the tag on the _struct field sets the "toarray" option
//
// Values with types that implement MapBySlice are encoded as stream maps.
//
// The empty values (for the omitempty option) are false, 0, any nil pointer
// or interface value, and any array, slice, map, or string of length zero.
//
// Anonymous fields are encoded inline if no struct tag is present.
// Else they are encoded as regular fields.
//
// Examples:
//
//	// NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
//	type MyStruct struct {
//		_struct bool   `codec:",omitempty"`   //set omitempty for every field
//		Field1  string `codec:"-"`            //skip this field
//		Field2  int    `codec:"myName"`       //Use key "myName" in encode stream
//		Field3  int32  `codec:",omitempty"`   //use key "Field3". Omit if empty.
//		Field4  bool   `codec:"f4,omitempty"` //use key "f4". Omit if empty.
//		...
//	}
//
//	type MyStruct struct {
//		_struct bool `codec:",omitempty,toarray"` //set omitempty for every field
//		                                          //and encode struct as an array
//	}
//
// The mode of encoding is based on the type of the value. When a value is seen:
//   - If an extension is registered for it, call that extension function
//   - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error)
//   - Else encode it based on its reflect.Kind
//
// Note that struct field names and keys in map[string]XXX will be treated as symbols.
// Some formats support symbols (e.g. binc) and will properly encode the string
// only once in the stream, and use a tag to refer to it thereafter.
func (e *Encoder) Encode(v interface{}) (err error) {
	defer panicToErr(&err)
	e.encode(v)
	e.w.atEndOfEncode()
	return
}
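
// Worked example (illustrative; MyStruct follows the tag examples above and the
// handle is assumed):
//
//	v := MyStruct{Field2: 7, Field4: true}
//	var bh BincHandle
//	var buf []byte
//	err := NewEncoderBytes(&buf, &bh).Encode(v)
//	// with omitempty set via _struct, only "myName" and "f4" appear in the map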

// MustEncode is like Encode, but panics if unable to Encode.
// This provides insight to the code location that triggered the error.
func (e *Encoder) MustEncode(v interface{}) {
	e.encode(v)
	e.w.atEndOfEncode()
}

// comment out these (Must)Write methods. They were only put there to support cbor.
// However, users already have access to the streams, and can write directly.
//
// // Write allows users write to the Encoder stream directly.
// func (e *Encoder) Write(bs []byte) (err error) {
// 	defer panicToErr(&err)
// 	e.w.writeb(bs)
// 	return
// }
//
// // MustWrite is like Write, but panics if unable to Write.
// func (e *Encoder) MustWrite(bs []byte) {
// 	e.w.writeb(bs)
// }

func (e *Encoder) encode(iv interface{}) {
	// if ics, ok := iv.(Selfer); ok {
	// 	ics.CodecEncodeSelf(e)
	// 	return
	// }
	switch v := iv.(type) {
	case nil:
		e.e.EncodeNil()
	case Selfer:
		v.CodecEncodeSelf(e)
	case reflect.Value:
		e.encodeValue(v, encFn{})
	case string:
		e.e.EncodeString(c_UTF8, v)
	case bool:
		e.e.EncodeBool(v)
	case int:
		e.e.EncodeInt(int64(v))
	case int8:
		e.e.EncodeInt(int64(v))
	case int16:
		e.e.EncodeInt(int64(v))
	case int32:
		e.e.EncodeInt(int64(v))
	case int64:
		e.e.EncodeInt(v)
	case uint:
		e.e.EncodeUint(uint64(v))
	case uint8:
		e.e.EncodeUint(uint64(v))
	case uint16:
		e.e.EncodeUint(uint64(v))
	case uint32:
		e.e.EncodeUint(uint64(v))
	case uint64:
		e.e.EncodeUint(v)
	case float32:
		e.e.EncodeFloat32(v)
	case float64:
		e.e.EncodeFloat64(v)
	case []uint8:
		e.e.EncodeStringBytes(c_RAW, v)
	case *string:
		e.e.EncodeString(c_UTF8, *v)
	case *bool:
		e.e.EncodeBool(*v)
	case *int:
		e.e.EncodeInt(int64(*v))
	case *int8:
		e.e.EncodeInt(int64(*v))
	case *int16:
		e.e.EncodeInt(int64(*v))
	case *int32:
		e.e.EncodeInt(int64(*v))
	case *int64:
		e.e.EncodeInt(*v)
	case *uint:
		e.e.EncodeUint(uint64(*v))
	case *uint8:
		e.e.EncodeUint(uint64(*v))
	case *uint16:
		e.e.EncodeUint(uint64(*v))
	case *uint32:
		e.e.EncodeUint(uint64(*v))
	case *uint64:
		e.e.EncodeUint(*v)
	case *float32:
		e.e.EncodeFloat32(*v)
	case *float64:
		e.e.EncodeFloat64(*v)
	case *[]uint8:
		e.e.EncodeStringBytes(c_RAW, *v)
	default:
		// canonical mode is not supported for fastpath of maps (but is fine for slices)
		if e.h.Canonical {
			if !fastpathEncodeTypeSwitchSlice(iv, e) {
				e.encodeI(iv, false, false)
			}
		} else if !fastpathEncodeTypeSwitch(iv, e) {
			e.encodeI(iv, false, false)
		}
	}
}

func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
	if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
		rt := rv.Type()
		rtid := reflect.ValueOf(rt).Pointer()
		fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
		fn.f(fn.i, rv)
	}
}

func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) {
LOOP:
	for {
		switch rv.Kind() {
		case reflect.Ptr, reflect.Interface:
			if rv.IsNil() {
				e.e.EncodeNil()
				return
			}
			rv = rv.Elem()
			continue LOOP
		case reflect.Slice, reflect.Map:
			if rv.IsNil() {
				e.e.EncodeNil()
				return
			}
		case reflect.Invalid, reflect.Func:
			e.e.EncodeNil()
			return
		}
		break
	}
	return rv, true
}

func (e *Encoder) encodeValue(rv reflect.Value, fn encFn) {
	// if a valid fn is passed, it MUST BE for the dereferenced type of rv
	if rv, proceed := e.preEncodeValue(rv); proceed {
		if fn.f == nil {
			rt := rv.Type()
			rtid := reflect.ValueOf(rt).Pointer()
			fn = e.getEncFn(rtid, rt, true, true)
		}
		fn.f(fn.i, rv)
	}
}

func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn encFn) {
	// rtid := reflect.ValueOf(rt).Pointer()
	var ok bool
	if useMapForCodecCache {
		fn, ok = e.f[rtid]
	} else {
		for _, v := range e.s {
			if v.rtid == rtid {
				fn, ok = v.fn, true
				break
			}
		}
	}
	if ok {
		return
	}
	// fi.encFnInfoX = new(encFnInfoX)
	ti := getTypeInfo(rtid, rt)
	var fi encFnInfo
	fi.ee = e.e
	if checkCodecSelfer && ti.cs {
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fn.f = (encFnInfo).selferMarshal
	} else if rtid == rawExtTypId {
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fn.f = (encFnInfo).rawExt
	} else if e.e.IsBuiltinType(rtid) {
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fn.f = (encFnInfo).builtin
	} else if xfFn := e.h.getExt(rtid); xfFn != nil {
		// fi.encFnInfoX = new(encFnInfoX)
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.f = (encFnInfo).ext
	} else if supportMarshalInterfaces && e.be && ti.bm {
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fn.f = (encFnInfo).binaryMarshal
	} else if supportMarshalInterfaces && !e.be && ti.tm {
		fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
		fn.f = (encFnInfo).textMarshal
	} else {
		rk := rt.Kind()
		// if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
		if fastpathEnabled && checkFastpath && (rk == reflect.Slice || (rk == reflect.Map && !e.h.Canonical)) {
			if rt.PkgPath() == "" {
				if idx := fastpathAV.index(rtid); idx != -1 {
					fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
					fn.f = fastpathAV[idx].encfn
				}
			} else {
				ok = false
				// use mapping for underlying type if there
				var rtu reflect.Type
				if rk == reflect.Map {
					rtu = reflect.MapOf(rt.Key(), rt.Elem())
				} else {
					rtu = reflect.SliceOf(rt.Elem())
				}
				rtuid := reflect.ValueOf(rtu).Pointer()
				if idx := fastpathAV.index(rtuid); idx != -1 {
					xfnf := fastpathAV[idx].encfn
					xrt := fastpathAV[idx].rt
					fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
					fn.f = func(xf encFnInfo, xrv reflect.Value) {
						xfnf(xf, xrv.Convert(xrt))
					}
				}
			}
		}
		if fn.f == nil {
			switch rk {
			case reflect.Bool:
				fn.f = (encFnInfo).kBool
			case reflect.String:
				fn.f = (encFnInfo).kString
			case reflect.Float64:
				fn.f = (encFnInfo).kFloat64
			case reflect.Float32:
				fn.f = (encFnInfo).kFloat32
			case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
				fn.f = (encFnInfo).kInt
			case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16:
				fn.f = (encFnInfo).kUint
			case reflect.Invalid:
				fn.f = (encFnInfo).kInvalid
			case reflect.Chan:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti, seq: seqTypeChan}
				fn.f = (encFnInfo).kSlice
			case reflect.Slice:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti, seq: seqTypeSlice}
				fn.f = (encFnInfo).kSlice
			case reflect.Array:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti, seq: seqTypeArray}
				fn.f = (encFnInfo).kSlice
			case reflect.Struct:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
				fn.f = (encFnInfo).kStruct
			// case reflect.Ptr:
			// 	fn.f = (encFnInfo).kPtr
			case reflect.Interface:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
				fn.f = (encFnInfo).kInterface
			case reflect.Map:
				fi.encFnInfoX = &encFnInfoX{e: e, ti: ti}
				fn.f = (encFnInfo).kMap
			default:
				fn.f = (encFnInfo).kErr
			}
		}
	}
	fn.i = fi
	if useMapForCodecCache {
		if e.f == nil {
			e.f = make(map[uintptr]encFn, 32)
		}
		e.f[rtid] = fn
	} else {
		if e.s == nil {
			e.s = make([]rtidEncFn, 0, 32)
		}
		e.s = append(e.s, rtidEncFn{rtid, fn})
	}
	return
}

func (e *Encoder) errorf(format string, params ...interface{}) {
	err := fmt.Errorf(format, params...)
	panic(err)
}

// ----------------------------------------

type encStructFieldKV struct {
	k string
	v reflect.Value
}

const encStructPoolLen = 4

// encStructPool is an array of sync.Pool.
// Each element of the array pools one of the [8|16|32|64]encStructFieldKV arrays.
// It allows the re-use of slices up to 64 in length.
// A performance cost of encoding structs was collecting
// which values were empty and should be omitted.
// We needed slices of reflect.Value and string to collect them.
// This shared pool reduces the amount of unnecessary creation we do.
// The cost is that of locking sometimes, but sync.Pool is efficient
// enough to reduce thread contention.
var encStructPool [encStructPoolLen]sync.Pool

func init() {
	encStructPool[0].New = func() interface{} { return new([8]encStructFieldKV) }
	encStructPool[1].New = func() interface{} { return new([16]encStructFieldKV) }
	encStructPool[2].New = func() interface{} { return new([32]encStructFieldKV) }
	encStructPool[3].New = func() interface{} { return new([64]encStructFieldKV) }
}
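
// Sizing sketch: kStruct indexes this array by (number of fields)/8, so a
// struct with 20 encodable fields (20/8 == 2) borrows a *[32]encStructFieldKV
// from encStructPool[2], while structs with 32 or more fields fall back to a
// plain make([]encStructFieldKV, n).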

// ----------------------------------------

// func encErr(format string, params ...interface{}) {
// 	doPanic(msgTagEnc, format, params...)
// }