// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

// Contains code shared by both encode and decode.

// Some shared ideas around encoding/decoding
// ------------------------------------------
//
// If an interface{} is passed, we first do a type assertion to see if it is
// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
//
// If we start with a reflect.Value, we are already in reflect.Value land and
// will try to grab the function for the underlying Type and directly call that function.
// This is more performant than calling reflect.Value.Interface().
//
// This still helps us bypass many layers of reflection, and gives the best performance.
//
// Containers
// ------------
// Containers in the stream are either associative arrays (key-value pairs) or
// regular arrays (indexed by incrementing integers).
//
// Some streams support indefinite-length containers, and use a breaking
// byte-sequence to denote that the container has come to an end.
//
// Some streams are also text-based, and use explicit separators to denote the
// end/beginning of different values.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:
//   - If struct, update fields from stream into fields of struct.
//     If a field in the stream is not found in the struct, handle appropriately (based on option).
//     If a struct field has no corresponding value in the stream, leave it AS IS.
//     If nil in stream, set value to nil/zero value.
//   - If map, update map from stream.
//     If the stream value is NIL, set the map to nil.
//   - If slice, try to update up to the length of the array in the stream.
//     If the container length is less than the stream array length,
//     and the container cannot be expanded, it is handled (based on option).
//     This means you can decode a 4-element stream array into a 1-element array.
//
// ------------------------------------
// On encode, the user can specify omitEmpty. This means that the value will be omitted
// if it is the zero value. The problem may occur during decode, where omitted values do not affect
// the value being decoded into. This means that if decoding into a struct with an
// int field with current value=5, and the field is omitted in the stream, then after
// decoding, the value will still be 5 (not 0).
// omitEmpty only works if you guarantee that you always decode into zero-values.
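//
// For illustration, a round-trip showing that effect (assuming h is some
// configured Handle; T is an illustrative type, not part of this package):
//
//	type T struct {
//		A int `codec:"a,omitempty"`
//	}
//	var bs []byte
//	_ = NewEncoderBytes(&bs, h).Encode(T{}) // A is zero, so "a" is omitted from the stream
//	v := T{A: 5}                            // decode into a non-zero value
//	_ = NewDecoderBytes(bs, h).Decode(&v)   // "a" absent in stream: v.A remains 5, not 0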
//
// ------------------------------------
// We could have truncated a map to remove keys not available in the stream,
// or set values in the struct which are not in the stream to their zero values.
// We decided against it because there is no efficient way to do it.
// We may introduce it as an option later.
// However, that will require enabling it for both runtime and code generation modes.
//
// To support truncate, we need to do 2 passes over the container:
//   map:
//   - first collect all keys (e.g. in k1)
//   - for each key in stream, mark k1 that the key should not be removed
//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
//   struct:
//   - for each field, track the *typeInfo s1
//   - iterate through all s1, and for each one not marked, set value to zero
//   - this involves checking the possible anonymous fields which are nil ptrs.
//     too much work.
//
// ------------------------------------------
// Error Handling is done within the library using panic.
//
// This way, the code doesn't have to keep checking if an error has happened,
// and we don't have to keep sending the error value along with each call
// or storing it in the En|Decoder and checking it constantly along the way.
//
// We considered storing the error in the En|Decoder:
//   - once it has its err field set, it cannot be used again.
//   - panicking will be optional, controlled by a const flag.
//   - code should always check the error first and return early.
//
// We eventually decided against it, as it makes the code clumsier to always
// check for these error conditions.
//
// ------------------------------------------
// We only use sync.Pool for long-lived objects shared across multiple goroutines.
// Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
//
// Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
// Instead, the short-lived objects use free-lists that live as long as the object exists.
//
// ------------------------------------------
// Performance is affected by the following:
//   - Bounds Checking
//   - Inlining
//   - Pointer chasing
//
// This package tries hard to manage the performance impact of these.
//
// To alleviate the performance impact of pointer-chasing:
//   - Prefer non-pointer values in a struct field
//   - Refer to these directly within helper classes
//     e.g. json.go refers directly to d.d.decRd

import (
	"bytes"
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

const (
	scratchByteArrayLen = 64
	// initCollectionCap = 16 // 32 is defensive. 16 is preferred.

	// Support encoding.(Binary|Text)(Unm|M)arshaler.
	// This constant flag will enable or disable it.
	supportMarshalInterfaces = true

	// for debugging, set this to false, to catch panic traces.
	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
	recoverPanicToErr = true

	// arrayCacheLen is the length of the cache used in encoder or decoder for
	// allowing zero-alloc initialization.
	// arrayCacheLen = 8

	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
	cacheLineSize = 64
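
	// On 64-bit platforms ^uint(0)>>63 is 1, so the expression below evaluates
	// to 32<<1 == 64; on 32-bit platforms it is 0, giving 32<<0 == 32.
	// This matches strconv.IntSize.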
	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
	wordSize     = wordSizeBits / 8

	// so structFieldInfo fits into 8 bytes
	maxLevelsEmbedding = 14

	// useFinalizers=true configures finalizers to release pool'ed resources
	// acquired by Encoder/Decoder during their GC.
	//
	// Note that calling SetFinalizer is always expensive,
	// as code must be run on the systemstack even for SetFinalizer(t, nil).
	//
	// We document that folks SHOULD call Release() when done, or they can
	// explicitly call SetFinalizer themselves e.g.
	//	runtime.SetFinalizer(e, (*Encoder).Release)
	//	runtime.SetFinalizer(d, (*Decoder).Release)
	useFinalizers = false

	// // usePool controls whether we use sync.Pool or not.
	// //
	// // sync.Pool can help manage memory use, but it may come at a performance cost.
	// usePool = false

	// xdebug controls whether xdebugf prints any output
	xdebug = true
)

var (
	oneByteArr    [1]byte
	zeroByteSlice = oneByteArr[:0:0]

	codecgen bool

	// defPooler pooler

	panicv panicHdl

	refBitset    bitset32
	isnilBitset  bitset32
	scalarBitset bitset32
)

var (
	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
)

var (
	pool4tiload = sync.Pool{New: func() interface{} { return new(typeInfoLoadArray) }}

	// pool4sfiRv8   = sync.Pool{New: func() interface{} { return new([8]sfiRv) }}
	// pool4sfiRv16  = sync.Pool{New: func() interface{} { return new([16]sfiRv) }}
	// pool4sfiRv32  = sync.Pool{New: func() interface{} { return new([32]sfiRv) }}
	// pool4sfiRv64  = sync.Pool{New: func() interface{} { return new([64]sfiRv) }}
	// pool4sfiRv128 = sync.Pool{New: func() interface{} { return new([128]sfiRv) }}

	// // dn = sync.Pool{ New: func() interface{} { x := new(decNaked); x.init(); return x } }

	// pool4buf256 = sync.Pool{New: func() interface{} { return new([256]byte) }}
	// pool4buf1k  = sync.Pool{New: func() interface{} { return new([1 * 1024]byte) }}
	// pool4buf2k  = sync.Pool{New: func() interface{} { return new([2 * 1024]byte) }}
	// pool4buf4k  = sync.Pool{New: func() interface{} { return new([4 * 1024]byte) }}
	// pool4buf8k  = sync.Pool{New: func() interface{} { return new([8 * 1024]byte) }}
	// pool4buf16k = sync.Pool{New: func() interface{} { return new([16 * 1024]byte) }}
	// pool4buf32k = sync.Pool{New: func() interface{} { return new([32 * 1024]byte) }}
	// pool4buf64k = sync.Pool{New: func() interface{} { return new([64 * 1024]byte) }}

	// pool4mapStrU16   = sync.Pool{New: func() interface{} { return make(map[string]uint16, 16) }}
	// pool4mapU16Str   = sync.Pool{New: func() interface{} { return make(map[uint16]string, 16) }}
	// pool4mapU16Bytes = sync.Pool{New: func() interface{} { return make(map[uint16][]byte, 16) }}
)

func init() {
	// defPooler.init()

	refBitset = refBitset.
		set(byte(reflect.Map)).
		set(byte(reflect.Ptr)).
		set(byte(reflect.Func)).
		set(byte(reflect.Chan)).
		set(byte(reflect.UnsafePointer))

	isnilBitset = isnilBitset.
		set(byte(reflect.Map)).
		set(byte(reflect.Ptr)).
		set(byte(reflect.Func)).
		set(byte(reflect.Chan)).
		set(byte(reflect.UnsafePointer)).
		set(byte(reflect.Interface)).
		set(byte(reflect.Slice))

	scalarBitset = scalarBitset.
		set(byte(reflect.Bool)).
		set(byte(reflect.Int)).
		set(byte(reflect.Int8)).
		set(byte(reflect.Int16)).
		set(byte(reflect.Int32)).
		set(byte(reflect.Int64)).
		set(byte(reflect.Uint)).
		set(byte(reflect.Uint8)).
		set(byte(reflect.Uint16)).
		set(byte(reflect.Uint32)).
		set(byte(reflect.Uint64)).
		set(byte(reflect.Uintptr)).
		set(byte(reflect.Float32)).
		set(byte(reflect.Float64)).
		set(byte(reflect.Complex64)).
		set(byte(reflect.Complex128)).
		set(byte(reflect.String))

	// xdebugf("bitsets: ref: %b, isnil: %b, scalar: %b", refBitset, isnilBitset, scalarBitset)
}

type handleFlag uint8

const (
	initedHandleFlag handleFlag = 1 << iota
	binaryHandleFlag
	jsonHandleFlag
)

type clsErr struct {
	closed    bool  // is it closed?
	errClosed error // error on closing
}

// type entryType uint8

// const (
// 	entryTypeBytes entryType = iota // make this 0, so a comparison is cheap
// 	entryTypeIo
// 	entryTypeBufio
// 	entryTypeUnset = 255
// )

type charEncoding uint8

const (
	_ charEncoding = iota // make 0 unset
	cUTF8
	cUTF16LE
	cUTF16BE
	cUTF32LE
	cUTF32BE
	// Deprecated: not a true char encoding value
	cRAW charEncoding = 255
)

// valueType is the stream type
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeBool
	valueTypeString
	valueTypeSymbol
	valueTypeBytes
	valueTypeMap
	valueTypeArray
	valueTypeTime
	valueTypeExt

	// valueTypeInvalid = 0xff
)

var valueTypeStrings = [...]string{
	"Unset",
	"Nil",
	"Int",
	"Uint",
	"Float",
	"Bool",
	"String",
	"Symbol",
	"Bytes",
	"Map",
	"Array",
	"Timestamp",
	"Ext",
}

func (x valueType) String() string {
	if int(x) < len(valueTypeStrings) {
		return valueTypeStrings[x]
	}
	return strconv.FormatInt(int64(x), 10)
}

type seqType uint8

const (
	_ seqType = iota
	seqTypeArray
	seqTypeSlice
	seqTypeChan
)

// note that containerMapStart and containerArrayStart are not sent.
// This is because the ReadXXXStart and EncodeXXXStart already do these.
type containerState uint8

const (
	_ containerState = iota

	containerMapStart
	containerMapKey
	containerMapValue
	containerMapEnd
	containerArrayStart
	containerArrayElem
	containerArrayEnd
)

// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
// type sfiIdx struct {
// 	name  string
// 	index int
// }

// do not recurse if a containing type refers to an embedded type
// which refers back to its containing type (via a pointer).
// The second time this back-reference happens, break out,
// so as not to cause an infinite loop.
const rgetMaxRecursion = 2

// Anecdotally, we believe most types have <= 12 fields.
// - even Java's PMD rules set TooManyFields threshold to 15.
// However, go has embedded fields, which should be regarded as
// top level, so the effective field count can double or triple.
// In addition, we don't want to keep creating transient arrays,
// especially for the sfi index tracking, and the evtypes tracking.
//
// So - try to keep typeInfoLoadArray within 2K bytes
const (
	typeInfoLoadArraySfisLen   = 16
	typeInfoLoadArraySfiidxLen = 8 * 112
	typeInfoLoadArrayEtypesLen = 12
	typeInfoLoadArrayBLen      = 8 * 4
)

// typeInfoLoad is a transient object used while loading up a typeInfo.
type typeInfoLoad struct {
	// fNames   []string
	// encNames []string
	etypes []uintptr
	sfis   []structFieldInfo
}

// typeInfoLoadArray is a cache object used to efficiently load up a typeInfo without
// much allocation.
type typeInfoLoadArray struct {
	// fNames   [typeInfoLoadArrayLen]string
	// encNames [typeInfoLoadArrayLen]string
	sfis   [typeInfoLoadArraySfisLen]structFieldInfo
	sfiidx [typeInfoLoadArraySfiidxLen]byte
	etypes [typeInfoLoadArrayEtypesLen]uintptr
	b      [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
}

// // cacheLineSafer denotes that a type is safe for cache-line access.
// // This could mean that
// type cacheLineSafer interface {
// 	cacheLineSafe()
// }

// mirror json.Marshaler and json.Unmarshaler here,
// so we don't import the encoding/json package
type jsonMarshaler interface {
	MarshalJSON() ([]byte, error)
}

type jsonUnmarshaler interface {
	UnmarshalJSON([]byte) error
}

type isZeroer interface {
	IsZero() bool
}

type codecError struct {
	name string
	err  interface{}
}

func (e codecError) Cause() error {
	switch xerr := e.err.(type) {
	case nil:
		return nil
	case error:
		return xerr
	case string:
		return errors.New(xerr)
	case fmt.Stringer:
		return errors.New(xerr.String())
	default:
		return fmt.Errorf("%v", e.err)
	}
}

func (e codecError) Error() string {
	return fmt.Sprintf("%s error: %v", e.name, e.err)
}

// type byteAccepter func(byte) bool

var (
	bigen               = binary.BigEndian
	structInfoFieldName = "_struct"

	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
	intfTyp        = intfSliceTyp.Elem()

	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()

	stringTyp     = reflect.TypeOf("")
	timeTyp       = reflect.TypeOf(time.Time{})
	rawExtTyp     = reflect.TypeOf(RawExt{})
	rawTyp        = reflect.TypeOf(Raw{})
	uintptrTyp    = reflect.TypeOf(uintptr(0))
	uint8Typ      = reflect.TypeOf(uint8(0))
	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
	uintTyp       = reflect.TypeOf(uint(0))
	intTyp        = reflect.TypeOf(int(0))

	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()

	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()

	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()

	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()

	selferTyp         = reflect.TypeOf((*Selfer)(nil)).Elem()
	missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem()
	iszeroTyp         = reflect.TypeOf((*isZeroer)(nil)).Elem()

	uint8TypId      = rt2id(uint8Typ)
	uint8SliceTypId = rt2id(uint8SliceTyp)
	rawExtTypId     = rt2id(rawExtTyp)
	rawTypId        = rt2id(rawTyp)
	intfTypId       = rt2id(intfTyp)
	timeTypId       = rt2id(timeTyp)
	stringTypId     = rt2id(stringTyp)

	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
	intfSliceTypId   = rt2id(intfSliceTyp)
	// mapBySliceTypId = rt2id(mapBySliceTyp)

	intBitsize  = uint8(intTyp.Bits())
	uintBitsize = uint8(uintTyp.Bits())

	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

	chkOvf checkOverflow

	errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
)

var defTypeInfos = NewTypeInfos([]string{"codec", "json"})

var immutableKindsSet = [32]bool{
	// reflect.Invalid: ,
	reflect.Bool:       true,
	reflect.Int:        true,
	reflect.Int8:       true,
	reflect.Int16:      true,
	reflect.Int32:      true,
	reflect.Int64:      true,
	reflect.Uint:       true,
	reflect.Uint8:      true,
	reflect.Uint16:     true,
	reflect.Uint32:     true,
	reflect.Uint64:     true,
	reflect.Uintptr:    true,
	reflect.Float32:    true,
	reflect.Float64:    true,
	reflect.Complex64:  true,
	reflect.Complex128: true,
	// reflect.Array
	// reflect.Chan
	// reflect.Func: true,
	// reflect.Interface
	// reflect.Map
	// reflect.Ptr
	// reflect.Slice
	reflect.String: true,
	// reflect.Struct
	// reflect.UnsafePointer
}

// SelfExt is a sentinel extension signifying that types
// registered with it SHOULD be encoded and decoded
// based on the native mode of the format.
//
// This allows users to define a tag for an extension,
// but signify that the types should be encoded/decoded as the native encoding.
// This way, users need not also define how to encode or decode the extension.
var SelfExt = &extFailWrapper{}

// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
//
// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
// For example, the snippet below will cause such an error.
//
//	type testSelferRecur struct{}
//	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
//	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
//
// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
// This is because, during each decode, we first check whether the next set of bytes
// represents nil, and if so, we just set the value to nil.
type Selfer interface {
	CodecEncodeSelf(*Encoder)
	CodecDecodeSelf(*Decoder)
}

// MissingFielder defines the interface allowing structs to internally decode or encode
// values which do not map to struct fields.
//
// We expect that this interface is bound to a pointer type (so the mutation function works).
//
// A use-case is if a version of a type unexports a field, but you want compatibility between
// both versions during encoding and decoding.
//
// Note that the interface is completely ignored during codecgen.
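//
// A hypothetical implementation that stashes unknown fields in a map might look like
// (names are illustrative, not part of this package):
//
//	type T struct {
//		Name  string
//		extra map[string]interface{} // holds stream fields not declared on T
//	}
//	func (t *T) CodecMissingField(field []byte, value interface{}) bool {
//		if t.extra == nil {
//			t.extra = make(map[string]interface{})
//		}
//		t.extra[string(field)] = value
//		return true // the missing field was stored on the struct
//	}
//	func (t *T) CodecMissingFields() map[string]interface{} { return t.extra }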
type MissingFielder interface {
	// CodecMissingField is called to set a missing field and value pair.
	//
	// It returns true if the missing field was set on the struct.
	CodecMissingField(field []byte, value interface{}) bool

	// CodecMissingFields returns the set of fields which are not struct fields.
	CodecMissingFields() map[string]interface{}
}

// MapBySlice is a tag interface that denotes that the wrapped slice should encode
// as a map in the stream.
// The slice contains a sequence of key-value pairs.
// This affords storing a map in a specific sequence in the stream.
//
// Example usage:
//
//	type T1 []string // or []int or []Point or any other "slice" type
//	func (T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
//	type T2 struct{ KeyValues T1 }
//
//	var kvs = []string{"one", "1", "two", "2", "three", "3"}
//	var v2 = T2{KeyValues: T1(kvs)}
//	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"}}
//
// The support of MapBySlice affords the following:
//   - A slice type which implements MapBySlice will be encoded as a map
//   - A slice can be decoded from a map in the stream
//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
type MapBySlice interface {
	MapBySlice()
}

// BasicHandle encapsulates the common options and extension functions.
//
// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
type BasicHandle struct {
	// BasicHandle is always a part of a different type.
	// It doesn't have to fit into its own cache lines.

	// TypeInfos is used to get the type info for any type.
	//
	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
	TypeInfos *TypeInfos

	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
	// These slices are used all the time, so keep as slices (not pointers).

	extHandle

	rtidFns      atomicRtidFnSlice
	rtidFnsNoExt atomicRtidFnSlice

	// ---- cache line

	DecodeOptions

	// ---- cache line

	EncodeOptions

	intf2impls

	mu     sync.Mutex
	inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)

	RPCOptions

	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
	//
	// All Handlers should know how to encode/decode time.Time as part of the core
	// format specification, or as a standard extension defined by the format.
	//
	// However, users can elect to handle time.Time as a custom extension, or via the
	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
	// To elect this behavior, users can set TimeNotBuiltin=true.
	//
	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
	//
	// Note: DO NOT CHANGE AFTER FIRST USE.
	//
	// Once a Handle has been used, do not modify this option. Doing so will
	// lead to unexpected behaviour during encoding and decoding.
	TimeNotBuiltin bool

	// ExplicitRelease configures whether Release() is implicitly called after an encode or
	// decode call.
	//
	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
	// then you do not want it to be implicitly closed after each Encode/Decode call.
	// Doing so will unnecessarily return resources to the shared pool, only for you to
	// grab them right after again to do another Encode/Decode call.
	//
	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
	// you are truly done.
	//
	// As an alternative, you can explicitly set a finalizer - so its resources
	// are returned to the shared pool before it is garbage-collected. Do it as below:
	//
	//	runtime.SetFinalizer(e, (*Encoder).Release)
	//	runtime.SetFinalizer(d, (*Decoder).Release)
	//
	// Deprecated: This is no longer used, as pools are only used for long-lived objects
	// which are shared across goroutines.
	// Setting this value has no effect. It is maintained for backward compatibility.
	ExplicitRelease bool

	// flags handleFlag // holds flag for if binaryEncoding, jsonHandler, etc
	// be bool // is handle a binary encoding?
	// js bool // is handle javascript handler?
	// n  byte // first letter of handle name
	// _  uint16 // padding

	// ---- cache line

	// noBuiltInTypeChecker
	// _ uint32 // padding

	// r []uintptr // rtids mapped to s above
}

// basicHandle returns an initialized BasicHandle from the Handle.
func basicHandle(hh Handle) (x *BasicHandle) {
	x = hh.getBasicHandle()
	// ** We need to simulate once.Do, to ensure no data race within the block.
	// ** Consequently, below would not work.
	// if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
	// 	x.be = hh.isBinary()
	// 	_, x.js = hh.(*JsonHandle)
	// 	x.n = hh.Name()[0]
	// }

	// simulate once.Do using our own stored flag and mutex, as a CompareAndSwap
	// is not sufficient, since a race condition can occur within the init(Handle) function.
	// init is made noinline, so that this function can be inlined by its caller.
	if atomic.LoadUint32(&x.inited) == 0 {
		x.init(hh)
	}
	return
}

func (x *BasicHandle) isJs() bool {
	return handleFlag(x.inited)&jsonHandleFlag != 0
}

func (x *BasicHandle) isBe() bool {
	return handleFlag(x.inited)&binaryHandleFlag != 0
}

//go:noinline
func (x *BasicHandle) init(hh Handle) {
	// make it uninlineable, as it is called at most once
	x.mu.Lock()
	if x.inited == 0 {
		var f = initedHandleFlag
		if hh.isBinary() {
			f |= binaryHandleFlag
		}
		if _, b := hh.(*JsonHandle); b {
			f |= jsonHandleFlag
		}
		// _, x.js = hh.(*JsonHandle)
		// x.n = hh.Name()[0]
		atomic.StoreUint32(&x.inited, uint32(f))
		// ensure MapType and SliceType are of correct type
		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
			panic(errMapTypeNotMapKind)
		}
		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
			panic(errSliceTypeNotSliceKind)
		}
	}
	x.mu.Unlock()
}

func (x *BasicHandle) getBasicHandle() *BasicHandle {
	return x
}

func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	if x.TypeInfos == nil {
		return defTypeInfos.get(rtid, rt)
	}
	return x.TypeInfos.get(rtid, rt)
}

func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of a for loop) so this function can be inlined.

	// h, i, j := 0, 0, len(s)
	var h uint // var h, i uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
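	// here, i == j: i is the smallest index with s[i].rtid >= rtid (or i == len(s)),
	// so an exact match, if present, must be at s[i].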
	if i < uint(len(s)) && s[i].rtid == rtid {
		fn = s[i].fn
	}
	return
}

func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, &x.rtidFns, true)
}

func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
	return x.fnVia(rt, &x.rtidFnsNoExt, false)
}

func (x *BasicHandle) fnVia(rt reflect.Type, fs *atomicRtidFnSlice, checkExt bool) (fn *codecFn) {
	rtid := rt2id(rt)
	sp := fs.load()
	if sp != nil {
		if _, fn = findFn(sp, rtid); fn != nil {
			return
		}
	}
	fn = x.fnLoad(rt, rtid, checkExt)
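	// Publish fn via copy-on-write under the lock: re-load the slice first,
	// since another goroutine may have stored an entry for rtid concurrently,
	// then store a new sorted slice with fn inserted at its position.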
	x.mu.Lock()
	var sp2 []codecRtidFn
	sp = fs.load()
	if sp == nil {
		sp2 = []codecRtidFn{{rtid, fn}}
		fs.store(sp2)
	} else {
		idx, fn2 := findFn(sp, rtid)
		if fn2 == nil {
			sp2 = make([]codecRtidFn, len(sp)+1)
			copy(sp2, sp[:idx])
			copy(sp2[idx+1:], sp[idx:])
			sp2[idx] = codecRtidFn{rtid, fn}
			fs.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}

func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *codecFn) {
	fn = new(codecFn)
	fi := &(fn.i)
	ti := x.getTypeInfo(rtid, rt)
	fi.ti = ti
	rk := reflect.Kind(ti.kind)

	// anything can be an extension except the built-in ones: time, raw and rawext

	if rtid == timeTypId && !x.TimeNotBuiltin {
		fn.fe = (*Encoder).kTime
		fn.fd = (*Decoder).kTime
	} else if rtid == rawTypId {
		fn.fe = (*Encoder).raw
		fn.fd = (*Decoder).raw
	} else if rtid == rawExtTypId {
		fn.fe = (*Encoder).rawExt
		fn.fd = (*Decoder).rawExt
		fi.addrF = true
		fi.addrD = true
		fi.addrE = true
	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.fe = (*Encoder).ext
		fn.fd = (*Decoder).ext
		fi.addrF = true
		fi.addrD = true
		if rk == reflect.Struct || rk == reflect.Array {
			fi.addrE = true
		}
	} else if ti.isFlag(tiflagSelfer) || ti.isFlag(tiflagSelferPtr) {
		fn.fe = (*Encoder).selferMarshal
		fn.fd = (*Decoder).selferUnmarshal
		fi.addrF = true
		fi.addrD = ti.isFlag(tiflagSelferPtr)
		fi.addrE = ti.isFlag(tiflagSelferPtr)
	} else if supportMarshalInterfaces && x.isBe() &&
		(ti.isFlag(tiflagBinaryMarshaler) || ti.isFlag(tiflagBinaryMarshalerPtr)) &&
		(ti.isFlag(tiflagBinaryUnmarshaler) || ti.isFlag(tiflagBinaryUnmarshalerPtr)) {
		fn.fe = (*Encoder).binaryMarshal
		fn.fd = (*Decoder).binaryUnmarshal
		fi.addrF = true
		fi.addrD = ti.isFlag(tiflagBinaryUnmarshalerPtr)
		fi.addrE = ti.isFlag(tiflagBinaryMarshalerPtr)
	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
		(ti.isFlag(tiflagJsonMarshaler) || ti.isFlag(tiflagJsonMarshalerPtr)) &&
		(ti.isFlag(tiflagJsonUnmarshaler) || ti.isFlag(tiflagJsonUnmarshalerPtr)) {
		// If JSON, we should check JSONMarshal before textMarshal
		fn.fe = (*Encoder).jsonMarshal
		fn.fd = (*Decoder).jsonUnmarshal
		fi.addrF = true
		fi.addrD = ti.isFlag(tiflagJsonUnmarshalerPtr)
		fi.addrE = ti.isFlag(tiflagJsonMarshalerPtr)
	} else if supportMarshalInterfaces && !x.isBe() &&
		(ti.isFlag(tiflagTextMarshaler) || ti.isFlag(tiflagTextMarshalerPtr)) &&
		(ti.isFlag(tiflagTextUnmarshaler) || ti.isFlag(tiflagTextUnmarshalerPtr)) {
		fn.fe = (*Encoder).textMarshal
		fn.fd = (*Decoder).textUnmarshal
		fi.addrF = true
		fi.addrD = ti.isFlag(tiflagTextUnmarshalerPtr)
		fi.addrE = ti.isFlag(tiflagTextMarshalerPtr)
	} else {
		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice) {
			if ti.pkgpath == "" { // un-named slice or map
				if idx := fastpathAV.index(rtid); idx != -1 {
					fn.fe = fastpathAV[idx].encfn
					fn.fd = fastpathAV[idx].decfn
					fi.addrD = true
					fi.addrF = false
				}
			} else {
				// use mapping for underlying type if there
				var rtu reflect.Type
				if rk == reflect.Map {
					rtu = reflect.MapOf(ti.key, ti.elem)
				} else {
					rtu = reflect.SliceOf(ti.elem)
				}
				rtuid := rt2id(rtu)
				if idx := fastpathAV.index(rtuid); idx != -1 {
					xfnf := fastpathAV[idx].encfn
					xrt := fastpathAV[idx].rt
					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
						xfnf(e, xf, rvConvert(xrv, xrt))
					}
					fi.addrD = true
					fi.addrF = false // meaning it can be an address(ptr) or a value
					xfnf2 := fastpathAV[idx].decfn
					xptr2rt := reflect.PtrTo(xrt)
					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
						// xdebug2f("fd: convert from %v to %v", xrv.Type(), xrt)
						if xrv.Kind() == reflect.Ptr {
							xfnf2(d, xf, rvConvert(xrv, xptr2rt))
						} else {
							xfnf2(d, xf, rvConvert(xrv, xrt))
						}
					}
				}
			}
		}
		if fn.fe == nil && fn.fd == nil {
			switch rk {
			case reflect.Bool:
				fn.fe = (*Encoder).kBool
				fn.fd = (*Decoder).kBool
			case reflect.String:
				// Do not check this here, as it will statically set the function for a string
				// type, and if the Handle is modified thereafter, behaviour is non-deterministic.
				//
				// if x.StringToRaw {
				// 	fn.fe = (*Encoder).kStringToRaw
				// } else {
				// 	fn.fe = (*Encoder).kStringEnc
				// }
				fn.fe = (*Encoder).kString
				fn.fd = (*Decoder).kString
			case reflect.Int:
				fn.fd = (*Decoder).kInt
				fn.fe = (*Encoder).kInt
			case reflect.Int8:
				fn.fe = (*Encoder).kInt8
				fn.fd = (*Decoder).kInt8
			case reflect.Int16:
				fn.fe = (*Encoder).kInt16
				fn.fd = (*Decoder).kInt16
			case reflect.Int32:
				fn.fe = (*Encoder).kInt32
				fn.fd = (*Decoder).kInt32
			case reflect.Int64:
				fn.fe = (*Encoder).kInt64
				fn.fd = (*Decoder).kInt64
			case reflect.Uint:
				fn.fd = (*Decoder).kUint
				fn.fe = (*Encoder).kUint
			case reflect.Uint8:
				fn.fe = (*Encoder).kUint8
				fn.fd = (*Decoder).kUint8
			case reflect.Uint16:
				fn.fe = (*Encoder).kUint16
				fn.fd = (*Decoder).kUint16
			case reflect.Uint32:
				fn.fe = (*Encoder).kUint32
				fn.fd = (*Decoder).kUint32
			case reflect.Uint64:
				fn.fe = (*Encoder).kUint64
				fn.fd = (*Decoder).kUint64
			case reflect.Uintptr:
				fn.fe = (*Encoder).kUintptr
				fn.fd = (*Decoder).kUintptr
			case reflect.Float32:
				fn.fe = (*Encoder).kFloat32
				fn.fd = (*Decoder).kFloat32
			case reflect.Float64:
				fn.fe = (*Encoder).kFloat64
				fn.fd = (*Decoder).kFloat64
			case reflect.Invalid:
				fn.fe = (*Encoder).kInvalid
				fn.fd = (*Decoder).kErr
			case reflect.Chan:
				fi.seq = seqTypeChan
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSliceForChan
			case reflect.Slice:
				fi.seq = seqTypeSlice
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Array:
				fi.seq = seqTypeArray
				fn.fe = (*Encoder).kSlice
				fi.addrF = false
				fi.addrD = false
				rt2 := reflect.SliceOf(ti.elem)
				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
					// decode the array as if it were the equivalent slice type
					d.h.fn(rt2).fd(d, xf, rvGetSlice4Array(xrv, rt2))
				}
				// fn.fd = (*Decoder).kArray
			case reflect.Struct:
				if ti.anyOmitEmpty ||
					ti.isFlag(tiflagMissingFielder) ||
					ti.isFlag(tiflagMissingFielderPtr) {
					fn.fe = (*Encoder).kStruct
				} else {
					fn.fe = (*Encoder).kStructNoOmitempty
				}
				fn.fd = (*Decoder).kStruct
			case reflect.Map:
				fn.fe = (*Encoder).kMap
				fn.fd = (*Decoder).kMap
			case reflect.Interface:
				// encode: reflect.Interface values are handled already by preEncodeValue
				fn.fd = (*Decoder).kInterface
				fn.fe = (*Encoder).kErr
			default:
				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
				fn.fe = (*Encoder).kErr
				fn.fd = (*Decoder).kErr
			}
		}
	}
	return
}

// Handle defines a specific encoding format. It also stores any runtime state
// used during an Encoding or Decoding session e.g. stored state about Types, etc.
//
// Once a handle is configured, it can be shared across multiple Encoders and Decoders.
//
// Note that a Handle is NOT safe for concurrent modification.
//
// A Handle also should not be modified after it is configured and has
// been used at least once. This is because stored state may be out of sync with the
// new configuration, and a data race can occur when multiple goroutines access it,
// i.e. multiple Encoders or Decoders in different goroutines.
//
// Consequently, the typical usage model is that a Handle is pre-configured
// before first time use, and not modified while in use.
// Such a pre-configured Handle is safe for concurrent access.
type Handle interface {
	Name() string
	// return the basic handle. It may not have been inited.
	// Prefer to use the basicHandle() helper function that ensures it has been inited.
	getBasicHandle() *BasicHandle
	// recreateEncDriver(encDriver) bool
	newEncDriver(w *Encoder) encDriver
	newDecDriver(r *Decoder) decDriver
	isBinary() bool
	hasElemSeparators() bool
	// IsBuiltinType(rtid uintptr) bool
}

// Raw represents raw formatted bytes.
// We "blindly" store it during encode and retrieve the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour
// behind an Encode flag which must be explicitly set.
type Raw []byte

// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt
// if there is no registered extension for the tag.
//
// Only one of Data or Value is nil.
// If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
	Tag uint64
	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types.
	Data []byte
	// Value represents the extension, if Data is nil.
	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
	// custom serialization of the types.
	Value interface{}
}

// BytesExt handles custom (de)serialization of types to/from []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
	// WriteExt converts a value to a []byte.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	WriteExt(v interface{}) []byte

	// ReadExt updates a value from a []byte.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	ReadExt(dst interface{}, src []byte)
}

// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
//
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
type InterfaceExt interface {
	// ConvertExt converts a value into a simpler interface for easy encoding,
	// e.g. convert time.Time to int64.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	ConvertExt(v interface{}) interface{}

	// UpdateExt updates a value from a simpler interface for easy decoding,
	// e.g. convert int64 to time.Time.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	UpdateExt(dst interface{}, src interface{})
}
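
// A hypothetical InterfaceExt that encodes a time.Time as its Unix seconds
// (illustrative only; time.Time is a struct kind, so v arrives as *time.Time,
// and time.Time is already a builtin for most formats here):
//
//	type timeExt struct{}
//	func (timeExt) ConvertExt(v interface{}) interface{} {
//		return v.(*time.Time).Unix() // encode as int64 seconds
//	}
//	func (timeExt) UpdateExt(dst interface{}, src interface{}) {
//		*(dst.(*time.Time)) = time.Unix(src.(int64), 0) // decode back into time.Time
//	}
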
// Ext handles custom (de)serialization of custom types / extensions.
type Ext interface {
	BytesExt
	InterfaceExt
}

// addExtWrapper is a wrapper implementation to support the former AddExt exported method.
type addExtWrapper struct {
	encFn func(reflect.Value) ([]byte, error)
	decFn func(reflect.Value, []byte) error
}

func (x addExtWrapper) WriteExt(v interface{}) []byte {
	bs, err := x.encFn(rv4i(v))
	if err != nil {
		panic(err)
	}
	return bs
}

func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
	if err := x.decFn(rv4i(v), bs); err != nil {
		panic(err)
	}
}

func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
	return x.WriteExt(v)
}

func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
	x.ReadExt(dest, v.([]byte))
}

type bytesExtFailer struct{}

func (bytesExtFailer) WriteExt(v interface{}) []byte {
	panicv.errorstr("BytesExt.WriteExt is not supported")
	return nil
}

func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
	panicv.errorstr("BytesExt.ReadExt is not supported")
}

type interfaceExtFailer struct{}

func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
	panicv.errorstr("InterfaceExt.ConvertExt is not supported")
	return nil
}

func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
	panicv.errorstr("InterfaceExt.UpdateExt is not supported")
}

// type extWrapper struct {
// 	BytesExt
// 	InterfaceExt
// }

type bytesExtWrapper struct {
	interfaceExtFailer
	BytesExt
}

type interfaceExtWrapper struct {
	bytesExtFailer
	InterfaceExt
}

type extFailWrapper struct {
	bytesExtFailer
	interfaceExtFailer
}

type binaryEncodingType struct{}

func (binaryEncodingType) isBinary() bool { return true }

type textEncodingType struct{}

func (textEncodingType) isBinary() bool { return false }

// noBuiltInTypes is embedded into many types which do not support builtins,
// e.g. msgpack, simple, cbor.

// type noBuiltInTypeChecker struct{}
// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
// type noBuiltInTypes struct{ noBuiltInTypeChecker }

type noBuiltInTypes struct{}

func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}

// type noStreamingCodec struct{}
// func (noStreamingCodec) CheckBreak() bool { return false }
// func (noStreamingCodec) hasElemSeparators() bool { return false }

type noElemSeparators struct{}

func (noElemSeparators) hasElemSeparators() (v bool)            { return }
func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }

// bigenHelper writes big-endian values to an encWr.
// Users must already have sliced x to the correct length, because we will not reslice.
type bigenHelper struct {
	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
	w *encWr
}
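
// A usage sketch (hypothetical caller, with b a scratch buffer and w the
// encoder's writer): writing a big-endian uint32 length prefix looks like:
//
//	z := bigenHelper{x: b[:4], w: w} // x must already be sliced to exactly 4 bytes
//	z.writeUint32(n)
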
  1075. func (z bigenHelper) writeUint16(v uint16) {
  1076. bigen.PutUint16(z.x, v)
  1077. z.w.writeb(z.x)
  1078. }
  1079. func (z bigenHelper) writeUint32(v uint32) {
  1080. bigen.PutUint32(z.x, v)
  1081. z.w.writeb(z.x)
  1082. }
  1083. func (z bigenHelper) writeUint64(v uint64) {
  1084. bigen.PutUint64(z.x, v)
  1085. z.w.writeb(z.x)
  1086. }
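
// Example (illustrative sketch only, assuming w is an initialized *encWr and
// b is a scratch array): writing a 4-byte big-endian length prefix.
//
//	var b [8]byte
//	z := bigenHelper{b[:4], w} // x must already be sliced to exactly 4 bytes
//	z.writeUint32(0x01020304)  // writes bytes 0x01 0x02 0x03 0x04 to the stream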

type extTypeTagFn struct {
	rtid    uintptr
	rtidptr uintptr
	rt      reflect.Type
	tag     uint64
	ext     Ext
	// _ [1]uint64 // padding
}

type extHandle []extTypeTagFn

// AddExt registers an encode and decode function for a reflect.Type.
// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (o *extHandle) AddExt(rt reflect.Type, tag byte,
	encfn func(reflect.Value) ([]byte, error),
	decfn func(reflect.Value, []byte) error) (err error) {
	if encfn == nil || decfn == nil {
		return o.SetExt(rt, uint64(tag), nil)
	}
	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
}

// SetExt will set the extension for a tag and reflect.Type.
// Note that the type must be a named type, and specifically not a pointer or Interface.
// An error is returned if that is not honored.
// To deregister an ext, call SetExt with a nil Ext.
//
// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
	// o is a pointer, because we may need to initialize it
	rk := rt.Kind()
	for rk == reflect.Ptr {
		rt = rt.Elem()
		rk = rt.Kind()
	}
	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
	}
	rtid := rt2id(rt)
	switch rtid {
	case timeTypId, rawTypId, rawExtTypId:
		// all natively supported types, so they cannot have an extension
		return // TODO: should we silently ignore, or return an error???
	}
	// if o == nil {
	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
	// }
	o2 := *o
	// if o2 == nil {
	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
	// }
	for i := range o2 {
		v := &o2[i]
		if v.rtid == rtid {
			v.tag, v.ext = tag, ext
			return
		}
	}
	rtidptr := rt2id(reflect.PtrTo(rt))
	*o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext}) // , [1]uint64{}})
	return
}
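
// Example (hedged sketch; myWrapper, myExtImpl and the tag value 78 are
// hypothetical): registering an Ext for a named type on a Handle that embeds
// this extHandle (as BasicHandle does):
//
//	type myWrapper struct{ v uint64 }
//	var h MsgpackHandle
//	err := h.SetExt(reflect.TypeOf(myWrapper{}), 78, myExtImpl{})
//
// Pointer types are normalized to their base type first, so passing
// reflect.TypeOf(&myWrapper{}) would register the same entry.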

func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
	if !check {
		return
	}
	for i := range o {
		v = &o[i]
		if v.rtid == rtid || v.rtidptr == rtid {
			return
		}
	}
	return nil
}

func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
	for i := range o {
		v = &o[i]
		if v.tag == tag {
			return
		}
	}
	return nil
}

type intf2impl struct {
	rtid uintptr // for intf
	impl reflect.Type
	// _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
}

type intf2impls []intf2impl

// Intf2Impl maps an interface to an implementing type.
// This allows us to support inferring the concrete type
// and populating it when passed an interface.
// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
//
// Passing a nil impl will clear the mapping.
func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
	if impl != nil && !impl.Implements(intf) {
		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
	}
	rtid := rt2id(intf)
	o2 := *o
	for i := range o2 {
		v := &o2[i]
		if v.rtid == rtid {
			v.impl = impl
			return
		}
	}
	*o = append(o2, intf2impl{rtid, impl})
	return
}

func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
	for i := range o {
		v := &o[i]
		if v.rtid == rtid {
			if v.impl == nil {
				return
			}
			vkind := v.impl.Kind()
			if vkind == reflect.Ptr {
				return reflect.New(v.impl.Elem())
			}
			return rvZeroAddrK(v.impl, vkind)
		}
	}
	return
}
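
// Example (hedged sketch, assuming a Handle that embeds intf2impls, as
// BasicHandle does here): mapping io.Reader to *bytes.Buffer, so decoding into
// a field declared as io.Reader allocates and populates a bytes.Buffer:
//
//	var h JsonHandle
//	err := h.Intf2Impl(
//		reflect.TypeOf((*io.Reader)(nil)).Elem(), // the interface type
//		reflect.TypeOf(&bytes.Buffer{}))          // the implementing type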

type structFieldInfoFlag uint8

const (
	_ structFieldInfoFlag = 1 << iota
	structFieldInfoFlagReady
	structFieldInfoFlagOmitEmpty
)

func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
	*x = *x | f
}

func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
	*x = *x &^ f
}

func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
	return x&f != 0
}

func (x structFieldInfoFlag) omitEmpty() bool {
	return x.flagGet(structFieldInfoFlagOmitEmpty)
}

func (x structFieldInfoFlag) ready() bool {
	return x.flagGet(structFieldInfoFlagReady)
}

type structFieldInfo struct {
	encName   string // encode name
	fieldName string // field name

	is  [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
	nis uint8                      // num levels of embedding. if 1, then it's not embedded.

	encNameAsciiAlphaNum bool // encName contains only ASCII letters and digits
	structFieldInfoFlag
	// _ [1]byte // padding
}

// func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
// 	if v, valid := si.field(v, false); valid {
// 		v.Set(reflect.Zero(v.Type()))
// 	}
// }

// field returns the field of the struct.
// If the field sits behind a nil embedded pointer (and update is false),
// it returns an invalid (zero) reflect.Value with valid=false.
func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
	// replicate FieldByIndex
	for i, x := range si.is {
		if uint8(i) == si.nis {
			break
		}
		if v, valid = baseStructRv(v, update); !valid {
			return
		}
		v = v.Field(int(x))
	}
	return v, true
}

// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
// 	v, _ = si.field(v, update)
// 	return v
// }

func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
	keytype = valueTypeString // default
	if stag == "" {
		return
	}
	for i, s := range strings.Split(stag, ",") {
		if i == 0 {
			continue // the first token is the encode name, handled by parseTag
		}
		switch s {
		case "omitempty":
			omitEmpty = true
		case "toarray":
			toArray = true
		case "int":
			keytype = valueTypeInt
		case "uint":
			keytype = valueTypeUint
		case "float":
			keytype = valueTypeFloat
		// case "bool":
		// 	keytype = valueTypeBool
		case "string":
			keytype = valueTypeString
		}
	}
	return
}
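
// Example (illustrative only): with the struct-info tag below on the special
// _struct field, parseStructInfo reports toArray=true, omitEmpty=true and
// keytype=valueTypeInt:
//
//	type T struct {
//		_struct struct{} `codec:",toarray,omitempty,int"`
//		A       int
//	}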

func (si *structFieldInfo) parseTag(stag string) {
	// if fname == "" {
	// 	panic(errNoFieldNameToStructFieldInfo)
	// }
	if stag == "" {
		return
	}
	for i, s := range strings.Split(stag, ",") {
		if i == 0 {
			if s != "" {
				si.encName = s
			}
		} else {
			switch s {
			case "omitempty":
				si.flagSet(structFieldInfoFlagOmitEmpty)
				// si.omitEmpty = true
				// case "toarray":
				// 	si.toArray = true
			}
		}
	}
}

type sfiSortedByEncName []*structFieldInfo

func (p sfiSortedByEncName) Len() int           { return len(p) }
func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }
func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }

const structFieldNodeNumToCache = 4

type structFieldNodeCache struct {
	rv  [structFieldNodeNumToCache]reflect.Value
	idx [structFieldNodeNumToCache]uint32
	num uint8
}

func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
	for i, k := range &x.idx {
		if uint8(i) == x.num {
			return // break
		}
		if key == k {
			return x.rv[i], true
		}
	}
	return
}

func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
	if x.num < structFieldNodeNumToCache {
		x.rv[x.num] = fv
		x.idx[x.num] = key
		x.num++
		return
	}
}

type structFieldNode struct {
	v      reflect.Value
	cache2 structFieldNodeCache
	cache3 structFieldNodeCache
	update bool
}

func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
	// return si.fieldval(x.v, x.update)

	// Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding.
	// This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
	var valid bool
	switch si.nis {
	case 1:
		fv = x.v.Field(int(si.is[0]))
	case 2:
		if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
			fv = fv.Field(int(si.is[1]))
			return
		}
		fv = x.v.Field(int(si.is[0]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		x.cache2.tryAdd(fv, uint32(si.is[0]))
		fv = fv.Field(int(si.is[1]))
	case 3:
		var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
		if fv, valid = x.cache3.get(key); valid {
			fv = fv.Field(int(si.is[2]))
			return
		}
		fv = x.v.Field(int(si.is[0]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		fv = fv.Field(int(si.is[1]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		x.cache3.tryAdd(fv, key)
		fv = fv.Field(int(si.is[2]))
	default:
		fv, _ = si.field(x.v, x.update)
	}
	return
}

// baseStructRv dereferences v down to the base (non-pointer) struct value.
// If update is true, nil pointers along the way are allocated;
// otherwise valid=false is returned on encountering one.
func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
	for v.Kind() == reflect.Ptr {
		if rvIsNil(v) {
			if !update {
				return
			}
			rvSetDirect(v, reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return v, true
}
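
// Example (illustrative only, hypothetical types): given
//
//	type inner struct{ A int }
//	type outer struct{ *inner }
//	v := reflect.ValueOf(&outer{}).Elem().Field(0) // settable, holds a nil *inner
//
// baseStructRv(v, true) allocates the nil *inner and returns the inner struct
// value (valid=true); with update=false it returns valid=false instead.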

type tiflag uint32

const (
	_ tiflag = 1 << iota
	tiflagComparable
	tiflagIsZeroer
	tiflagIsZeroerPtr
	tiflagBinaryMarshaler
	tiflagBinaryMarshalerPtr
	tiflagBinaryUnmarshaler
	tiflagBinaryUnmarshalerPtr
	tiflagTextMarshaler
	tiflagTextMarshalerPtr
	tiflagTextUnmarshaler
	tiflagTextUnmarshalerPtr
	tiflagJsonMarshaler
	tiflagJsonMarshalerPtr
	tiflagJsonUnmarshaler
	tiflagJsonUnmarshalerPtr
	tiflagSelfer
	tiflagSelferPtr
	tiflagMissingFielder
	tiflagMissingFielderPtr
	// tiflag
	// tiflag
	// tiflag
	// tiflag
	// tiflag
	// tiflag
)

// typeInfo keeps static (non-changing, read-only) information
// about each (non-ptr) type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
//   - If base is a built-in type, en/decode base value
//   - If base is registered as an extension, en/decode base value
//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
//   - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
	rt      reflect.Type
	elem    reflect.Type
	pkgpath string

	rtid uintptr
	// rv0 reflect.Value // saved zero value, used if immutableKind

	numMeth uint16 // number of methods
	kind    uint8
	chandir uint8

	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
	toArray      bool      // whether this (struct) type should be encoded as an array
	keyType      valueType // if struct, how is the field name stored in a stream? default is string
	mbs          bool      // base type (T or *T) is a MapBySlice

	// ---- cpu cache line boundary?
	sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
	sfiSrc  []*structFieldInfo // unsorted. Used when enc/dec struct to array.

	key reflect.Type

	// ---- cpu cache line boundary?
	// sfis []structFieldInfo // all sfi, in src order, as created.

	sfiNamesSort []byte // all names, with indexes into the sfiSort

	// rv0 is the zero value for the type.
	// It is mostly beneficial for all non-reference kinds
	// i.e. all but map/chan/func/ptr/unsafe.pointer
	// so beneficial for intXX, bool, slices, structs, etc
	rv0 reflect.Value

	// format of marshal type fields below: [btj][mu]p? OR csp?
	// bm  bool // T is a binaryMarshaler
	// bmp bool // *T is a binaryMarshaler
	// bu  bool // T is a binaryUnmarshaler
	// bup bool // *T is a binaryUnmarshaler
	// tm  bool // T is a textMarshaler
	// tmp bool // *T is a textMarshaler
	// tu  bool // T is a textUnmarshaler
	// tup bool // *T is a textUnmarshaler
	// jm  bool // T is a jsonMarshaler
	// jmp bool // *T is a jsonMarshaler
	// ju  bool // T is a jsonUnmarshaler
	// jup bool // *T is a jsonUnmarshaler
	// cs  bool // T is a Selfer
	// csp bool // *T is a Selfer
	// mf  bool // T is a MissingFielder
	// mfp bool // *T is a MissingFielder

	// other flags, with individual bits representing if set.
	flags tiflag

	infoFieldOmitempty bool

	_ [3]byte   // padding
	_ [1]uint64 // padding
}

func (ti *typeInfo) isFlag(f tiflag) bool {
	return ti.flags&f != 0
}

func (ti *typeInfo) flag(when bool, f tiflag) *typeInfo {
	if when {
		ti.flags |= f
	}
	return ti
}

func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
	var sn []byte
	if len(name)+2 <= 32 {
		var buf [32]byte // should not escape to heap
		sn = buf[:len(name)+2]
	} else {
		sn = make([]byte, len(name)+2)
	}
	copy(sn[1:], name)
	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
	j := bytes.Index(ti.sfiNamesSort, sn)
	if j < 0 {
		return -1
	}
	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
	return
}

type rtid2ti struct {
	rtid uintptr
	ti   *typeInfo
}

// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
	infos atomicTypeInfoSlice
	mu    sync.Mutex
	_     uint64 // padding (cache-aligned)
	tags  []string
	_     uint64 // padding (cache-aligned)
}

// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
//
// This allows users to customize the struct tag keys which contain configuration
// of their types.
func NewTypeInfos(tags []string) *TypeInfos {
	return &TypeInfos{tags: tags}
}

func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
	// check for tags: codec, json, in that order.
	// this allows seamless support for many configured structs.
	for _, x := range x.tags {
		s = t.Get(x)
		if s != "" {
			return s
		}
	}
	return
}

// findTypeInfo does a binary search for rtid in the sorted slice s,
// returning the insertion index and, if found, the matching *typeInfo.
func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
	// binary search. adapted from sort/search.go.
	// Note: we use goto (instead of a for loop) so this can be inlined.

	// if sp == nil {
	// 	return -1, nil
	// }
	// s := *sp
	// h, i, j := 0, 0, len(s)

	var h uint // var h, i uint
	var j = uint(len(s))
LOOP:
	if i < j {
		h = i + (j-i)/2
		if s[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
		goto LOOP
	}
	if i < uint(len(s)) && s[i].rtid == rtid {
		ti = s[i].ti
	}
	return
}
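
// For reference, the goto-based search above behaves like this (non-inlinable)
// sketch using the standard library:
//
//	i := uint(sort.Search(len(s), func(k int) bool { return s[k].rtid >= rtid }))
//	if i < uint(len(s)) && s[i].rtid == rtid {
//		ti = s[i].ti
//	}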

func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	sp := x.infos.load()
	if sp != nil {
		_, pti = findTypeInfo(sp, rtid)
		if pti != nil {
			return
		}
	}

	rk := rt.Kind()
	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
	}

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{
		rt:      rt,
		rtid:    rtid,
		kind:    uint8(rk),
		pkgpath: rt.PkgPath(),
		keyType: valueTypeString, // default it - so it's never 0
	}
	ti.rv0 = reflect.Zero(rt)

	// ti.comparable = rt.Comparable()
	ti.numMeth = uint16(rt.NumMethod())

	var b1, b2 bool
	b1, b2 = implIntf(rt, binaryMarshalerTyp)
	ti.flag(b1, tiflagBinaryMarshaler).flag(b2, tiflagBinaryMarshalerPtr)
	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
	ti.flag(b1, tiflagBinaryUnmarshaler).flag(b2, tiflagBinaryUnmarshalerPtr)
	b1, b2 = implIntf(rt, textMarshalerTyp)
	ti.flag(b1, tiflagTextMarshaler).flag(b2, tiflagTextMarshalerPtr)
	b1, b2 = implIntf(rt, textUnmarshalerTyp)
	ti.flag(b1, tiflagTextUnmarshaler).flag(b2, tiflagTextUnmarshalerPtr)
	b1, b2 = implIntf(rt, jsonMarshalerTyp)
	ti.flag(b1, tiflagJsonMarshaler).flag(b2, tiflagJsonMarshalerPtr)
	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
	ti.flag(b1, tiflagJsonUnmarshaler).flag(b2, tiflagJsonUnmarshalerPtr)
	b1, b2 = implIntf(rt, selferTyp)
	ti.flag(b1, tiflagSelfer).flag(b2, tiflagSelferPtr)
	b1, b2 = implIntf(rt, missingFielderTyp)
	ti.flag(b1, tiflagMissingFielder).flag(b2, tiflagMissingFielderPtr)
	b1, b2 = implIntf(rt, iszeroTyp)
	ti.flag(b1, tiflagIsZeroer).flag(b2, tiflagIsZeroerPtr)
	b1 = rt.Comparable()
	ti.flag(b1, tiflagComparable)

	switch rk {
	case reflect.Struct:
		var omitEmpty bool
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
			ti.infoFieldOmitempty = omitEmpty
		} else {
			ti.keyType = valueTypeString
		}
		pp, pi := &pool4tiload, pool4tiload.Get() // pool.tiLoad()
		pv := pi.(*typeInfoLoadArray)
		pv.etypes[0] = ti.rtid
		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
		x.rget(rt, rtid, omitEmpty, nil, &vv)
		// ti.sfis = vv.sfis
		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
		pp.Put(pi)
	case reflect.Map:
		ti.elem = rt.Elem()
		ti.key = rt.Key()
	case reflect.Slice:
		ti.mbs, _ = implIntf(rt, mapBySliceTyp)
		ti.elem = rt.Elem()
	case reflect.Chan:
		ti.elem = rt.Elem()
		ti.chandir = uint8(rt.ChanDir())
	case reflect.Array, reflect.Ptr:
		ti.elem = rt.Elem()
	}

	// sfi = sfiSrc

	x.mu.Lock()
	sp = x.infos.load()
	var sp2 []rtid2ti
	if sp == nil {
		pti = &ti
		sp2 = []rtid2ti{{rtid, pti}}
		x.infos.store(sp2)
	} else {
		var idx uint
		idx, pti = findTypeInfo(sp, rtid)
		if pti == nil {
			pti = &ti
			sp2 = make([]rtid2ti, len(sp)+1)
			copy(sp2, sp[:idx])
			copy(sp2[idx+1:], sp[idx:])
			sp2[idx] = rtid2ti{rtid, pti}
			x.infos.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}

func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
	indexstack []uint16, pv *typeInfoLoad) {
	// Read the fields and store how to access each value.
	//
	// It uses Go's rules for selectors,
	// which say that the field with the shallowest depth is selected.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	// Typically, types have < 16 fields,
	// and iterating with equality checks is faster than a map lookup there.
	flen := rt.NumField()
	if flen > (1<<maxLevelsEmbedding - 1) {
		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
			(1<<maxLevelsEmbedding - 1), flen)
	}
	// pv.sfis = make([]structFieldInfo, flen)
LOOP:
	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
		f := rt.Field(int(j))
		fkind := f.Type.Kind()

		// skip if a func type, or is unexported, or structTag value == "-"
		switch fkind {
		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo
		var parsed bool

		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == ""
				// doInline = si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					indexstack2 := make([]uint16, len(indexstack)+1)
					copy(indexstack2, indexstack)
					indexstack2[len(indexstack)] = j
					// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported {
			continue
		}

		if f.Name == "" {
			panic(errNoFieldNameToStructFieldInfo)
		}

		// pv.fNames = append(pv.fNames, f.Name)

		// if si.encName == "" {
		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}
		si.encNameAsciiAlphaNum = true
		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
			b := si.encName[i]
			if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
				continue
			}
			si.encNameAsciiAlphaNum = false
			break
		}
		si.fieldName = f.Name
		si.flagSet(structFieldInfoFlagReady)

		// pv.encNames = append(pv.encNames, si.encName)

		// si.ikind = int(f.Type.Kind())
		if len(indexstack) > maxLevelsEmbedding-1 {
			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
				maxLevelsEmbedding-1, len(indexstack))
		}
		si.nis = uint8(len(indexstack)) + 1
		copy(si.is[:], indexstack)
		si.is[len(indexstack)] = j
		if omitEmpty {
			si.flagSet(structFieldInfoFlagOmitEmpty)
		}
		pv.sfis = append(pv.sfis, si)
	}
}

func tiSep(name string) uint8 {
	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
	// return 0xfe - (name[0] & 63)
	// return 0xfe - (name[0] & 63) - uint8(len(name))
	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
}

func tiSep2(name []byte) uint8 {
	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
}

// rgetResolveSFI resolves the struct field info obtained from a call to rget.
// It returns a trimmed unsorted slice (y) and a sorted slice (z) of *structFieldInfo,
// the encoded-names index (ss), and whether any field is tagged omitempty.
func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
	sa := pv.sfiidx[:0]
	sn := pv.b[:]
	n := len(x)

	var xn string
	var ui uint16
	var sep byte

	for i := range x {
		ui = uint16(i)
		xn = x[i].encName // fieldName or encName? use encName for now.
		if len(xn)+2 > cap(sn) {
			sn = make([]byte, len(xn)+2)
		} else {
			sn = sn[:len(xn)+2]
		}
		// use a custom sep, so that misses are less frequent,
		// since the sep (first char in search) is as unique as the first char in the field name.
		sep = tiSep(xn)
		sn[0], sn[len(sn)-1] = sep, 0xff
		copy(sn[1:], xn)
		j := bytes.Index(sa, sn)
		if j == -1 {
			sa = append(sa, sep)
			sa = append(sa, xn...)
			sa = append(sa, 0xff, byte(ui>>8), byte(ui))
		} else {
			index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
			// one of them must be cleared (reset to nil),
			// and the index updated appropriately
			i2clear := ui                // index to be cleared
			if x[i].nis < x[index].nis { // this one is shallower
				// update the index to point to this later one.
				sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
				// clear the earlier one, as this later one is shallower.
				i2clear = index
			}
			if x[i2clear].ready() {
				x[i2clear].flagClr(structFieldInfoFlagReady)
				n--
			}
		}
	}

	var w []structFieldInfo
	sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
	if sharingArray {
		w = make([]structFieldInfo, n)
	}

	// remove all the nils (non-ready)
	y = make([]*structFieldInfo, n)
	n = 0
	var sslen int

	for i := range x {
		if !x[i].ready() {
			continue
		}
		if !anyOmitEmpty && x[i].omitEmpty() {
			anyOmitEmpty = true
		}
		if sharingArray {
			w[n] = x[i]
			y[n] = &w[n]
		} else {
			y[n] = &x[i]
		}
		sslen = sslen + len(x[i].encName) + 4
		n++
	}
	if n != len(y) {
		panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
			rt, len(y), len(x), n)
	}

	z = make([]*structFieldInfo, len(y))
	copy(z, y)
	sort.Sort(sfiSortedByEncName(z))

	sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
	if sharingArray {
		ss = make([]byte, 0, sslen)
	} else {
		ss = sa[:0] // reuse the newly made sa array if necessary
	}
	for i := range z {
		xn = z[i].encName
		sep = tiSep(xn)
		ui = uint16(i)
		ss = append(ss, sep)
		ss = append(ss, xn...)
		ss = append(ss, 0xff, byte(ui>>8), byte(ui))
	}
	return
}
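
// For orientation (illustrative only): each entry in the names index built
// above (and searched by indexForEncName) is laid out as
//
//	sep(name) | name bytes... | 0xff | index-hi | index-lo
//
// e.g. a field encoded as "Age" at sorted position 1 contributes
// tiSep("Age"), 'A', 'g', 'e', 0xff, 0x00, 0x01.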

func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
	return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
}

// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
//   - does it implement IsZero() bool
//   - is it comparable, so we can compare directly using ==
//   - if checkStruct, then walk through the encodable fields
//     and check if they are empty or not.
func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
	// v is a struct kind - no need to check again.
	// We only check isZero on a struct kind, to reduce the number of times
	// we look up the rtid and typeInfo for each type as we walk the tree.
	vt := v.Type()
	rtid := rt2id(vt)
	if tinfos == nil {
		tinfos = defTypeInfos
	}
	ti := tinfos.get(rtid, vt)
	if ti.rtid == timeTypId {
		return rv2i(v).(time.Time).IsZero()
	}
	if ti.isFlag(tiflagIsZeroerPtr) && v.CanAddr() {
		return rv2i(v.Addr()).(isZeroer).IsZero()
	}
	if ti.isFlag(tiflagIsZeroer) {
		return rv2i(v).(isZeroer).IsZero()
	}
	if ti.isFlag(tiflagComparable) {
		return rv2i(v) == rv2i(reflect.Zero(vt))
	}
	if !checkStruct {
		return false
	}
	// We only care about what we can encode/decode,
	// so that is what we use to check omitEmpty.
	for _, si := range ti.sfiSrc {
		sfv, valid := si.field(v, false)
		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
			return false
		}
	}
	return true
}

// func roundFloat(x float64) float64 {
// 	t := math.Trunc(x)
// 	if math.Abs(x-t) >= 0.5 {
// 		return t + math.Copysign(1, x)
// 	}
// 	return t
// }

func panicToErr(h errDecorator, err *error) {
	// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
	// else it seems the recover is not fully handled
	if recoverPanicToErr {
		if x := recover(); x != nil {
			// fmt.Printf("panic'ing with: %v\n", x)
			// debug.PrintStack()
			panicValToErr(h, x, err)
		}
	}
}

func isSliceBoundsError(s string) bool {
	return strings.Contains(s, "index out of range") ||
		strings.Contains(s, "slice bounds out of range")
}

func panicValToErr(h errDecorator, v interface{}, err *error) {
	d, dok := h.(*Decoder)
	switch xerr := v.(type) {
	case nil:
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
				*err = io.EOF
			} else {
				h.wrapErr(xerr, err)
			}
		}
	case string:
		if xerr != "" {
			if dok && d.bytes && isSliceBoundsError(xerr) {
				*err = io.EOF
			} else {
				h.wrapErr(xerr, err)
			}
		}
	case fmt.Stringer:
		if xerr != nil {
			h.wrapErr(xerr, err)
		}
	default:
		h.wrapErr(v, err)
	}
}

func isImmutableKind(k reflect.Kind) (v bool) {
	// return immutableKindsSet[k]
	// since we know reflect.Kind is in range 0..31, use the k%32 == k constraint
	return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination
}

// usableByteSlice returns bs[:slen] if its capacity suffices, else a fresh slice.
func usableByteSlice(bs []byte, slen int) []byte {
	if cap(bs) >= slen {
		if bs == nil {
			return []byte{}
		}
		return bs[:slen]
	}
	return make([]byte, slen)
}

// ----

type codecFnInfo struct {
	ti    *typeInfo
	xfFn  Ext
	xfTag uint64
	seq   seqType
	addrD bool
	addrF bool // if addrD, this says whether decode function can take a value or a ptr
	addrE bool
}

// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations one time,
// and pass to the code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value)
	fd func(*Decoder, *codecFnInfo, reflect.Value)
	_  [1]uint64 // padding (cache-aligned)
}

type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}

// makeExt adapts a BytesExt or InterfaceExt (or a full Ext) into an Ext,
// filling in the unsupported half with a failer that panics if invoked.
func makeExt(ext interface{}) Ext {
	if ext == nil {
		return &extFailWrapper{}
	}
	switch t := ext.(type) {
	case nil:
		return &extFailWrapper{}
	case Ext:
		return t
	case BytesExt:
		return &bytesExtWrapper{BytesExt: t}
	case InterfaceExt:
		return &interfaceExtWrapper{InterfaceExt: t}
	}
	return &extFailWrapper{}
}

func baseRV(v interface{}) (rv reflect.Value) {
	for rv = rv4i(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
	}
	return
}

// func newAddressableRV(t reflect.Type, k reflect.Kind) reflect.Value {
// 	if k == reflect.Ptr {
// 		return reflect.New(t.Elem()) // this is not addressable???
// 	}
// 	return reflect.New(t).Elem()
// }

// func newAddressableRV(t reflect.Type) reflect.Value {
// 	return reflect.New(t).Elem()
// }

// ----

// These "checkOverflow" functions must be inlinable, and not call anybody.
// Overflow means that the value cannot be represented without wrapping/overflow.
// Overflow=false does not mean that the value can be represented without losing precision
// (especially for floating point).

type checkOverflow struct{}

// func (checkOverflow) Float16(f float64) (overflow bool) {
// 	panicv.errorf("unimplemented")
// 	if f < 0 {
// 		f = -f
// 	}
// 	return math.MaxFloat32 < f && f <= math.MaxFloat64
// }

func (checkOverflow) Float32(v float64) (overflow bool) {
	if v < 0 {
		v = -v
	}
	return math.MaxFloat32 < v && v <= math.MaxFloat64
}

func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return
	}
	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
		overflow = true
	}
	return
}

func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return
	}
	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
		overflow = true
	}
	return
}

func (checkOverflow) SignedInt(v uint64) (overflow bool) {
	// e.g. -128 to 127 for int8
	pos := (v >> 63) == 0
	ui2 := v & 0x7fffffffffffffff
	if pos {
		if ui2 > math.MaxInt64 {
			overflow = true
		}
	} else {
		if ui2 > math.MaxInt64-1 {
			overflow = true
		}
	}
	return
}

func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		panicv.errorf("float32 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		panicv.errorf("uint64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		panicv.errorf("int64 overflow: %v", v)
	}
	return v
}

func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		panicv.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}
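
// Worked example (illustrative): a value overflows bitsize bits exactly when
// shifting the top (64-bitsize) bits out and back changes it:
//
//	var chk checkOverflow
//	_ = chk.Uint(255, 8) // false: 255 fits in 8 bits
//	_ = chk.Uint(300, 8) // true:  (300<<56)>>56 == 44 != 300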

// ------------------ FLOATING POINT -----------------

func isNaN64(f float64) bool { return f != f }
func isNaN32(f float32) bool { return f != f }
func abs32(f float32) float32 {
	return math.Float32frombits(math.Float32bits(f) &^ (1 << 31))
}

// Per go spec, floats are represented in memory as
// IEEE single or double precision floating point values.
//
// We also looked at the source for stdlib math/modf.go,
// reviewed https://github.com/chewxy/math32
// and read wikipedia documents describing the formats.
//
// It became clear that we could easily look at the bits to determine
// whether any fraction exists.
//
// This is all we need for now.

func noFrac64(f float64) (v bool) {
	x := math.Float64bits(f)
	e := uint64(x>>52)&0x7FF - 1023 // uint(x>>shift)&mask - bias
	// clear top 12+e bits, the integer part; if the rest is 0, then no fraction.
	if e < 52 {
		// return x&((1<<64-1)>>(12+e)) == 0
		return x<<(12+e) == 0
	}
	return
}

func noFrac32(f float32) (v bool) {
	x := math.Float32bits(f)
	e := uint32(x>>23)&0xFF - 127 // uint(x>>shift)&mask - bias
	// clear top 9+e bits, the integer part; if the rest is 0, then no fraction.
	if e < 23 {
		// return x&((1<<32-1)>>(9+e)) == 0
		return x<<(9+e) == 0
	}
	return
}
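
// Worked example (illustrative): for f = 2.5, the unbiased exponent e is 1, so
// the sign, exponent and first e mantissa bits form the integer part; shifting
// them out with x<<(12+e) leaves the 0.5 fraction bits, hence a non-zero result:
//
//	noFrac64(2.5) // false: fraction bits remain after x<<13
//	noFrac64(4.0) // true:  e=2 and x<<14 == 0 (mantissa is all zeros)
//
// Note that e is computed with unsigned arithmetic, so for |f| < 1 it wraps to
// a huge value, fails the e < 52 test, and v stays false.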

// func noFrac(f float64) bool {
// 	_, frac := math.Modf(float64(f))
// 	return frac == 0
// }

// -----------------------

type ioFlusher interface {
	Flush() error
}

type ioPeeker interface {
	Peek(int) ([]byte, error)
}

type ioBuffered interface {
	Buffered() int
}

// -----------------------

type sfiRv struct {
	v *structFieldInfo
	r reflect.Value
}

// -----------------

type set []interface{}

func (s *set) add(v interface{}) (exists bool) {
	// e.ci is always nil, or len >= 1
	x := *s

	if x == nil {
		x = make([]interface{}, 1, 8)
		x[0] = v
		*s = x
		return
	}
	// typically, length will be 1. make this perform.
	if len(x) == 1 {
		if j := x[0]; j == 0 {
			x[0] = v
		} else if j == v {
			exists = true
		} else {
			x = append(x, v)
			*s = x
		}
		return
	}
	// check if it exists
	for _, j := range x {
		if j == v {
			exists = true
			return
		}
	}
	// try to replace a "deleted" slot
	for i, j := range x {
		if j == 0 {
			x[i] = v
			return
		}
	}
	// if unable to replace a deleted slot, just append it.
	x = append(x, v)
	*s = x
	return
}

func (s *set) remove(v interface{}) (exists bool) {
	x := *s
	if len(x) == 0 {
		return
	}
	if len(x) == 1 {
		if x[0] == v {
			x[0] = 0
		}
		return
	}
	for i, j := range x {
		if j == v {
			exists = true
			x[i] = 0 // set it to 0, as a way to delete it.
			// copy(x[i:], x[i+1:])
			// x = x[:len(x)-1]
			return
		}
	}
	return
}
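
// Example (illustrative only): the zero value is ready to use, and deleted
// slots (zeroed to 0) are reused before the slice grows:
//
//	var s set
//	s.add("a")    // exists=false
//	s.add("a")    // exists=true
//	s.remove("a") // slot zeroed to 0, slice not shrunk
//	s.add("b")    // reuses the zeroed slot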

// ------

// bitset types are better than [256]bool, because they permit the whole
// bitset array to sit on a single cache line, and use less memory.
//
// Also, since pos is a byte (0-255), there are no bounds checks on indexing (cheap).
//
// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduce
// bounds checking, so we discarded them, and everyone uses bitset256.
//
// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7

type bitset256 [32]byte

func (x *bitset256) isset(pos byte) bool {
	return x[pos>>3]&(1<<(pos&7)) != 0
}

// func (x *bitset256) issetv(pos byte) byte {
// 	return x[pos>>3] & (1 << (pos & 7))
// }

func (x *bitset256) set(pos byte) {
	x[pos>>3] |= (1 << (pos & 7))
}

type bitset32 uint32

func (x bitset32) set(pos byte) bitset32 {
	return x | (1 << pos)
}

func (x bitset32) isset(pos byte) bool {
	return x&(1<<pos) != 0
}
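
// Example (illustrative only): marking a character class in a bitset256 and
// testing membership with a single shift/mask:
//
//	var ws bitset256
//	for _, c := range []byte{' ', '\t', '\n', '\r'} {
//		ws.set(c)
//	}
//	_ = ws.isset(' ') // true
//	_ = ws.isset('x') // false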

// func (x *bitset256) unset(pos byte) {
// 	x[pos>>3] &^= (1 << (pos & 7))
// }

// type bit2set256 [64]byte
// func (x *bit2set256) set(pos byte, v1, v2 bool) {
// 	var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
// 	if v1 {
// 		x[pos>>2] |= 1 << (pos2 + 1)
// 	}
// 	if v2 {
// 		x[pos>>2] |= 1 << pos2
// 	}
// }
// func (x *bit2set256) get(pos byte) uint8 {
// 	var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
// 	return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
// }

// ------------

// type strBytes struct {
// 	s string
// 	b []byte
// 	// i uint16
// }

// ------------

// type pooler struct {
// 	// function-scoped pooled resources
// 	tiload                                      sync.Pool // for type info loading
// 	sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding
//
// 	// lifetime-scoped pooled resources
// 	// dn                                 sync.Pool // for decNaked
// 	buf256, buf1k, buf2k, buf4k, buf8k, buf16k, buf32k sync.Pool // for [N]byte
// 	mapStrU16, mapU16Str, mapU16Bytes                  sync.Pool // for Binc
// 	// mapU16StrBytes sync.Pool // for Binc
// }

// func (p *pooler) init() {
// 	p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
// 	p.sfiRv8.New = func() interface{} { return new([8]sfiRv) }
// 	p.sfiRv16.New = func() interface{} { return new([16]sfiRv) }
// 	p.sfiRv32.New = func() interface{} { return new([32]sfiRv) }
// 	p.sfiRv64.New = func() interface{} { return new([64]sfiRv) }
// 	p.sfiRv128.New = func() interface{} { return new([128]sfiRv) }
// 	// p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
// 	p.buf256.New = func() interface{} { return new([256]byte) }
// 	p.buf1k.New = func() interface{} { return new([1 * 1024]byte) }
// 	p.buf2k.New = func() interface{} { return new([2 * 1024]byte) }
// 	p.buf4k.New = func() interface{} { return new([4 * 1024]byte) }
// 	p.buf8k.New = func() interface{} { return new([8 * 1024]byte) }
// 	p.buf16k.New = func() interface{} { return new([16 * 1024]byte) }
// 	p.buf32k.New = func() interface{} { return new([32 * 1024]byte) }
// 	// p.buf64k.New = func() interface{} { return new([64 * 1024]byte) }
// 	p.mapStrU16.New = func() interface{} { return make(map[string]uint16, 16) }
// 	p.mapU16Str.New = func() interface{} { return make(map[uint16]string, 16) }
// 	p.mapU16Bytes.New = func() interface{} { return make(map[uint16][]byte, 16) }
// 	// p.mapU16StrBytes.New = func() interface{} { return make(map[uint16]strBytes, 16) }
// }

// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) {
// 	return &p.strRv8, p.strRv8.Get()
// }
// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) {
// 	return &p.strRv16, p.strRv16.Get()
// }
// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) {
// 	return &p.strRv32, p.strRv32.Get()
// }
// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) {
// 	return &p.strRv64, p.strRv64.Get()
// }
// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) {
// 	return &p.strRv128, p.strRv128.Get()
// }
// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf1k, p.buf1k.Get()
// }
// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf2k, p.buf2k.Get()
// }
// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf4k, p.buf4k.Get()
// }
// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf8k, p.buf8k.Get()
// }
// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf16k, p.buf16k.Get()
// }
// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf32k, p.buf32k.Get()
// }
// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) {
// 	return &p.buf64k, p.buf64k.Get()
// }
// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
// 	return &p.tiload, p.tiload.Get()
// }
// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
// 	return &p.dn, p.dn.Get()
// }
// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked)) {
// 	sp := &(p.dn)
// 	vv := sp.Get()
// 	return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
// }
// func (p *pooler) decNakedGet() (v interface{}) {
// 	return p.dn.Get()
// }
// func (p *pooler) tiLoadGet() (v interface{}) {
// 	return p.tiload.Get()
// }
// func (p *pooler) decNakedPut(v interface{}) {
// 	p.dn.Put(v)
// }
// func (p *pooler) tiLoadPut(v interface{}) {
// 	p.tiload.Put(v)
// }

// ----------------------------------------------------

type panicHdl struct{}

func (panicHdl) errorv(err error) {
	if err != nil {
		panic(err)
	}
}

func (panicHdl) errorstr(message string) {
	if message != "" {
		panic(message)
	}
}

func (panicHdl) errorf(format string, params ...interface{}) {
	if len(params) != 0 {
		panic(fmt.Sprintf(format, params...))
	}
	panic(format)
}

// ----------------------------------------------------

type errDecorator interface {
	wrapErr(in interface{}, out *error)
}

type errDecoratorDef struct{}

func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }

// ----------------------------------------------------

// must wraps (value, error) pairs, panicking via panicv on a non-nil error.
type must struct{}

func (must) String(s string, err error) string {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}

func (must) Int(s int64, err error) int64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}

func (must) Uint(s uint64, err error) uint64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}

func (must) Float(s float64, err error) float64 {
	if err != nil {
		panicv.errorv(err)
	}
	return s
}

// -------------------

/*
type pooler struct {
	pool  *sync.Pool
	poolv interface{}
}

func (z *pooler) end() {
	if z.pool != nil {
		z.pool.Put(z.poolv)
		z.pool, z.poolv = nil, nil
	}
}

// -------------------

const bytesBufPoolerMaxSize = 32 * 1024

type bytesBufPooler struct {
	pooler
}

func (z *bytesBufPooler) capacity() (c int) {
	switch z.pool {
	case nil:
	case &pool4buf256:
		c = 256
	case &pool4buf1k:
		c = 1024
	case &pool4buf2k:
		c = 2 * 1024
	case &pool4buf4k:
		c = 4 * 1024
	case &pool4buf8k:
		c = 8 * 1024
	case &pool4buf16k:
		c = 16 * 1024
	case &pool4buf32k:
		c = 32 * 1024
	}
	return
}

// func (z *bytesBufPooler) ensureCap(newcap int, bs []byte) (bs2 []byte) {
// 	if z.pool == nil {
// 		bs2 = z.get(newcap)[:len(bs)]
// 		copy(bs2, bs)
// 		return
// 	}
// 	var bp2 bytesBufPooler
// 	bs2 = bp2.get(newcap)[:len(bs)]
// 	copy(bs2, bs)
// 	z.end()
// 	*z = bp2
// 	return
// }

// func (z *bytesBufPooler) buf() (buf []byte) {
// 	switch z.pool {
// 	case nil:
// 	case &pool.buf256:
// 		buf = z.poolv.(*[256]byte)[:]
// 	case &pool.buf1k:
// 		buf = z.poolv.(*[1 * 1024]byte)[:]
// 	case &pool.buf2k:
// 		buf = z.poolv.(*[2 * 1024]byte)[:]
// 	case &pool.buf4k:
// 		buf = z.poolv.(*[4 * 1024]byte)[:]
// 	case &pool.buf8k:
// 		buf = z.poolv.(*[8 * 1024]byte)[:]
// 	case &pool.buf16k:
// 		buf = z.poolv.(*[16 * 1024]byte)[:]
// 	case &pool.buf32k:
// 		buf = z.poolv.(*[32 * 1024]byte)[:]
// 	}
// 	return
// }

func (z *bytesBufPooler) get(bufsize int) (buf []byte) {
	if !usePool {
		return make([]byte, bufsize)
	}
	if bufsize > bytesBufPoolerMaxSize {
		z.end()
		return make([]byte, bufsize)
	}
	switch z.pool {
	case nil:
		goto NEW
	case &pool4buf256:
		if bufsize <= 256 {
			buf = z.poolv.(*[256]byte)[:bufsize]
		}
	case &pool4buf1k:
		if bufsize <= 1*1024 {
			buf = z.poolv.(*[1 * 1024]byte)[:bufsize]
		}
	case &pool4buf2k:
		if bufsize <= 2*1024 {
			buf = z.poolv.(*[2 * 1024]byte)[:bufsize]
		}
	case &pool4buf4k:
		if bufsize <= 4*1024 {
			buf = z.poolv.(*[4 * 1024]byte)[:bufsize]
		}
	case &pool4buf8k:
		if bufsize <= 8*1024 {
			buf = z.poolv.(*[8 * 1024]byte)[:bufsize]
		}
	case &pool4buf16k:
		if bufsize <= 16*1024 {
			buf = z.poolv.(*[16 * 1024]byte)[:bufsize]
		}
	case &pool4buf32k:
		if bufsize <= 32*1024 {
			buf = z.poolv.(*[32 * 1024]byte)[:bufsize]
		}
	}
	if buf != nil {
		return
	}
	z.end()
NEW:
	// // Try to use binary search.
	// // This is not optimal, as most folks select 1k or 2k buffers
	// // so a linear search is better (sequence of if/else blocks)
	// if bufsize < 1 {
	// 	bufsize = 0
	// } else {
	// 	bufsize--
	// 	bufsize /= 1024
	// }
	// switch bufsize {
	// case 0:
	// 	z.pool, z.poolv = pool.bytes1k()
	// 	buf = z.poolv.(*[1 * 1024]byte)[:]
	// case 1:
	// 	z.pool, z.poolv = pool.bytes2k()
	// 	buf = z.poolv.(*[2 * 1024]byte)[:]
	// case 2, 3:
	// 	z.pool, z.poolv = pool.bytes4k()
	// 	buf = z.poolv.(*[4 * 1024]byte)[:]
	// case 4, 5, 6, 7:
	// 	z.pool, z.poolv = pool.bytes8k()
	// 	buf = z.poolv.(*[8 * 1024]byte)[:]
	// case 8, 9, 10, 11, 12, 13, 14, 15:
	// 	z.pool, z.poolv = pool.bytes16k()
	// 	buf = z.poolv.(*[16 * 1024]byte)[:]
	// case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31:
	// 	z.pool, z.poolv = pool.bytes32k()
	// 	buf = z.poolv.(*[32 * 1024]byte)[:]
	// default:
	// 	z.pool, z.poolv = pool.bytes64k()
	// 	buf = z.poolv.(*[64 * 1024]byte)[:]
	// }
	// return

	if bufsize <= 256 {
		z.pool, z.poolv = &pool4buf256, pool4buf256.Get() // pool.bytes1k()
		buf = z.poolv.(*[256]byte)[:bufsize]
	} else if bufsize <= 1*1024 {
		z.pool, z.poolv = &pool4buf1k, pool4buf1k.Get() // pool.bytes1k()
		buf = z.poolv.(*[1 * 1024]byte)[:bufsize]
	} else if bufsize <= 2*1024 {
		z.pool, z.poolv = &pool4buf2k, pool4buf2k.Get() // pool.bytes2k()
		buf = z.poolv.(*[2 * 1024]byte)[:bufsize]
	} else if bufsize <= 4*1024 {
		z.pool, z.poolv = &pool4buf4k, pool4buf4k.Get() // pool.bytes4k()
		buf = z.poolv.(*[4 * 1024]byte)[:bufsize]
	} else if bufsize <= 8*1024 {
		z.pool, z.poolv = &pool4buf8k, pool4buf8k.Get() // pool.bytes8k()
		buf = z.poolv.(*[8 * 1024]byte)[:bufsize]
	} else if bufsize <= 16*1024 {
		z.pool, z.poolv = &pool4buf16k, pool4buf16k.Get() // pool.bytes16k()
		buf = z.poolv.(*[16 * 1024]byte)[:bufsize]
	} else if bufsize <= 32*1024 {
		z.pool, z.poolv = &pool4buf32k, pool4buf32k.Get() // pool.bytes32k()
		buf = z.poolv.(*[32 * 1024]byte)[:bufsize]
		// } else {
		// 	z.pool, z.poolv = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k()
		// 	buf = z.poolv.(*[64 * 1024]byte)[:]
	}
	return
}

// ----------------

type bytesBufSlicePooler struct {
	bytesBufPooler
	buf []byte
}

func (z *bytesBufSlicePooler) ensureExtraCap(num int) {
	if cap(z.buf) < len(z.buf)+num {
		z.ensureCap(len(z.buf) + num)
	}
}

func (z *bytesBufSlicePooler) ensureCap(newcap int) {
	if cap(z.buf) >= newcap {
		return
	}
	var bs2 []byte
	if z.pool == nil {
		bs2 = z.bytesBufPooler.get(newcap)[:len(z.buf)]
		if z.buf == nil {
			z.buf = bs2
		} else {
			copy(bs2, z.buf)
			z.buf = bs2
		}
		return
	}
	var bp2 bytesBufPooler
	if newcap > bytesBufPoolerMaxSize {
		bs2 = make([]byte, newcap)
	} else {
		bs2 = bp2.get(newcap)
	}
	bs2 = bs2[:len(z.buf)]
	copy(bs2, z.buf)
	z.end()
	z.buf = bs2
	z.bytesBufPooler = bp2
}

func (z *bytesBufSlicePooler) get(length int) {
	z.buf = z.bytesBufPooler.get(length)
}

func (z *bytesBufSlicePooler) append(b byte) {
	z.ensureExtraCap(1)
	z.buf = append(z.buf, b)
}

func (z *bytesBufSlicePooler) appends(b []byte) {
	z.ensureExtraCap(len(b))
	z.buf = append(z.buf, b...)
}

func (z *bytesBufSlicePooler) end() {
	z.bytesBufPooler.end()
	z.buf = nil
}

func (z *bytesBufSlicePooler) resetBuf() {
	if z.buf != nil {
		z.buf = z.buf[:0]
	}
}

// ----------------

type sfiRvPooler struct {
	pooler
}

func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) {
	if newlen < 0 { // bounds-check-elimination
		// cannot happen // here for bounds-check-elimination
	} else if newlen <= 8 {
		z.pool, z.poolv = &pool4sfiRv8, pool4sfiRv8.Get() // pool.sfiRv8()
		fkvs = z.poolv.(*[8]sfiRv)[:newlen]
	} else if newlen <= 16 {
		z.pool, z.poolv = &pool4sfiRv16, pool4sfiRv16.Get() // pool.sfiRv16()
		fkvs = z.poolv.(*[16]sfiRv)[:newlen]
	} else if newlen <= 32 {
		z.pool, z.poolv = &pool4sfiRv32, pool4sfiRv32.Get() // pool.sfiRv32()
		fkvs = z.poolv.(*[32]sfiRv)[:newlen]
	} else if newlen <= 64 {
		z.pool, z.poolv = &pool4sfiRv64, pool4sfiRv64.Get() // pool.sfiRv64()
		fkvs = z.poolv.(*[64]sfiRv)[:newlen]
	} else if newlen <= 128 {
		z.pool, z.poolv = &pool4sfiRv128, pool4sfiRv128.Get() // pool.sfiRv128()
		fkvs = z.poolv.(*[128]sfiRv)[:newlen]
	} else {
		fkvs = make([]sfiRv, newlen)
	}
	return
}
*/
// ----------------

// freelistCapacity returns the smallest power-of-2 capacity
// (minimum 8) that can hold length elements.
func freelistCapacity(length int) (capacity int) {
	for capacity = 8; capacity < length; capacity *= 2 {
	}
	return
}
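
// For example (illustrative values, easily verified from the loop above):
//
//	freelistCapacity(1)   == 8
//	freelistCapacity(8)   == 8
//	freelistCapacity(9)   == 16
//	freelistCapacity(100) == 128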

// bytesFreelist is a pool of byte slices, tracked by capacity.
// A nil entry marks a free slot, ready to hold a returned slice.
type bytesFreelist [][]byte

// get returns a zeroed slice of the given length, re-using the pooled
// slice with the smallest capacity that fits. If none fits, it allocates
// a fresh slice with power-of-2 capacity.
func (x *bytesFreelist) get(length int) (out []byte) {
	var j int = -1
	for i := 0; i < len(*x); i++ {
		if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
			j = i
		}
	}
	if j == -1 {
		return make([]byte, length, freelistCapacity(length))
	}
	out = (*x)[j][:length]
	(*x)[j] = nil
	for i := 0; i < len(out); i++ {
		out[i] = 0
	}
	return
}

// put returns a slice to the freelist, filling the first free slot
// or appending a new one. Zero-length slices are discarded.
func (x *bytesFreelist) put(v []byte) {
	if len(v) == 0 {
		return
	}
	for i := 0; i < len(*x); i++ {
		if cap((*x)[i]) == 0 {
			(*x)[i] = v
			return
		}
	}
	*x = append(*x, v)
}

// check returns v re-sliced to length if its capacity suffices.
// Otherwise it returns v to the freelist and gets a replacement.
// Note that only the replacement path yields a zeroed slice.
func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
	if cap(v) < length {
		x.put(v)
		return x.get(length)
	}
	return v[:length]
}
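
// A minimal usage sketch (an assumed usage pattern, not code from this
// file): buffers cycle through get/check/put so that capacity survives
// across calls.
//
//	var fl bytesFreelist
//	b := fl.get(24)     // zeroed slice: len 24, cap 32
//	b = fl.check(b, 16) // cap suffices: returns b[:16], not re-zeroed
//	b = fl.check(b, 64) // too small: b is pooled, a zeroed 64-byte slice returned
//	fl.put(b)           // done with it: make it available for re-use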
// -------------------------

// sfiRvFreelist mirrors bytesFreelist for []sfiRv values.
type sfiRvFreelist [][]sfiRv

func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
	var j int = -1
	for i := 0; i < len(*x); i++ {
		if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
			j = i
		}
	}
	if j == -1 {
		return make([]sfiRv, length, freelistCapacity(length))
	}
	out = (*x)[j][:length]
	(*x)[j] = nil
	for i := 0; i < len(out); i++ {
		out[i] = sfiRv{}
	}
	return
}

func (x *sfiRvFreelist) put(v []sfiRv) {
	for i := 0; i < len(*x); i++ {
		if cap((*x)[i]) == 0 {
			(*x)[i] = v
			return
		}
	}
	*x = append(*x, v)
}
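
// sfiRvFreelist duplicates the bytesFreelist logic for a second element
// type. If the package could assume Go 1.18+, one generic freelist would
// cover both. A sketch under that assumption, not the package's code
// (it reuses freelistCapacity from above):
//
//	type freelist[T any] [][]T
//
//	func (x *freelist[T]) get(length int) (out []T) {
//		j := -1
//		for i := 0; i < len(*x); i++ {
//			if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
//				j = i
//			}
//		}
//		if j == -1 {
//			return make([]T, length, freelistCapacity(length))
//		}
//		out = (*x)[j][:length]
//		(*x)[j] = nil
//		var zero T
//		for i := range out {
//			out[i] = zero
//		}
//		return
//	}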
// -----------

// xdebugf prints the message in red on the terminal.
// Use it in place of fmt.Printf (which it calls internally).
func xdebugf(pattern string, args ...interface{}) {
	xdebugAnyf("31", pattern, args...)
}

// xdebug2f prints the message in blue on the terminal.
// Use it in place of fmt.Printf (which it calls internally).
func xdebug2f(pattern string, args ...interface{}) {
	xdebugAnyf("34", pattern, args...)
}

// xdebugAnyf is a no-op unless the xdebug flag is set. It wraps the
// formatted message in ANSI escape codes for the given color, appending
// a newline if the pattern does not already end in one.
func xdebugAnyf(colorcode, pattern string, args ...interface{}) {
	if !xdebug {
		return
	}
	var delim string
	if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
		delim = "\n"
	}
	fmt.Printf("\033[1;"+colorcode+"m"+pattern+delim+"\033[0m", args...)
	// os.Stderr.Flush()
}
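
// For example (a hypothetical call, assuming xdebug is enabled):
//
//	xdebugf("read %d bytes", 42)
//	// writes "\033[1;31mread 42 bytes\n\033[0m" to stdout:
//	// bold red text, with a newline appended since the pattern lacked one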

// register these here, so that staticcheck stops barfing
var _ = xdebug2f
var _ = xdebugf
var _ = isNaN32

// func isImmutableKind(k reflect.Kind) (v bool) {
// 	return false ||
// 		k == reflect.Int ||
// 		k == reflect.Int8 ||
// 		k == reflect.Int16 ||
// 		k == reflect.Int32 ||
// 		k == reflect.Int64 ||
// 		k == reflect.Uint ||
// 		k == reflect.Uint8 ||
// 		k == reflect.Uint16 ||
// 		k == reflect.Uint32 ||
// 		k == reflect.Uint64 ||
// 		k == reflect.Uintptr ||
// 		k == reflect.Float32 ||
// 		k == reflect.Float64 ||
// 		k == reflect.Bool ||
// 		k == reflect.String
// }

// func timeLocUTCName(tzint int16) string {
// 	if tzint == 0 {
// 		return "UTC"
// 	}
// 	var tzname = []byte("UTC+00:00")
// 	// tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) // perf issue using Sprintf; inlined below
// 	// tzhr, tzmin := tz/60, tz%60 // faster if you convert to int first
// 	var tzhr, tzmin int16
// 	if tzint < 0 {
// 		tzname[3] = '-' // (TODO: verify this works here)
// 		tzhr, tzmin = -tzint/60, (-tzint)%60
// 	} else {
// 		tzhr, tzmin = tzint/60, tzint%60
// 	}
// 	tzname[4] = timeDigits[tzhr/10]
// 	tzname[5] = timeDigits[tzhr%10]
// 	tzname[7] = timeDigits[tzmin/10]
// 	tzname[8] = timeDigits[tzmin%10]
// 	return string(tzname)
// 	// return time.FixedZone(string(tzname), int(tzint)*60)
// }
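
// Worked examples for the retired function above (tzint appears to be
// an offset in minutes east of UTC, given the *60 in the FixedZone call):
//
//	timeLocUTCName(0)    == "UTC"
//	timeLocUTCName(90)   == "UTC+01:30"
//	timeLocUTCName(-330) == "UTC-05:30"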