瀏覽代碼

codec: mapRange takes a mapIter so it is stack-allocated

Previously, mapRange(...) created and returned a *mapIter.

This ensured that the mapIter escaped and was heap allocated.

Now, mapRange(...) takes a *mapIter parameter, which allows the
mapIter to be stack-allocated in its containing function.
Ugorji Nwoke 6 年之前
父節點
當前提交
72028106ff
共有 6 個文件被更改,包括 33 次插入和 13 次刪除
  1. 3 2
      codec/codec_test.go
  2. 2 1
      codec/encode.go
  3. 2 2
      codec/goversion_maprange_gte_go112.go
  4. 2 2
      codec/goversion_maprange_lt_go112.go
  5. 5 0
      codec/helper.go
  6. 19 6
      codec/helper_unsafe.go

+ 3 - 2
codec/codec_test.go

@@ -3274,7 +3274,8 @@ func TestMapRangeIndex(t *testing.T) {
 	mt := reflect.TypeOf(m1)
 	rvk := mapAddressableRV(mt.Key(), mt.Key().Kind())
 	rvv := mapAddressableRV(mt.Elem(), mt.Elem().Kind())
-	it := mapRange(rv4i(m1), rvk, rvv, true)
+	var it mapIter
+	mapRange(&it, rv4i(m1), rvk, rvv, true)
 	for it.Next() {
 		k := fnrv(it.Key(), rvk).Interface().(string)
 		v := fnrv(it.Value(), rvv).Interface().(*T)
@@ -3303,7 +3304,7 @@ func TestMapRangeIndex(t *testing.T) {
 	mt = reflect.TypeOf(m2)
 	rvk = mapAddressableRV(mt.Key(), mt.Key().Kind())
 	rvv = mapAddressableRV(mt.Elem(), mt.Elem().Kind())
-	it = mapRange(rv4i(m2), rvk, rvv, true)
+	mapRange(&it, rv4i(m2), rvk, rvv, true)
 	for it.Next() {
 		k := fnrv(it.Key(), rvk).Interface().(*T)
 		v := fnrv(it.Value(), rvv).Interface().(T)

+ 2 - 1
codec/encode.go

@@ -668,7 +668,8 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
 
 	var rvk = mapAddressableRV(f.ti.key, ktypeKind)
 
-	it := mapRange(rv, rvk, rvv, true)
+	var it mapIter
+	mapRange(&it, rv, rvk, rvv, true)
 	validKV := it.ValidKV()
 	var vx reflect.Value
 	for it.Next() {

+ 2 - 2
codec/goversion_maprange_gte_go112.go

@@ -35,8 +35,8 @@ func (t *mapIter) Value() (r reflect.Value) {
 
 func (t *mapIter) Done() {}
 
-func mapRange(m, k, v reflect.Value, values bool) *mapIter {
-	return &mapIter{
+func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
+	*t = mapIter{
 		m:      m,
 		t:      m.MapRange(),
 		values: values,

+ 2 - 2
codec/goversion_maprange_lt_go112.go

@@ -37,8 +37,8 @@ func (t *mapIter) Value() (r reflect.Value) {
 
 func (t *mapIter) Done() {}
 
-func mapRange(m, k, v reflect.Value, values bool) *mapIter {
-	return &mapIter{
+func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
+	*t = mapIter{
 		m:      m,
 		keys:   m.MapKeys(),
 		values: values,

+ 5 - 0
codec/helper.go

@@ -114,6 +114,11 @@ package codec
 //      recover that as an io.EOF.
 //      This allows the bounds check branch to always be taken by the branch predictor,
 //      giving better performance (in theory), while ensuring that the code is shorter.
+//
+// ------------------------------------------
+// Escape Analysis
+//    - Prefer to return non-pointers if the value is used right away.
+//      Newly allocated values returned as pointers will be heap-allocated as they escape.
 
 import (
 	"bytes"

+ 19 - 6
codec/helper_unsafe.go

@@ -691,6 +691,10 @@ type unsafeMapHashIter struct {
 // 	it unsafe.Pointer
 // }
 
+type mapIter struct {
+	unsafeMapIter
+}
+
 type unsafeMapIter struct {
 	it *unsafeMapHashIter
 	// k, v             reflect.Value
@@ -699,7 +703,7 @@ type unsafeMapIter struct {
 	kisref, visref   bool
 	mapvalues        bool
 	done             bool
-
+	started          bool
 	// _ [2]uint64 // padding (cache-aligned)
 }
 
@@ -721,11 +725,12 @@ func (t *unsafeMapIter) Next() (r bool) {
 	if t == nil || t.done {
 		return
 	}
-	if t.it == nil {
-		t.it = (*unsafeMapHashIter)(mapiterinit(t.mtyp, t.mptr))
-	} else {
+	if t.started {
 		mapiternext((unsafe.Pointer)(t.it))
+	} else {
+		t.started = true
 	}
+
 	t.done = t.it.key == nil
 	if t.done {
 		return
@@ -771,17 +776,20 @@ func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
 	return urv.ptr
 }
 
-func mapRange(m, k, v reflect.Value, mapvalues bool) (t *unsafeMapIter) {
+func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
 	if rvIsNil(m) {
 		// return &unsafeMapIter{done: true}
+		t.done = true
 		return
 	}
+	t.done = false
+	t.started = false
 	// if unsafeMapIterUsePool {
 	// 	t = unsafeMapIterPool.Get().(*unsafeMapIter)
 	// } else {
 	//	t = new(unsafeMapIter)
 	// }
-	t = new(unsafeMapIter)
+	// t = new(unsafeMapIter)
 	// t.k = k
 	// t.v = v
 	t.mapvalues = mapvalues
@@ -792,6 +800,8 @@ func mapRange(m, k, v reflect.Value, mapvalues bool) (t *unsafeMapIter) {
 	t.mtyp = urv.typ
 	t.mptr = rv2ptr(urv)
 
+	t.it = (*unsafeMapHashIter)(mapiterinit(t.mtyp, t.mptr))
+
 	urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
 	t.ktyp = urv.typ
 	t.kptr = urv.ptr
@@ -802,6 +812,9 @@ func mapRange(m, k, v reflect.Value, mapvalues bool) (t *unsafeMapIter) {
 		t.vtyp = urv.typ
 		t.vptr = urv.ptr
 		t.visref = refBitset.isset(byte(v.Kind()))
+	} else {
+		t.vtyp = nil
+		t.vptr = nil
 	}
 
 	return