// cache.go — in-process cache with optional LRU size limiting and key expiry.
package collection

import (
	"container/list"
	"sync"
	"sync/atomic"
	"time"

	"github.com/tal-tech/go-zero/core/logx"
	"github.com/tal-tech/go-zero/core/mathx"
	"github.com/tal-tech/go-zero/core/syncx"
)
const (
	// defaultCacheName is used when the caller does not supply WithName.
	defaultCacheName = "proc"
	// slots is the number of buckets in the timing wheel that drives expiry.
	slots = 300
	// statInterval is how often cacheStat logs hit/miss statistics.
	statInterval = time.Minute
	// make the expiry unstable to avoid lots of cached items expire at the same time
	// make the unstable expiry to be [0.95, 1.05] * seconds
	expiryDeviation = 0.05
)

// emptyLruCache is the shared no-op lru used when no size limit is configured.
var emptyLruCache = emptyLru{}
type (
	// CacheOption customizes a Cache created by NewCache.
	CacheOption func(cache *Cache)

	// Cache is an in-process cache with optional LRU size limiting
	// (see WithLimit) and per-key expiry driven by a timing wheel.
	Cache struct {
		name string
		// lock guards data and lruCache.
		lock sync.Mutex
		data map[string]interface{}
		// NOTE(review): evicts appears unused by any Cache method in this
		// file (keyLru keeps its own list) — confirm before removing.
		evicts      *list.List
		expire      time.Duration
		timingWheel *TimingWheel
		// lruCache tracks key recency; emptyLruCache when unbounded.
		lruCache lru
		// barrier deduplicates concurrent Take fetches per key.
		barrier        syncx.SharedCalls
		unstableExpiry mathx.Unstable
		stats          *cacheStat
	}
)
  35. func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
  36. cache := &Cache{
  37. data: make(map[string]interface{}),
  38. expire: expire,
  39. lruCache: emptyLruCache,
  40. barrier: syncx.NewSharedCalls(),
  41. unstableExpiry: mathx.NewUnstable(expiryDeviation),
  42. }
  43. for _, opt := range opts {
  44. opt(cache)
  45. }
  46. if len(cache.name) == 0 {
  47. cache.name = defaultCacheName
  48. }
  49. cache.stats = newCacheStat(cache.name, cache.size)
  50. timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
  51. key, ok := k.(string)
  52. if !ok {
  53. return
  54. }
  55. cache.Del(key)
  56. })
  57. if err != nil {
  58. return nil, err
  59. }
  60. cache.timingWheel = timingWheel
  61. return cache, nil
  62. }
  63. func (c *Cache) Del(key string) {
  64. c.lock.Lock()
  65. delete(c.data, key)
  66. c.lruCache.remove(key)
  67. c.lock.Unlock()
  68. c.timingWheel.RemoveTimer(key)
  69. }
  70. func (c *Cache) Get(key string) (interface{}, bool) {
  71. value, ok := c.doGet(key)
  72. if ok {
  73. c.stats.IncrementHit()
  74. } else {
  75. c.stats.IncrementMiss()
  76. }
  77. return value, ok
  78. }
  79. func (c *Cache) Set(key string, value interface{}) {
  80. c.lock.Lock()
  81. _, ok := c.data[key]
  82. c.data[key] = value
  83. c.lruCache.add(key)
  84. c.lock.Unlock()
  85. expiry := c.unstableExpiry.AroundDuration(c.expire)
  86. if ok {
  87. c.timingWheel.MoveTimer(key, expiry)
  88. } else {
  89. c.timingWheel.SetTimer(key, value, expiry)
  90. }
  91. }
  92. func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
  93. if val, ok := c.doGet(key); ok {
  94. c.stats.IncrementHit()
  95. return val, nil
  96. }
  97. var fresh bool
  98. val, err := c.barrier.Do(key, func() (interface{}, error) {
  99. // because O(1) on map search in memory, and fetch is an IO query
  100. // so we do double check, cache might be taken by another call
  101. if val, ok := c.doGet(key); ok {
  102. return val, nil
  103. }
  104. v, e := fetch()
  105. if e != nil {
  106. return nil, e
  107. }
  108. fresh = true
  109. c.Set(key, v)
  110. return v, nil
  111. })
  112. if err != nil {
  113. return nil, err
  114. }
  115. if fresh {
  116. c.stats.IncrementMiss()
  117. return val, nil
  118. } else {
  119. // got the result from previous ongoing query
  120. c.stats.IncrementHit()
  121. }
  122. return val, nil
  123. }
  124. func (c *Cache) doGet(key string) (interface{}, bool) {
  125. c.lock.Lock()
  126. defer c.lock.Unlock()
  127. value, ok := c.data[key]
  128. if ok {
  129. c.lruCache.add(key)
  130. }
  131. return value, ok
  132. }
// onEvict is called by the keyLru when the size limit pushes a key out.
// The lru invokes it while c.lock is already held (add happens inside
// Set/doGet critical sections), so it must not re-lock.
func (c *Cache) onEvict(key string) {
	// already locked
	delete(c.data, key)
	c.timingWheel.RemoveTimer(key)
}
  138. func (c *Cache) size() int {
  139. c.lock.Lock()
  140. defer c.lock.Unlock()
  141. return len(c.data)
  142. }
  143. func WithLimit(limit int) CacheOption {
  144. return func(cache *Cache) {
  145. if limit > 0 {
  146. cache.lruCache = newKeyLru(limit, cache.onEvict)
  147. }
  148. }
  149. }
  150. func WithName(name string) CacheOption {
  151. return func(cache *Cache) {
  152. cache.name = name
  153. }
  154. }
  155. type (
  156. lru interface {
  157. add(key string)
  158. remove(key string)
  159. }
  160. emptyLru struct{}
  161. keyLru struct {
  162. limit int
  163. evicts *list.List
  164. elements map[string]*list.Element
  165. onEvict func(key string)
  166. }
  167. )
  168. func (elru emptyLru) add(string) {
  169. }
  170. func (elru emptyLru) remove(string) {
  171. }
  172. func newKeyLru(limit int, onEvict func(key string)) *keyLru {
  173. return &keyLru{
  174. limit: limit,
  175. evicts: list.New(),
  176. elements: make(map[string]*list.Element),
  177. onEvict: onEvict,
  178. }
  179. }
  180. func (klru *keyLru) add(key string) {
  181. if elem, ok := klru.elements[key]; ok {
  182. klru.evicts.MoveToFront(elem)
  183. return
  184. }
  185. // Add new item
  186. elem := klru.evicts.PushFront(key)
  187. klru.elements[key] = elem
  188. // Verify size not exceeded
  189. if klru.evicts.Len() > klru.limit {
  190. klru.removeOldest()
  191. }
  192. }
  193. func (klru *keyLru) remove(key string) {
  194. if elem, ok := klru.elements[key]; ok {
  195. klru.removeElement(elem)
  196. }
  197. }
  198. func (klru *keyLru) removeOldest() {
  199. elem := klru.evicts.Back()
  200. if elem != nil {
  201. klru.removeElement(elem)
  202. }
  203. }
  204. func (klru *keyLru) removeElement(e *list.Element) {
  205. klru.evicts.Remove(e)
  206. key := e.Value.(string)
  207. delete(klru.elements, key)
  208. klru.onEvict(key)
  209. }
// cacheStat accumulates hit/miss counters for a named cache and logs
// them periodically from statLoop.
type cacheStat struct {
	name string
	// hit and miss are updated atomically; statLoop swaps them back to zero
	// each interval.
	hit  uint64
	miss uint64
	// sizeCallback reports the current number of cached entries.
	sizeCallback func() int
}
  216. func newCacheStat(name string, sizeCallback func() int) *cacheStat {
  217. st := &cacheStat{
  218. name: name,
  219. sizeCallback: sizeCallback,
  220. }
  221. go st.statLoop()
  222. return st
  223. }
// IncrementHit atomically bumps the hit counter.
func (cs *cacheStat) IncrementHit() {
	atomic.AddUint64(&cs.hit, 1)
}
// IncrementMiss atomically bumps the miss counter.
func (cs *cacheStat) IncrementMiss() {
	atomic.AddUint64(&cs.miss, 1)
}
  230. func (cs *cacheStat) statLoop() {
  231. ticker := time.NewTicker(statInterval)
  232. defer ticker.Stop()
  233. for {
  234. select {
  235. case <-ticker.C:
  236. hit := atomic.SwapUint64(&cs.hit, 0)
  237. miss := atomic.SwapUint64(&cs.miss, 0)
  238. total := hit + miss
  239. if total == 0 {
  240. continue
  241. }
  242. percent := 100 * float32(hit) / float32(total)
  243. logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
  244. cs.name, total, percent, cs.sizeCallback(), hit, miss)
  245. }
  246. }
  247. }