cache.go 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301
  1. package collection
  2. import (
  3. "container/list"
  4. "sync"
  5. "sync/atomic"
  6. "time"
  7. "git.i2edu.net/i2/go-zero/core/logx"
  8. "git.i2edu.net/i2/go-zero/core/mathx"
  9. "git.i2edu.net/i2/go-zero/core/syncx"
  10. )
const (
	// defaultCacheName is used when no WithName option is given.
	defaultCacheName = "proc"
	// slots is the bucket count of the expiry timing wheel (1s per tick).
	slots = 300
	// statInterval is how often cache statistics are logged.
	statInterval = time.Minute
	// make the expiry unstable to avoid lots of cached items expire at the same time
	// make the unstable expiry to be [0.95, 1.05] * seconds
	expiryDeviation = 0.05
)

// emptyLruCache is the shared no-op lru used when WithLimit is not set.
var emptyLruCache = emptyLru{}
type (
	// CacheOption defines the method to customize a Cache.
	CacheOption func(cache *Cache)

	// A Cache object is a in-memory cache.
	Cache struct {
		name           string                 // name shown in stat log lines
		lock           sync.Mutex             // guards data and lruCache
		data           map[string]interface{} // cached key/value pairs
		expire         time.Duration          // base expiry, jittered per item via unstableExpiry
		timingWheel    *TimingWheel           // schedules per-key expiration
		lruCache       lru                    // eviction policy; no-op unless WithLimit is used
		barrier        syncx.SharedCalls      // deduplicates concurrent fetches in Take
		unstableExpiry mathx.Unstable         // jitter source for expiry durations
		stats          *cacheStat             // hit/miss counters and periodic reporter
	}
)
  36. // NewCache returns a Cache with given expire.
  37. func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
  38. cache := &Cache{
  39. data: make(map[string]interface{}),
  40. expire: expire,
  41. lruCache: emptyLruCache,
  42. barrier: syncx.NewSharedCalls(),
  43. unstableExpiry: mathx.NewUnstable(expiryDeviation),
  44. }
  45. for _, opt := range opts {
  46. opt(cache)
  47. }
  48. if len(cache.name) == 0 {
  49. cache.name = defaultCacheName
  50. }
  51. cache.stats = newCacheStat(cache.name, cache.size)
  52. timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
  53. key, ok := k.(string)
  54. if !ok {
  55. return
  56. }
  57. cache.Del(key)
  58. })
  59. if err != nil {
  60. return nil, err
  61. }
  62. cache.timingWheel = timingWheel
  63. return cache, nil
  64. }
  65. // Del deletes the item with the given key from c.
  66. func (c *Cache) Del(key string) {
  67. c.lock.Lock()
  68. delete(c.data, key)
  69. c.lruCache.remove(key)
  70. c.lock.Unlock()
  71. c.timingWheel.RemoveTimer(key)
  72. }
  73. // Get returns the item with the given key from c.
  74. func (c *Cache) Get(key string) (interface{}, bool) {
  75. value, ok := c.doGet(key)
  76. if ok {
  77. c.stats.IncrementHit()
  78. } else {
  79. c.stats.IncrementMiss()
  80. }
  81. return value, ok
  82. }
  83. // Set sets value into c with key.
  84. func (c *Cache) Set(key string, value interface{}) {
  85. c.lock.Lock()
  86. _, ok := c.data[key]
  87. c.data[key] = value
  88. c.lruCache.add(key)
  89. c.lock.Unlock()
  90. expiry := c.unstableExpiry.AroundDuration(c.expire)
  91. if ok {
  92. c.timingWheel.MoveTimer(key, expiry)
  93. } else {
  94. c.timingWheel.SetTimer(key, value, expiry)
  95. }
  96. }
  97. // Take returns the item with the given key.
  98. // If the item is in c, return it directly.
  99. // If not, use fetch method to get the item, set into c and return it.
  100. func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
  101. if val, ok := c.doGet(key); ok {
  102. c.stats.IncrementHit()
  103. return val, nil
  104. }
  105. var fresh bool
  106. val, err := c.barrier.Do(key, func() (interface{}, error) {
  107. // because O(1) on map search in memory, and fetch is an IO query
  108. // so we do double check, cache might be taken by another call
  109. if val, ok := c.doGet(key); ok {
  110. return val, nil
  111. }
  112. v, e := fetch()
  113. if e != nil {
  114. return nil, e
  115. }
  116. fresh = true
  117. c.Set(key, v)
  118. return v, nil
  119. })
  120. if err != nil {
  121. return nil, err
  122. }
  123. if fresh {
  124. c.stats.IncrementMiss()
  125. return val, nil
  126. }
  127. // got the result from previous ongoing query
  128. c.stats.IncrementHit()
  129. return val, nil
  130. }
  131. func (c *Cache) doGet(key string) (interface{}, bool) {
  132. c.lock.Lock()
  133. defer c.lock.Unlock()
  134. value, ok := c.data[key]
  135. if ok {
  136. c.lruCache.add(key)
  137. }
  138. return value, ok
  139. }
// onEvict is the keyLru callback fired when a key falls off the LRU.
// The caller already holds c.lock (eviction happens inside
// lruCache.add under the lock), so it must not lock again.
func (c *Cache) onEvict(key string) {
	// already locked
	delete(c.data, key)
	c.timingWheel.RemoveTimer(key)
}
// size returns the current number of cached items; it is wired into
// cacheStat as the sizeCallback used by the periodic stat log.
func (c *Cache) size() int {
	c.lock.Lock()
	defer c.lock.Unlock()
	return len(c.data)
}
  150. // WithLimit customizes a Cache with items up to limit.
  151. func WithLimit(limit int) CacheOption {
  152. return func(cache *Cache) {
  153. if limit > 0 {
  154. cache.lruCache = newKeyLru(limit, cache.onEvict)
  155. }
  156. }
  157. }
// WithName customizes a Cache with the given name.
// The name only affects the stat log lines.
func WithName(name string) CacheOption {
	return func(cache *Cache) {
		cache.name = name
	}
}
  164. type (
  165. lru interface {
  166. add(key string)
  167. remove(key string)
  168. }
  169. emptyLru struct{}
  170. keyLru struct {
  171. limit int
  172. evicts *list.List
  173. elements map[string]*list.Element
  174. onEvict func(key string)
  175. }
  176. )
  177. func (elru emptyLru) add(string) {
  178. }
  179. func (elru emptyLru) remove(string) {
  180. }
  181. func newKeyLru(limit int, onEvict func(key string)) *keyLru {
  182. return &keyLru{
  183. limit: limit,
  184. evicts: list.New(),
  185. elements: make(map[string]*list.Element),
  186. onEvict: onEvict,
  187. }
  188. }
  189. func (klru *keyLru) add(key string) {
  190. if elem, ok := klru.elements[key]; ok {
  191. klru.evicts.MoveToFront(elem)
  192. return
  193. }
  194. // Add new item
  195. elem := klru.evicts.PushFront(key)
  196. klru.elements[key] = elem
  197. // Verify size not exceeded
  198. if klru.evicts.Len() > klru.limit {
  199. klru.removeOldest()
  200. }
  201. }
  202. func (klru *keyLru) remove(key string) {
  203. if elem, ok := klru.elements[key]; ok {
  204. klru.removeElement(elem)
  205. }
  206. }
  207. func (klru *keyLru) removeOldest() {
  208. elem := klru.evicts.Back()
  209. if elem != nil {
  210. klru.removeElement(elem)
  211. }
  212. }
  213. func (klru *keyLru) removeElement(e *list.Element) {
  214. klru.evicts.Remove(e)
  215. key := e.Value.(string)
  216. delete(klru.elements, key)
  217. klru.onEvict(key)
  218. }
// cacheStat accumulates hit/miss counters and logs them periodically.
type cacheStat struct {
	name string // cache name included in each log line
	hit uint64 // hits since the last report; accessed atomically
	miss uint64 // misses since the last report; accessed atomically
	sizeCallback func() int // reports the current number of cached items
}
// newCacheStat creates a cacheStat and starts its background stat loop.
// NOTE(review): the goroutine has no stop signal, so it runs for the
// lifetime of the process — fine for long-lived caches, a leak if many
// short-lived caches are created; confirm intended usage.
func newCacheStat(name string, sizeCallback func() int) *cacheStat {
	st := &cacheStat{
		name: name,
		sizeCallback: sizeCallback,
	}
	go st.statLoop()
	return st
}
// IncrementHit atomically adds one to the hit counter.
func (cs *cacheStat) IncrementHit() {
	atomic.AddUint64(&cs.hit, 1)
}
// IncrementMiss atomically adds one to the miss counter.
func (cs *cacheStat) IncrementMiss() {
	atomic.AddUint64(&cs.miss, 1)
}
  239. func (cs *cacheStat) statLoop() {
  240. ticker := time.NewTicker(statInterval)
  241. defer ticker.Stop()
  242. for range ticker.C {
  243. hit := atomic.SwapUint64(&cs.hit, 0)
  244. miss := atomic.SwapUint64(&cs.miss, 0)
  245. total := hit + miss
  246. if total == 0 {
  247. continue
  248. }
  249. percent := 100 * float32(hit) / float32(total)
  250. logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
  251. cs.name, total, percent, cs.sizeCallback(), hit, miss)
  252. }
  253. }