package cache

import (
	"crypto/rand"
	"math"
	"math/big"
	insecurerand "math/rand"
	"os"
	"runtime"
	"time"
)
// This is an experimental and unexported (for now) attempt at making a cache
// with better algorithmic complexity than the standard one, namely by
// preventing write locks of the entire cache when an item is added. As of the
// time of writing, the overhead of selecting buckets results in cache
// operations being about twice as slow as for the standard cache with small
// total cache sizes, and faster for larger ones.
//
// See cache_test.go for a few benchmarks.

type unexportedShardedCache struct {
	*shardedCache
}

type shardedCache struct {
	seed    uint32
	m       uint32
	cs      []*cache
	janitor *shardedJanitor
}
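
// exampleConcurrentWriters is an illustrative sketch, not part of the original
// file: it shows the property described in the comment above, namely that
// writers whose keys hash to different shards lock only their own shard rather
// than the whole cache. The key names and values are arbitrary.
func exampleConcurrentWriters(sc *shardedCache) {
	done := make(chan struct{}, 2)
	go func() {
		sc.Set("alpha", 1, time.Minute) // locks only the shard "alpha" hashes to
		done <- struct{}{}
	}()
	go func() {
		sc.Set("omega", 2, time.Minute) // locks only the shard "omega" hashes to
		done <- struct{}{}
	}()
	<-done
	<-done
}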
// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
func djb33(seed uint32, k string) uint32 {
	var (
		l = uint32(len(k))
		d = 5381 + seed + l
		i = uint32(0)
	)
	// Why is all this 5x faster than a for loop?
	if l >= 4 {
		for i < l-4 {
			d = (d * 33) ^ uint32(k[i])
			d = (d * 33) ^ uint32(k[i+1])
			d = (d * 33) ^ uint32(k[i+2])
			d = (d * 33) ^ uint32(k[i+3])
			i += 4
		}
	}
	// Mix in the remaining tail. Note that the last remaining byte is never
	// mixed in: case 1 is empty, and cases 2-4 each stop one byte short.
	switch l - i {
	case 1:
	case 2:
		d = (d * 33) ^ uint32(k[i])
	case 3:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
	case 4:
		d = (d * 33) ^ uint32(k[i])
		d = (d * 33) ^ uint32(k[i+1])
		d = (d * 33) ^ uint32(k[i+2])
	}
	return d ^ (d >> 16)
}
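
// djb33Reference is an illustrative sketch, not part of the original file: it
// is the plain byte-at-a-time djb2-style loop that the unrolled code above is
// being compared against ("5x faster than a for loop"). It is not bit-for-bit
// identical to djb33, because the tail handling differs, but it shows the
// underlying recurrence d = (d*33) ^ byte followed by the final shuffle.
func djb33Reference(seed uint32, k string) uint32 {
	d := 5381 + seed + uint32(len(k))
	for i := 0; i < len(k); i++ {
		d = (d * 33) ^ uint32(k[i])
	}
	return d ^ (d >> 16)
}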
func (sc *shardedCache) bucket(k string) *cache {
	return sc.cs[djb33(sc.seed, k)%sc.m]
}

func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
	sc.bucket(k).Set(k, x, d)
}

func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Add(k, x, d)
}

func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
	return sc.bucket(k).Replace(k, x, d)
}

func (sc *shardedCache) Get(k string) (interface{}, bool) {
	return sc.bucket(k).Get(k)
}

func (sc *shardedCache) Increment(k string, n int64) error {
	return sc.bucket(k).Increment(k, n)
}

func (sc *shardedCache) IncrementFloat(k string, n float64) error {
	return sc.bucket(k).IncrementFloat(k, n)
}

func (sc *shardedCache) Decrement(k string, n int64) error {
	return sc.bucket(k).Decrement(k, n)
}

func (sc *shardedCache) Delete(k string) {
	sc.bucket(k).Delete(k)
}

func (sc *shardedCache) DeleteExpired() {
	for _, v := range sc.cs {
		v.DeleteExpired()
	}
}

// Returns the items in the cache. This may include items that have expired,
// but have not yet been cleaned up. If this is significant, the Expiration
// fields of the items should be checked. Note that explicit synchronization
// is needed to use a cache and its corresponding Items() return values at
// the same time, as the maps are shared.
func (sc *shardedCache) Items() []map[string]Item {
	res := make([]map[string]Item, len(sc.cs))
	for i, v := range sc.cs {
		res[i] = v.Items()
	}
	return res
}
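
// mergeShardedItemsExample is an illustrative sketch, not part of the original
// file: it shows one way a caller might flatten the per-shard maps returned by
// Items() into a single map. As the comment above warns, the maps are shared
// with the cache, so iterating them concurrently with writers requires
// explicit synchronization.
func mergeShardedItemsExample(sc *shardedCache) map[string]Item {
	merged := make(map[string]Item)
	for _, shard := range sc.Items() {
		for k, v := range shard {
			merged[k] = v
		}
	}
	return merged
}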
func (sc *shardedCache) Flush() {
	for _, v := range sc.cs {
		v.Flush()
	}
}

type shardedJanitor struct {
	Interval time.Duration
	stop     chan bool
}

func (j *shardedJanitor) Run(sc *shardedCache) {
	j.stop = make(chan bool)
	ticker := time.NewTicker(j.Interval)
	for {
		select {
		case <-ticker.C:
			sc.DeleteExpired()
		case <-j.stop:
			// Stop the ticker so it can be garbage collected; time.Tick would
			// leak it after the janitor exits.
			ticker.Stop()
			return
		}
	}
}

func stopShardedJanitor(sc *unexportedShardedCache) {
	sc.janitor.stop <- true
}

func runShardedJanitor(sc *shardedCache, ci time.Duration) {
	j := &shardedJanitor{
		Interval: ci,
	}
	sc.janitor = j
	go j.Run(sc)
}
func newShardedCache(n int, de time.Duration) *shardedCache {
	max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
	rnd, err := rand.Int(rand.Reader, max)
	var seed uint32
	if err != nil {
		os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
		seed = insecurerand.Uint32()
	} else {
		seed = uint32(rnd.Uint64())
	}
	sc := &shardedCache{
		seed: seed,
		m:    uint32(n),
		cs:   make([]*cache, n),
	}
	for i := 0; i < n; i++ {
		c := &cache{
			defaultExpiration: de,
			items:             map[string]Item{},
		}
		sc.cs[i] = c
	}
	return sc
}

func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
	if defaultExpiration == 0 {
		defaultExpiration = -1
	}
	sc := newShardedCache(shards, defaultExpiration)
	SC := &unexportedShardedCache{sc}
	if cleanupInterval > 0 {
		runShardedJanitor(sc, cleanupInterval)
		// The janitor goroutine holds a reference to the inner shardedCache,
		// not to the wrapper, so the wrapper can still be garbage collected;
		// its finalizer then stops the janitor.
		runtime.SetFinalizer(SC, stopShardedJanitor)
	}
	return SC
}
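
// exampleShardedCacheUsage is an illustrative sketch, not part of the original
// file: it exercises the unexported constructor from within the package. The
// shard count and durations are arbitrary.
func exampleShardedCacheUsage() {
	// 50 shards, 5-minute default expiration, janitor sweep every 10 seconds.
	tc := unexportedNewSharded(5*time.Minute, 10*time.Second, 50)
	tc.Set("foo", "bar", time.Minute)
	if v, found := tc.Get("foo"); found {
		_ = v.(string) // "bar"
	}
}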