cache_test.go

/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package rls

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/internal/backoff"
)
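
// Fixtures shared by the tests in this file: a fixed set of cache keys and a
// matching set of cache entries built by initCacheEntries.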
var (
	cacheKeys = []cacheKey{
		{path: "0", keys: "a"},
		{path: "1", keys: "b"},
		{path: "2", keys: "c"},
		{path: "3", keys: "d"},
		{path: "4", keys: "e"},
	}

	longDuration  = 10 * time.Minute
	shortDuration = 1 * time.Millisecond
	cacheEntries  []*cacheEntry
)
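
// initCacheEntries (re)builds the shared cacheEntries fixture. Every test
// calls it first so that the time-based fields (expiry, eviction and backoff
// times) are computed relative to when the test runs.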
func initCacheEntries() {
	// All entries have a dummy size of 1 to simplify resize operations.
	cacheEntries = []*cacheEntry{
		{
			// Entry is valid and minimum expiry time has not expired.
			expiryTime:        time.Now().Add(longDuration),
			earliestEvictTime: time.Now().Add(longDuration),
			size:              1,
		},
		{
			// Entry is valid and is in backoff.
			expiryTime:   time.Now().Add(longDuration),
			backoffTime:  time.Now().Add(longDuration),
			backoffState: &backoffState{timer: time.NewTimer(longDuration)},
			size:         1,
		},
		{
			// Entry is valid, and not in backoff.
			expiryTime: time.Now().Add(longDuration),
			size:       1,
		},
		{
			// Entry is invalid.
			expiryTime: time.Time{}.Add(shortDuration),
			size:       1,
		},
		{
			// Entry is invalid and backoff has expired.
			expiryTime:        time.Time{}.Add(shortDuration),
			backoffExpiryTime: time.Time{}.Add(shortDuration),
			size:              1,
		},
	}
}
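
// TestLRU_BasicOperations exercises addEntry, makeRecent, removeEntry and
// getLeastRecentlyUsed on the LRU, verifying that the least recently used key
// is tracked correctly as entries are added, touched and removed.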
func (s) TestLRU_BasicOperations(t *testing.T) {
	initCacheEntries()
	// Create an LRU and add some entries to it.
	lru := newLRU()
	for _, k := range cacheKeys {
		lru.addEntry(k)
	}

	// Get the least recent entry. This should be the first entry we added.
	if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want {
		t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
	}

	// Iterate through the slice of keys we added earlier, making them the most
	// recent entry, one at a time. The least recent entry at that point should
	// be the next entry from our slice of keys.
	for i, k := range cacheKeys {
		lru.makeRecent(k)

		lruIndex := (i + 1) % len(cacheKeys)
		if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want {
			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
		}
	}

	// Iterate through the slice of keys we added earlier, removing them one at
	// a time. The least recent entry at that point should be the next entry
	// from our slice of keys, except after the last removal, when the LRU is
	// empty.
	for i, k := range cacheKeys {
		lru.removeEntry(k)

		var want cacheKey
		if i < len(cacheKeys)-1 {
			want = cacheKeys[i+1]
		}
		if got := lru.getLeastRecentlyUsed(); got != want {
			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
		}
	}
}
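
// TestDataCache_BasicOperations adds entries to the data cache and verifies
// that a lookup for each key returns the entry that was stored for it.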
func (s) TestDataCache_BasicOperations(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil)
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}
	for i, k := range cacheKeys {
		entry := dc.getEntry(k)
		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i])
		}
	}
}
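
// TestDataCache_AddForcesResize overfills a data cache of size 1 and verifies
// the return values of addEntry when each add evicts the previously added
// entry, with and without a running backoff timer on the evicted entry.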
func (s) TestDataCache_AddForcesResize(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(1, nil)

	// The first entry in cacheEntries has a minimum expiry time in the future.
	// This entry would stop the resize operation since we do not evict entries
	// whose minimum expiration time is in the future. So, we do not use that
	// entry in this test. The entry being added has a running backoff timer.
	evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1])
	if evicted || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok)
	}

	// Add another entry leading to the eviction of the above entry which has a
	// running backoff timer. The first return value is expected to be true.
	backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2])
	if !backoffCancelled || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok)
	}

	// Add a third entry, evicting the above entry, which does not have a
	// running backoff timer. The first return value is expected to be false.
	backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3])
	if backoffCancelled || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok)
	}
}
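
// TestDataCache_Resize verifies that a resize operation stops when it reaches
// an entry whose earliestEvictTime is in the future, and succeeds once that
// entry has been removed from the cache.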
func (s) TestDataCache_Resize(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil)
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	// The first cache entry (with a key of cacheKeys[0]) that we added has an
	// earliestEvictTime in the future. As part of the resize operation, we
	// traverse the cache in least recently used order, and this will be the
	// first entry that we encounter. Since its earliestEvictTime is in the
	// future, the resize operation will stop, leaving the cache bigger than
	// what was asked for.
	if dc.resize(1) {
		t.Fatalf("dataCache.resize() returned true, want false")
	}
	if dc.currentSize != 5 {
		t.Fatalf("dataCache.size is %d, want 5", dc.currentSize)
	}

	// Remove the entry with earliestEvictTime in the future and retry the
	// resize operation.
	dc.removeEntryForTesting(cacheKeys[0])
	if !dc.resize(1) {
		t.Fatalf("dataCache.resize() returned false, want true")
	}
	if dc.currentSize != 1 {
		t.Fatalf("dataCache.size is %d, want 1", dc.currentSize)
	}
}
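
// TestDataCache_EvictExpiredEntries verifies that evictExpiredEntries removes
// only the entries whose expiry time has passed, leaving valid entries in the
// cache.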
func (s) TestDataCache_EvictExpiredEntries(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil)
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	// The last two entries in the cacheEntries list have expired, and will be
	// evicted. The first three should still remain in the cache.
	if !dc.evictExpiredEntries() {
		t.Fatal("dataCache.evictExpiredEntries() returned false, want true")
	}
	if dc.currentSize != 3 {
		t.Fatalf("dataCache.size is %d, want 3", dc.currentSize)
	}
	for i := 0; i < 3; i++ {
		entry := dc.getEntry(cacheKeys[i])
		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i])
		}
	}
}
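
// TestDataCache_ResetBackoffState verifies that resetBackoffState overwrites
// the backoffState of entries that have one, and leaves entries without a
// backoffState untouched.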
func (s) TestDataCache_ResetBackoffState(t *testing.T) {
	type fakeBackoff struct {
		backoff.Strategy
	}

	initCacheEntries()
	dc := newDataCache(5, nil)
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	newBackoffState := &backoffState{bs: &fakeBackoff{}}
	if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker {
		t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true")
	}

	// Make sure that the entry with no backoff state was not touched.
	if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) {
		t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState")
	}

	// Make sure that the entry with a valid backoff state was reset.
	entry := dc.getEntry(cacheKeys[1])
	if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" {
		t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff)
	}
}