unifiedcache.cpp 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521
  1. // © 2016 and later: Unicode, Inc. and others.
  2. // License & terms of use: http://www.unicode.org/copyright.html
  3. /*
  4. ******************************************************************************
  5. * Copyright (C) 2015, International Business Machines Corporation and
  6. * others. All Rights Reserved.
  7. ******************************************************************************
  8. *
  9. * File unifiedcache.cpp
  10. ******************************************************************************
  11. */
#include "unifiedcache.h"
#include <algorithm>          // For std::max()
#include <condition_variable> // For std::condition_variable (gInProgressValueAddedCond)
#include <mutex>
#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
// Singleton cache instance plus the synchronization primitives guarding it.
// gCacheMutex and gInProgressValueAddedCond are created with STATIC_NEW
// (placement new into static storage) in cacheInit() and torn down by
// explicit destructor calls in unifiedcache_cleanup().
static icu::UnifiedCache *gCache = nullptr;
static std::mutex *gCacheMutex = nullptr;
// Signaled (notify_all) when an in-progress placeholder entry is replaced
// by a real value or an error; _poll() waits on it.
static std::condition_variable *gInProgressValueAddedCond;
static icu::UInitOnce gCacheInitOnce {};
// Upper bound on hash-table probes per eviction slice, so each slice does
// a bounded amount of work while holding gCacheMutex.
static const int32_t MAX_EVICT_ITERATIONS = 10;
// Defaults for the eviction policy; see UnifiedCache::setEvictionPolicy().
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;
U_CDECL_BEGIN
// ICU library cleanup hook, registered in cacheInit().
// Order matters: gCache is deleted before the mutex is destroyed because
// ~UnifiedCache() still locks gCacheMutex (via flush()).
static UBool U_CALLCONV unifiedcache_cleanup() {
    gCacheInitOnce.reset();
    delete gCache;
    gCache = nullptr;
    // The mutex and condition variable were constructed with STATIC_NEW
    // (placement new into static storage), so run their destructors
    // explicitly rather than using delete.
    gCacheMutex->~mutex();
    gCacheMutex = nullptr;
    gInProgressValueAddedCond->~condition_variable();
    gInProgressValueAddedCond = nullptr;
    return true;
}
U_CDECL_END
  37. U_NAMESPACE_BEGIN
  38. int32_t U_EXPORT2
  39. ucache_hashKeys(const UHashTok key) {
  40. const CacheKeyBase* ckey = static_cast<const CacheKeyBase*>(key.pointer);
  41. return ckey->hashCode();
  42. }
  43. UBool U_EXPORT2
  44. ucache_compareKeys(const UHashTok key1, const UHashTok key2) {
  45. const CacheKeyBase* p1 = static_cast<const CacheKeyBase*>(key1.pointer);
  46. const CacheKeyBase* p2 = static_cast<const CacheKeyBase*>(key2.pointer);
  47. return *p1 == *p2;
  48. }
  49. void U_EXPORT2
  50. ucache_deleteKey(void *obj) {
  51. CacheKeyBase* p = static_cast<CacheKeyBase*>(obj);
  52. delete p;
  53. }
// Out-of-line virtual destructor; anchors CacheKeyBase's vtable here.
CacheKeyBase::~CacheKeyBase() {
}
// One-time initializer, run via umtx_initOnce() from getInstance().
// Registers the cleanup hook, creates the static-storage mutex and
// condition variable, then the cache singleton itself.
static void U_CALLCONV cacheInit(UErrorCode &status) {
    U_ASSERT(gCache == nullptr);
    ucln_common_registerCleanup(
            UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);
    gCacheMutex = STATIC_NEW(std::mutex);
    gInProgressValueAddedCond = STATIC_NEW(std::condition_variable);
    gCache = new UnifiedCache(status);
    // ICU convention: treat a null result from new as an allocation failure.
    if (gCache == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
    }
    if (U_FAILURE(status)) {
        // Constructor failed (e.g. uhash_open ran out of memory); do not
        // publish a half-built cache.
        delete gCache;
        gCache = nullptr;
        return;
    }
}
  72. UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
  73. umtx_initOnce(gCacheInitOnce, &cacheInit, status);
  74. if (U_FAILURE(status)) {
  75. return nullptr;
  76. }
  77. U_ASSERT(gCache != nullptr);
  78. return gCache;
  79. }
// Constructs the cache. fNoValue is a sentinel SharedObject that is stored
// in the hash table both as the "in progress" placeholder while a value is
// being computed and as the permanent value for keys whose creation failed.
UnifiedCache::UnifiedCache(UErrorCode &status) :
        fHashtable(nullptr),
        fEvictPos(UHASH_FIRST),
        fNumValuesTotal(0),
        fNumValuesInUse(0),
        fMaxUnused(DEFAULT_MAX_UNUSED),
        fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
        fAutoEvictedCount(0),
        fNoValue(nullptr) {
    if (U_FAILURE(status)) {
        return;
    }
    fNoValue = new SharedObject();
    // ICU convention: treat a null result from new as an allocation failure.
    if (fNoValue == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    fNoValue->softRefCount = 1; // Add fake references to prevent fNoValue from being deleted
    fNoValue->hardRefCount = 1; // when other references to it are removed.
    fNoValue->cachePtr = this;
    // The table maps CacheKeyBase* -> SharedObject*. Keys are owned by the
    // table (freed via ucache_deleteKey); values are managed through the
    // soft/hard reference counts, so no value deleter is installed.
    fHashtable = uhash_open(
            &ucache_hashKeys,
            &ucache_compareKeys,
            nullptr,
            &status);
    if (U_FAILURE(status)) {
        return;
    }
    uhash_setKeyDeleter(fHashtable, &ucache_deleteKey);
}
  110. void UnifiedCache::setEvictionPolicy(
  111. int32_t count, int32_t percentageOfInUseItems, UErrorCode &status) {
  112. if (U_FAILURE(status)) {
  113. return;
  114. }
  115. if (count < 0 || percentageOfInUseItems < 0) {
  116. status = U_ILLEGAL_ARGUMENT_ERROR;
  117. return;
  118. }
  119. std::lock_guard<std::mutex> lock(*gCacheMutex);
  120. fMaxUnused = count;
  121. fMaxPercentageOfInUse = percentageOfInUseItems;
  122. }
  123. int32_t UnifiedCache::unusedCount() const {
  124. std::lock_guard<std::mutex> lock(*gCacheMutex);
  125. return uhash_count(fHashtable) - fNumValuesInUse;
  126. }
// Returns the total number of entries removed by automatic eviction
// (_runEvictionSlice) over the cache's lifetime.
int64_t UnifiedCache::autoEvictedCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return fAutoEvictedCount;
}
// Returns the number of keys currently in the hash table, including
// in-progress placeholders and error entries.
int32_t UnifiedCache::keyCount() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    return uhash_count(fHashtable);
}
  135. void UnifiedCache::flush() const {
  136. std::lock_guard<std::mutex> lock(*gCacheMutex);
  137. // Use a loop in case cache items that are flushed held hard references to
  138. // other cache items making those additional cache items eligible for
  139. // flushing.
  140. while (_flush(false));
  141. }
// Called when the last outside hard reference to a cached value goes away:
// update the in-use count and give eviction a chance to reclaim entries.
void UnifiedCache::handleUnreferencedObject() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    --fNumValuesInUse;
    _runEvictionSlice();
}
  147. #ifdef UNIFIED_CACHE_DEBUG
  148. #include <stdio.h>
  149. void UnifiedCache::dump() {
  150. UErrorCode status = U_ZERO_ERROR;
  151. const UnifiedCache *cache = getInstance(status);
  152. if (U_FAILURE(status)) {
  153. fprintf(stderr, "Unified Cache: Error fetching cache.\n");
  154. return;
  155. }
  156. cache->dumpContents();
  157. }
// Locks the cache, then dumps its contents to stderr.
void UnifiedCache::dumpContents() const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    _dumpContents();
}
  162. // Dumps content of cache.
  163. // On entry, gCacheMutex must be held.
  164. // On exit, cache contents dumped to stderr.
  165. void UnifiedCache::_dumpContents() const {
  166. int32_t pos = UHASH_FIRST;
  167. const UHashElement *element = uhash_nextElement(fHashtable, &pos);
  168. char buffer[256];
  169. int32_t cnt = 0;
  170. for (; element != nullptr; element = uhash_nextElement(fHashtable, &pos)) {
  171. const SharedObject *sharedObject =
  172. (const SharedObject *) element->value.pointer;
  173. const CacheKeyBase *key =
  174. (const CacheKeyBase *) element->key.pointer;
  175. if (sharedObject->hasHardReferences()) {
  176. ++cnt;
  177. fprintf(
  178. stderr,
  179. "Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
  180. key->writeDescription(buffer, 256),
  181. key->creationStatus,
  182. sharedObject == fNoValue ? nullptr :sharedObject,
  183. sharedObject->getRefCount(),
  184. sharedObject->getSoftRefCount());
  185. }
  186. }
  187. fprintf(stderr, "Unified Cache: %d out of a total of %d still have hard references\n", cnt, uhash_count(fHashtable));
  188. }
  189. #endif
// Destructor: flush everything evictable, force-remove the rest, then free
// the table and the fNoValue sentinel.
UnifiedCache::~UnifiedCache() {
    // Try our best to clean up first.
    flush();
    {
        // Now all that should be left in the cache are entries that refer to
        // each other and entries with hard references from outside the cache.
        // Nothing we can do about these so proceed to wipe out the cache.
        std::lock_guard<std::mutex> lock(*gCacheMutex);
        _flush(true);
    }
    uhash_close(fHashtable);
    fHashtable = nullptr;
    delete fNoValue;
    fNoValue = nullptr;
}
  205. const UHashElement *
  206. UnifiedCache::_nextElement() const {
  207. const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
  208. if (element == nullptr) {
  209. fEvictPos = UHASH_FIRST;
  210. return uhash_nextElement(fHashtable, &fEvictPos);
  211. }
  212. return element;
  213. }
  214. UBool UnifiedCache::_flush(UBool all) const {
  215. UBool result = false;
  216. int32_t origSize = uhash_count(fHashtable);
  217. for (int32_t i = 0; i < origSize; ++i) {
  218. const UHashElement *element = _nextElement();
  219. if (element == nullptr) {
  220. break;
  221. }
  222. if (all || _isEvictable(element)) {
  223. const SharedObject *sharedObject =
  224. static_cast<const SharedObject*>(element->value.pointer);
  225. U_ASSERT(sharedObject->cachePtr == this);
  226. uhash_removeElement(fHashtable, element);
  227. removeSoftRef(sharedObject); // Deletes the sharedObject when softRefCount goes to zero.
  228. result = true;
  229. }
  230. }
  231. return result;
  232. }
  233. int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
  234. int32_t totalItems = uhash_count(fHashtable);
  235. int32_t evictableItems = totalItems - fNumValuesInUse;
  236. int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
  237. int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
  238. int32_t countOfItemsToEvict = std::max<int32_t>(0, evictableItems - unusedLimit);
  239. return countOfItemsToEvict;
  240. }
  241. void UnifiedCache::_runEvictionSlice() const {
  242. int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
  243. if (maxItemsToEvict <= 0) {
  244. return;
  245. }
  246. for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
  247. const UHashElement *element = _nextElement();
  248. if (element == nullptr) {
  249. break;
  250. }
  251. if (_isEvictable(element)) {
  252. const SharedObject *sharedObject =
  253. static_cast<const SharedObject*>(element->value.pointer);
  254. uhash_removeElement(fHashtable, element);
  255. removeSoftRef(sharedObject); // Deletes sharedObject when SoftRefCount goes to zero.
  256. ++fAutoEvictedCount;
  257. if (--maxItemsToEvict == 0) {
  258. break;
  259. }
  260. }
  261. }
  262. }
// Adds a new (key, value) entry to the hash table, adopting a clone of `key`.
// `creationStatus` is recorded on the key so later fetches can report the
// error (if any) from when the value was created. On failure the caller
// retains its reference to `value`. On entry, gCacheMutex must be held.
void UnifiedCache::_putNew(
        const CacheKeyBase &key,
        const SharedObject *value,
        const UErrorCode creationStatus,
        UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return;
    }
    CacheKeyBase *keyToAdopt = key.clone();
    if (keyToAdopt == nullptr) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return;
    }
    keyToAdopt->fCreationStatus = creationStatus;
    // A soft ref count of 0 means the value is not yet in the cache:
    // this key becomes its primary key.
    if (value->softRefCount == 0) {
        _registerPrimary(keyToAdopt, value);
    }
    void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
    U_ASSERT(oldValue == nullptr);
    (void)oldValue;
    if (U_SUCCESS(status)) {
        // The hash table now holds its own soft reference to value.
        value->softRefCount++;
    }
}
// Stores `value` under `key` unless another thread already completed the
// entry, then returns (through `value`) whatever the table ends up holding.
void UnifiedCache::_putIfAbsentAndGet(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    std::lock_guard<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);
    if (element != nullptr && !_inProgress(element)) {
        // Another thread finished this entry first: swap the caller's
        // reference for one on the already-cached value.
        _fetch(element, value, status);
        return;
    }
    if (element == nullptr) {
        // No entry at all (our placeholder is gone); re-insert on a
        UErrorCode putError = U_ZERO_ERROR;
        // best-effort basis only.
        _putNew(key, value, status, putError);
    } else {
        // Replace our in-progress placeholder with the real value.
        _put(element, value, status);
    }
    // Run an eviction slice. This will run even if we added a primary entry
    // which doesn't increase the unused count, but that is still o.k
    _runEvictionSlice();
}
// Looks up `key`. Returns true if a completed (or error) entry exists,
// with `value`/`status` filled in. Otherwise inserts an in-progress
// placeholder and returns false; the caller must then create the value
// and store it via _putIfAbsentAndGet().
UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == nullptr);
    U_ASSERT(status == U_ZERO_ERROR);
    std::unique_lock<std::mutex> lock(*gCacheMutex);
    const UHashElement *element = uhash_find(fHashtable, &key);
    // If the hash table contains an inProgress placeholder entry for this key,
    // this means that another thread is currently constructing the value object.
    // Loop, waiting for that construction to complete.
    while (element != nullptr && _inProgress(element)) {
        // wait() releases the lock while blocked; re-find after each wakeup.
        gInProgressValueAddedCond->wait(lock);
        element = uhash_find(fHashtable, &key);
    }
    // If the hash table contains an entry for the key,
    // fetch out the contents and return them.
    if (element != nullptr) {
        _fetch(element, value, status);
        return true;
    }
    // The hash table contained nothing for this key.
    // Insert an inProgress place holder value.
    // Our caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return false;
}
// Retrieves the value for `key`, creating and caching it if necessary.
// On success `value` carries a hard reference the caller must release.
// The fNoValue sentinel is never exposed: it is translated back to nullptr,
// with the recorded creation error left in `status`.
void UnifiedCache::_get(
        const CacheKeyBase &key,
        const SharedObject *&value,
        const void *creationContext,
        UErrorCode &status) const {
    U_ASSERT(value == nullptr);
    U_ASSERT(status == U_ZERO_ERROR);
    if (_poll(key, value, status)) {
        // Entry already present. If it is the error sentinel, drop our
        // reference and return nullptr; status already carries the error.
        if (value == fNoValue) {
            SharedObject::clearPtr(value);
        }
        return;
    }
    if (U_FAILURE(status)) {
        return;
    }
    // We inserted the in-progress placeholder; create the real value now.
    value = key.createObject(creationContext, status);
    U_ASSERT(value == nullptr || value->hasHardReferences());
    U_ASSERT(value != nullptr || status != U_ZERO_ERROR);
    if (value == nullptr) {
        // Creation failed: cache fNoValue so waiting threads see the error.
        SharedObject::copyPtr(fNoValue, value);
    }
    // Replace the placeholder (or adopt a competing thread's value).
    _putIfAbsentAndGet(key, value, status);
    if (value == fNoValue) {
        SharedObject::clearPtr(value);
    }
}
// Marks `theKey` as the primary key for `value` and attaches the value to
// this cache, updating the bookkeeping counters. Called exactly once per
// value, when its first soft reference is created. On entry, gCacheMutex
// must be held.
void UnifiedCache::_registerPrimary(
        const CacheKeyBase *theKey, const SharedObject *value) const {
    theKey->fIsPrimary = true;
    value->cachePtr = this;
    ++fNumValuesTotal;
    ++fNumValuesInUse;
}
// Replaces an in-progress placeholder entry with the real `value` (or with
// fNoValue plus an error status), then wakes threads waiting in _poll().
// On entry, gCacheMutex must be held.
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase* theKey = static_cast<const CacheKeyBase*>(element->key.pointer);
    const SharedObject* oldValue = static_cast<const SharedObject*>(element->value.pointer);
    theKey->fCreationStatus = status;
    // First soft reference: this key becomes the value's primary key.
    if (value->softRefCount == 0) {
        _registerPrimary(theKey, value);
    }
    value->softRefCount++;
    // Swap the placeholder pointer for the real value in place.
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);
    // Tell waiting threads that we replace in-progress status with
    // an error.
    gInProgressValueAddedCond->notify_all();
}
// Copies the element's value into `value`, exchanging the caller's existing
// hard reference (if any) for one on the fetched value, and sets `status`
// to the creation status recorded on the key. On entry, gCacheMutex must
// be held.
void UnifiedCache::_fetch(
        const UHashElement *element,
        const SharedObject *&value,
        UErrorCode &status) const {
    const CacheKeyBase* theKey = static_cast<const CacheKeyBase*>(element->key.pointer);
    status = theKey->fCreationStatus;
    // Since we have the cache lock, calling regular SharedObject add/removeRef
    // could cause us to deadlock on ourselves since they may need to lock
    // the cache mutex.
    removeHardRef(value);
    value = static_cast<const SharedObject *>(element->value.pointer);
    addHardRef(value);
}
  402. UBool UnifiedCache::_inProgress(const UHashElement* element) const {
  403. UErrorCode status = U_ZERO_ERROR;
  404. const SharedObject * value = nullptr;
  405. _fetch(element, value, status);
  406. UBool result = _inProgress(value, status);
  407. removeHardRef(value);
  408. return result;
  409. }
  410. UBool UnifiedCache::_inProgress(
  411. const SharedObject* theValue, UErrorCode creationStatus) const {
  412. return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
  413. }
  414. UBool UnifiedCache::_isEvictable(const UHashElement *element) const
  415. {
  416. const CacheKeyBase* theKey = static_cast<const CacheKeyBase*>(element->key.pointer);
  417. const SharedObject* theValue = static_cast<const SharedObject*>(element->value.pointer);
  418. // Entries that are under construction are never evictable
  419. if (_inProgress(theValue, theKey->fCreationStatus)) {
  420. return false;
  421. }
  422. // We can evict entries that are either not a primary or have just
  423. // one reference (The one reference being from the cache itself).
  424. return (!theKey->fIsPrimary || (theValue->softRefCount == 1 && theValue->noHardReferences()));
  425. }
// Drops one soft (cache-internal) reference to `value`, deleting the value
// once neither soft nor hard references remain. On entry, gCacheMutex must
// be held.
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
    U_ASSERT(value->cachePtr == this);
    U_ASSERT(value->softRefCount > 0);
    if (--value->softRefCount == 0) {
        --fNumValuesTotal;
        if (value->noHardReferences()) {
            delete value;
        } else {
            // This path only happens from flush(all). Which only happens from the
            // UnifiedCache destructor. Nulling out value.cacheptr changes the behavior
            // of value.removeRef(), causing the deletion to be done there.
            value->cachePtr = nullptr;
        }
    }
}
  441. int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
  442. int refCount = 0;
  443. if (value) {
  444. refCount = umtx_atomic_dec(&value->hardRefCount);
  445. U_ASSERT(refCount >= 0);
  446. if (refCount == 0) {
  447. --fNumValuesInUse;
  448. }
  449. }
  450. return refCount;
  451. }
  452. int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
  453. int refCount = 0;
  454. if (value) {
  455. refCount = umtx_atomic_inc(&value->hardRefCount);
  456. U_ASSERT(refCount >= 1);
  457. if (refCount == 1) {
  458. fNumValuesInUse++;
  459. }
  460. }
  461. return refCount;
  462. }
  463. U_NAMESPACE_END