som_runtime.c 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535
  1. /*
  2. * Copyright (c) 2015-2016, Intel Corporation
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. *
  7. * * Redistributions of source code must retain the above copyright notice,
  8. * this list of conditions and the following disclaimer.
  9. * * Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. * * Neither the name of Intel Corporation nor the names of its contributors
  13. * may be used to endorse or promote products derived from this software
  14. * without specific prior written permission.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  17. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  18. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  19. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  20. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  21. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  22. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  23. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  24. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  25. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  26. * POSSIBILITY OF SUCH DAMAGE.
  27. */
  28. /** \file
  29. * \brief SOM runtime code.
  30. *
  31. *
  32. * Runtime code for SOM handling called by the Rose callback adaptors.
  33. *
  34. * Note:
  35. * Races between escapes making a som loc writeable and attempts to write to it
  36. * at the same to_offset are always resolved as if the escape arrived first
  37. * and then the request to write to that location.
  38. */
  39. #include "hs_internal.h"
  40. #include "som_operation.h"
  41. #include "som_runtime.h"
  42. #include "scratch.h"
  43. #include "ue2common.h"
  44. #include "rose/rose_internal.h"
  45. #include "nfa/nfa_api.h"
  46. #include "nfa/nfa_internal.h"
  47. #include "util/fatbit.h"
  48. #include "util/multibit.h"
  49. static really_inline
  50. void setSomLoc(struct fatbit *som_set_now, u64a *som_store, u32 som_store_count,
  51. const struct som_operation *ri, u64a to_offset) {
  52. /* validity handled by callers */
  53. assert(to_offset >= ri->aux.somDistance);
  54. u64a start_offset = to_offset - ri->aux.somDistance;
  55. u32 som_loc = ri->onmatch;
  56. /* resolve any races for matches at this point in favour of the earliest som
  57. */
  58. if (!fatbit_set(som_set_now, som_store_count, som_loc)) {
  59. som_store[som_loc] = start_offset;
  60. } else {
  61. LIMIT_TO_AT_MOST(&som_store[som_loc], start_offset);
  62. }
  63. DEBUG_PRINTF("som_store[%u] set to %llu\n", som_loc, som_store[som_loc]);
  64. }
  65. static really_inline
  66. char ok_and_mark_if_write(u8 *som_store_valid, struct fatbit *som_set_now,
  67. u8 *som_store_writable, u32 som_store_count,
  68. u32 loc) {
  69. return !mmbit_set(som_store_valid, som_store_count, loc) /* unwritten */
  70. || fatbit_isset(som_set_now, som_store_count, loc) /* write here, need
  71. * to resolve race */
  72. || mmbit_isset(som_store_writable, som_store_count, loc); /* writable */
  73. }
  74. static really_inline
  75. char ok_and_mark_if_unset(u8 *som_store_valid, struct fatbit *som_set_now,
  76. u32 som_store_count, u32 loc) {
  77. return !mmbit_set(som_store_valid, som_store_count, loc) /* unwritten */
  78. || fatbit_isset(som_set_now, som_store_count, loc); /* write here, need
  79. * to resolve race */
  80. }
  81. static
  82. int somRevCallback(UNUSED u64a start, u64a end, ReportID id, void *ctx) {
  83. DEBUG_PRINTF("offset=%llu, id=%u\n", end, id);
  84. // We use the id to store the offset adjustment (for assertions like a
  85. // leading \b or multiline mode).
  86. assert(id <= 1);
  87. u64a *from_offset = ctx;
  88. LIMIT_TO_AT_MOST(from_offset, end + id);
  89. return 1; // continue matching.
  90. }
  91. static really_inline
  92. const struct NFA *getSomRevNFA(const struct RoseEngine *t, u32 i) {
  93. assert(t->somRevOffsetOffset);
  94. const u32 *rev_offsets
  95. = (const u32 *)((const u8 *)t + t->somRevOffsetOffset);
  96. u32 nfa_offset = rev_offsets[i];
  97. assert(nfa_offset && nfa_offset < t->size);
  98. const struct NFA *n = (const struct NFA *)(((const u8 *)t + nfa_offset));
  99. assert(ISALIGNED(n));
  100. return n;
  101. }
/**
 * \brief Run the reverse NFA for \a ri backwards from \a to_offset to
 * discover the corresponding start of match.
 *
 * Scans the current buffer and, where necessary, the tail of the history
 * buffer. On return, *from_offset holds the earliest som reported by the NFA
 * (at most to_offset).
 */
static
void runRevNfa(struct hs_scratch *scratch, const struct som_operation *ri,
               const u64a to_offset, u64a *from_offset) {
    struct core_info *ci = &scratch->core_info;

    DEBUG_PRINTF("buf has %zu bytes total, history has %zu\n",
                 ci->len, ci->hlen);

    u32 nfa_idx = ri->aux.revNfaIndex;
    DEBUG_PRINTF("run rev nfa %u from to_offset=%llu\n", nfa_idx, to_offset);

    const struct NFA *nfa = getSomRevNFA(ci->rose, nfa_idx);

    assert(nfa->maxWidth); // No inf width rev NFAs.

    // Bytes of the current buffer that precede to_offset.
    size_t buf_bytes = to_offset - ci->buf_offset;
    size_t history_bytes = ci->hlen;

    DEBUG_PRINTF("nfa min/max widths [%u,%u], %zu in buffer, %zu in history\n",
                 nfa->minWidth, nfa->maxWidth, buf_bytes, history_bytes);
    assert(nfa->minWidth <= buf_bytes + history_bytes);

    const u8 *buf = ci->buf;
    const u8 *hbuf = ci->hbuf;

    // Work out if we need to scan any history as well. Only the trailing
    // (maxWidth - buf_bytes) bytes of history can be relevant, since the NFA
    // has bounded width.
    if (history_bytes && buf_bytes < nfa->maxWidth) {
        assert(hbuf);
        size_t remainder = nfa->maxWidth - buf_bytes;
        if (remainder < history_bytes) {
            hbuf += history_bytes - remainder;
            history_bytes = remainder;
        }
    }

    DEBUG_PRINTF("scanning %zu from buffer and %zu from history\n", buf_bytes,
                 history_bytes);

    // Start at to_offset; somRevCallback only ever lowers *from_offset, so
    // multiple reports resolve to the earliest som.
    *from_offset = to_offset;

    nfaBlockExecReverse(nfa, to_offset, buf, buf_bytes, hbuf, history_bytes,
                        somRevCallback, from_offset);

    assert(*from_offset <= to_offset);
}
  135. static really_inline
  136. void setSomLocRevNfa(struct hs_scratch *scratch, struct fatbit *som_set_now,
  137. u64a *som_store, u32 som_store_count,
  138. const struct som_operation *ri, u64a to_offset) {
  139. /* validity handled by callers */
  140. u64a from_offset = 0;
  141. runRevNfa(scratch, ri, to_offset, &from_offset);
  142. u32 som_loc = ri->onmatch;
  143. /* resolve any races for matches at this point in favour of the earliest som
  144. */
  145. if (!fatbit_set(som_set_now, som_store_count, som_loc)) {
  146. som_store[som_loc] = from_offset;
  147. } else {
  148. LIMIT_TO_AT_MOST(&som_store[som_loc], from_offset);
  149. }
  150. DEBUG_PRINTF("som_store[%u] set to %llu\n", som_loc, som_store[som_loc]);
  151. }
  152. void handleSomInternal(struct hs_scratch *scratch,
  153. const struct som_operation *ri, const u64a to_offset) {
  154. assert(scratch);
  155. assert(ri);
  156. DEBUG_PRINTF("-->som action required at %llu\n", to_offset);
  157. // SOM handling at scan time operates on data held in scratch. In
  158. // streaming mode, this data is read from / written out to stream state at
  159. // stream write boundaries.
  160. struct core_info *ci = &scratch->core_info;
  161. const struct RoseEngine *rose = ci->rose;
  162. assert(rose->hasSom);
  163. const u32 som_store_count = rose->somLocationCount;
  164. u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
  165. u8 *som_store_writable = (u8 *)ci->state + rose->stateOffsets.somWritable;
  166. struct fatbit *som_set_now = scratch->som_set_now;
  167. struct fatbit *som_attempted_set = scratch->som_attempted_set;
  168. u64a *som_store = scratch->som_store;
  169. u64a *som_failed_store = scratch->som_attempted_store;
  170. if (to_offset != scratch->som_set_now_offset) {
  171. assert(scratch->som_set_now_offset == ~0ULL
  172. || to_offset > scratch->som_set_now_offset);
  173. DEBUG_PRINTF("setting som_set_now_offset=%llu\n", to_offset);
  174. fatbit_clear(som_set_now);
  175. fatbit_clear(som_attempted_set);
  176. scratch->som_set_now_offset = to_offset;
  177. }
  178. switch (ri->type) {
  179. case SOM_INTERNAL_LOC_SET:
  180. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET\n");
  181. mmbit_set(som_store_valid, som_store_count, ri->onmatch);
  182. setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
  183. return;
  184. case SOM_INTERNAL_LOC_SET_IF_UNSET:
  185. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_UNSET\n");
  186. if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
  187. ri->onmatch)) {
  188. setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
  189. }
  190. return;
  191. case SOM_INTERNAL_LOC_SET_IF_WRITABLE: {
  192. u32 slot = ri->onmatch;
  193. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
  194. if (ok_and_mark_if_write(som_store_valid, som_set_now,
  195. som_store_writable, som_store_count, slot)) {
  196. setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
  197. mmbit_unset(som_store_writable, som_store_count, slot);
  198. } else {
  199. /* not writable, stash as an attempted write in case we are
  200. * racing our escape. */
  201. DEBUG_PRINTF("not writable, stashing attempt\n");
  202. assert(to_offset >= ri->aux.somDistance);
  203. u64a start_offset = to_offset - ri->aux.somDistance;
  204. if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
  205. som_failed_store[slot] = start_offset;
  206. } else {
  207. LIMIT_TO_AT_MOST(&som_failed_store[slot], start_offset);
  208. }
  209. DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
  210. som_failed_store[slot]);
  211. }
  212. return;
  213. }
  214. case SOM_INTERNAL_LOC_SET_REV_NFA:
  215. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA\n");
  216. mmbit_set(som_store_valid, som_store_count, ri->onmatch);
  217. setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count, ri,
  218. to_offset);
  219. return;
  220. case SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET:
  221. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET\n");
  222. if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
  223. ri->onmatch)) {
  224. setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
  225. ri, to_offset);
  226. }
  227. return;
  228. case SOM_INTERNAL_LOC_SET_REV_NFA_IF_WRITABLE: {
  229. u32 slot = ri->onmatch;
  230. DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
  231. if (ok_and_mark_if_write(som_store_valid, som_set_now,
  232. som_store_writable, som_store_count, slot)) {
  233. setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
  234. ri, to_offset);
  235. mmbit_unset(som_store_writable, som_store_count, slot);
  236. } else {
  237. /* not writable, stash as an attempted write in case we are
  238. * racing our escape. */
  239. DEBUG_PRINTF("not writable, stashing attempt\n");
  240. u64a from_offset = 0;
  241. runRevNfa(scratch, ri, to_offset, &from_offset);
  242. if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
  243. som_failed_store[slot] = from_offset;
  244. } else {
  245. LIMIT_TO_AT_MOST(&som_failed_store[slot], from_offset);
  246. }
  247. DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
  248. som_failed_store[slot]);
  249. }
  250. return;
  251. }
  252. case SOM_INTERNAL_LOC_COPY: {
  253. u32 slot_in = ri->aux.somDistance;
  254. u32 slot_out = ri->onmatch;
  255. DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY S[%u] = S[%u]\n", slot_out,
  256. slot_in);
  257. assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
  258. mmbit_set(som_store_valid, som_store_count, slot_out);
  259. fatbit_set(som_set_now, som_store_count, slot_out);
  260. som_store[slot_out] = som_store[slot_in];
  261. return;
  262. }
  263. case SOM_INTERNAL_LOC_COPY_IF_WRITABLE: {
  264. u32 slot_in = ri->aux.somDistance;
  265. u32 slot_out = ri->onmatch;
  266. DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY_IF_WRITABLE S[%u] = S[%u]\n",
  267. slot_out, slot_in);
  268. assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
  269. if (ok_and_mark_if_write(som_store_valid, som_set_now,
  270. som_store_writable, som_store_count,
  271. slot_out)) {
  272. DEBUG_PRINTF("copy, set som_store[%u]=%llu\n", slot_out,
  273. som_store[slot_in]);
  274. som_store[slot_out] = som_store[slot_in];
  275. fatbit_set(som_set_now, som_store_count, slot_out);
  276. mmbit_unset(som_store_writable, som_store_count, slot_out);
  277. } else {
  278. /* not writable, stash as an attempted write in case we are
  279. * racing our escape */
  280. DEBUG_PRINTF("not writable, stashing attempt\n");
  281. fatbit_set(som_attempted_set, som_store_count, slot_out);
  282. som_failed_store[slot_out] = som_store[slot_in];
  283. DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot_out,
  284. som_failed_store[slot_out]);
  285. }
  286. return;
  287. }
  288. case SOM_INTERNAL_LOC_MAKE_WRITABLE: {
  289. u32 slot = ri->onmatch;
  290. DEBUG_PRINTF("SOM_INTERNAL_LOC_MAKE_WRITABLE\n");
  291. /* if just written to the loc, ignore the racing escape */
  292. if (fatbit_isset(som_set_now, som_store_count, slot)) {
  293. DEBUG_PRINTF("just written\n");
  294. return;
  295. }
  296. if (fatbit_isset(som_attempted_set, som_store_count, slot)) {
  297. /* writes were waiting for an escape to arrive */
  298. DEBUG_PRINTF("setting som_store[%u] = %llu from "
  299. "som_failed_store[%u]\n", slot, som_failed_store[slot],
  300. slot);
  301. som_store[slot] = som_failed_store[slot];
  302. fatbit_set(som_set_now, som_store_count, slot);
  303. return;
  304. }
  305. mmbit_set(som_store_writable, som_store_count, slot);
  306. return;
  307. }
  308. default:
  309. DEBUG_PRINTF("unknown report type!\n");
  310. break;
  311. }
  312. // All valid som_operation types should be handled and returned above.
  313. assert(0);
  314. return;
  315. }
/**
 * \brief Resolve the start-of-match offset for an external (user-visible)
 * report.
 *
 * \return the SOM offset for the match ending at \a to_offset.
 */
u64a handleSomExternal(struct hs_scratch *scratch,
                       const struct som_operation *ri,
                       const u64a to_offset) {
    assert(scratch);
    assert(ri);

    // SOM handling at scan time operates on data held in scratch. In
    // streaming mode, this data is read from / written out to stream state at
    // stream write boundaries.

    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    assert(rose->hasSom);

    switch (ri->type) {
    case SOM_EXTERNAL_CALLBACK_REL:
        // som lies a fixed distance before the match end.
        DEBUG_PRINTF("SOM_EXTERNAL_CALLBACK_REL: som is %llu chars back\n",
                     ri->aux.somDistance);
        assert(to_offset >= ri->aux.somDistance);
        return to_offset - ri->aux.somDistance;
    case SOM_EXTERNAL_CALLBACK_ABS:
        // som is a fixed absolute offset.
        DEBUG_PRINTF("SOM_EXTERNAL_CALLBACK_ABS: som is at %llu\n",
                     ri->aux.somDistance);
        assert(to_offset >= ri->aux.somDistance);
        return ri->aux.somDistance;
    case SOM_EXTERNAL_CALLBACK_STORED: {
        // som was stashed earlier in a som store slot; here aux.somDistance
        // is reused as the slot index.
        const u64a *som_store = scratch->som_store;
        u32 slot = ri->aux.somDistance;
        DEBUG_PRINTF("SOM_EXTERNAL_CALLBACK_STORED: <- som_store[%u]=%llu\n",
                     slot, som_store[slot]);

        UNUSED const u32 som_store_count = rose->somLocationCount;
        UNUSED const u8 *som_store_valid = (u8 *)ci->state
                                         + rose->stateOffsets.somValid;
        assert(mmbit_isset(som_store_valid, som_store_count, slot));
        return som_store[slot];
    }
    case SOM_EXTERNAL_CALLBACK_REV_NFA: {
        // som must be computed by running a reverse NFA back from the match
        // end.
        DEBUG_PRINTF("SOM_EXTERNAL_CALLBACK_REV_NFA\n");
        u64a from_offset = 0;
        runRevNfa(scratch, ri, to_offset, &from_offset);
        return from_offset;
    }
    default:
        DEBUG_PRINTF("unknown report type!\n");
        break;
    }

    // All valid som_operation types should be handled and returned above.
    assert(0);
    return 0;
}
/**
 * \brief Set a som store slot from a SOM-aware source, which has already
 * computed \a from_offset itself (rather than deriving it from the match
 * end).
 *
 * Handles only SOM_INTERNAL_LOC_SET_FROM and
 * SOM_INTERNAL_LOC_SET_FROM_IF_WRITABLE; the _IF_WRITABLE variant stashes
 * the write in the attempted store when the slot is not writable, in case we
 * are racing our escape.
 */
void setSomFromSomAware(struct hs_scratch *scratch,
                        const struct som_operation *ri, u64a from_offset,
                        u64a to_offset) {
    assert(scratch);
    assert(ri);
    assert(to_offset);
    assert(ri->type == SOM_INTERNAL_LOC_SET_FROM
           || ri->type == SOM_INTERNAL_LOC_SET_FROM_IF_WRITABLE);

    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    assert(rose->hasSom);

    const u32 som_store_count = rose->somLocationCount;
    u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
    u8 *som_store_writable = (u8 *)ci->state + rose->stateOffsets.somWritable;
    struct fatbit *som_set_now = scratch->som_set_now;
    struct fatbit *som_attempted_set = scratch->som_attempted_set;
    u64a *som_store = scratch->som_store;
    u64a *som_failed_store = scratch->som_attempted_store;

    // The per-offset logs only apply to a single offset; reset them when we
    // move on.
    if (to_offset != scratch->som_set_now_offset) {
        DEBUG_PRINTF("setting som_set_now_offset=%llu\n", to_offset);
        fatbit_clear(som_set_now);
        fatbit_clear(som_attempted_set);
        scratch->som_set_now_offset = to_offset;
    }

    if (ri->type == SOM_INTERNAL_LOC_SET_FROM) {
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_FROM\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLoc(som_set_now, som_store, som_store_count, ri, from_offset);
    } else {
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_FROM_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count,
                                 ri->onmatch)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, from_offset);
            mmbit_unset(som_store_writable, som_store_count, ri->onmatch);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            assert(to_offset >= ri->aux.somDistance);
            u32 som_loc = ri->onmatch;

            // Keep the earliest attempted som if several attempts race at
            // this offset.
            if (!fatbit_set(som_attempted_set, som_store_count, ri->onmatch)) {
                som_failed_store[som_loc] = from_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[som_loc], from_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", som_loc,
                         som_failed_store[som_loc]);
        }
    }
}
/**
 * \brief Fire the user callback for every match recorded in the given som
 * log, then clear the log.
 *
 * \return 1 if the user callback asked us to halt (status updated to
 * STATUS_TERMINATED), 0 otherwise.
 */
static really_inline
int clearSomLog(struct hs_scratch *scratch, u64a offset, struct fatbit *log,
                const u64a *starts) {
    DEBUG_PRINTF("at %llu\n", offset);
    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    const u32 dkeyCount = rose->dkeyCount;
    // Table mapping dedupe keys back to external report ids.
    const u32 *dkey_to_report = (const u32 *)
        ((const char *)rose + rose->invDkeyOffset);
    u32 flags = 0;
#ifndef RELEASE_BUILD
    // Matches flushed at a later offset than they were recorded get flagged
    // as adjusted (debug builds only).
    if (scratch->deduper.current_report_offset != offset) {
        flags |= HS_MATCH_FLAG_ADJUSTED;
    }
#endif

    // Report every dedupe key set in the log, using its logged start offset.
    for (u32 it = fatbit_iterate(log, dkeyCount, MMB_INVALID);
         it != MMB_INVALID; it = fatbit_iterate(log, dkeyCount, it)) {
        u64a from_offset = starts[it];
        u32 onmatch = dkey_to_report[it];
        int halt = ci->userCallback(onmatch, from_offset, offset, flags,
                                    ci->userContext);
        if (halt) {
            ci->status |= STATUS_TERMINATED;
            return 1;
        }
    }
    fatbit_clear(log);
    return 0;
}
/**
 * \brief Flush any deferred SOM matches held in the deduper's two
 * alternating som logs, firing user callbacks as required.
 *
 * \return non-zero if the user callback halted matching.
 */
int flushStoredSomMatches_i(struct hs_scratch *scratch, u64a offset) {
    DEBUG_PRINTF("flush som matches\n");
    int halt = 0;

    assert(!told_to_stop_matching(scratch));

    if (scratch->deduper.current_report_offset == ~0ULL) {
        /* no matches recorded yet; just need to clear the logs */
        fatbit_clear(scratch->deduper.som_log[0]);
        fatbit_clear(scratch->deduper.som_log[1]);
        scratch->deduper.som_log_dirty = 0;
        return 0;
    }

    /* fire any reports from the logs and clear them */
    if (offset == scratch->deduper.current_report_offset + 1) {
        // Only one of the two alternating logs is complete; flush it and
        // shift the dirty bit for the remaining one.
        struct fatbit *done_log = scratch->deduper.som_log[offset % 2];
        u64a *done_starts = scratch->deduper.som_start_log[offset % 2];

        halt = clearSomLog(scratch, scratch->deduper.current_report_offset - 1,
                           done_log, done_starts);
        scratch->deduper.som_log_dirty >>= 1;
    } else {
        /* need to report both logs */
        u64a f_offset = scratch->deduper.current_report_offset - 1;
        u64a s_offset = scratch->deduper.current_report_offset;
        struct fatbit *first_log = scratch->deduper.som_log[f_offset % 2];
        u64a *first_starts = scratch->deduper.som_start_log[f_offset % 2];
        struct fatbit *second_log = scratch->deduper.som_log[s_offset % 2];
        u64a *second_starts = scratch->deduper.som_start_log[s_offset % 2];

        // Note: || short-circuits, so the second log is only flushed if the
        // first flush did not halt matching.
        halt = clearSomLog(scratch, f_offset, first_log, first_starts) ||
               clearSomLog(scratch, s_offset, second_log, second_starts);
        scratch->deduper.som_log_dirty = 0;
    }

    return halt;
}