// SPDX-License-Identifier: GPL-3.0-or-later

#include "query.h"
#include "web/api/formatters/rrd2json.h"
#include "rrdr.h"

#include "average/average.h"
#include "countif/countif.h"
#include "incremental_sum/incremental_sum.h"
#include "max/max.h"
#include "median/median.h"
#include "min/min.h"
#include "sum/sum.h"
#include "stddev/stddev.h"
#include "ses/ses.h"
#include "des/des.h"

// ----------------------------------------------------------------------------

static struct {
    const char *name;
    uint32_t hash;
    RRDR_GROUPING value;

    // One-time initialization for the module.
    // This is called once, when netdata starts.
    void (*init)(void);

    // Allocate all required structures for a query.
    // This is called once for each netdata query.
    void (*create)(struct rrdresult *r, const char *options);

    // Cleanup collected values, but don't destroy the structures.
    // This is called when the query engine switches dimensions,
    // as part of the same query (so same chart, switching metric).
    void (*reset)(struct rrdresult *r);

    // Free all resources allocated for the query.
    void (*free)(struct rrdresult *r);

    // Add a single value into the calculation.
    // The module may decide to cache it, or use it on the fly.
    void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);

    // Generate a single result for the values added so far.
    // More values and points may be requested later.
    // It is up to the module to reset its internal structures
    // when flushing (for a few modules it may be better to
    // continue after a flush as if nothing changed, while for others
    // a cleanup of the internal structures may be required).
    NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

    TIER_QUERY_FETCH tier_query_fetch;
} api_v1_data_groups[] = {
    {.name = "average",
     .hash = 0,
     .value = RRDR_GROUPING_AVERAGE,
     .init = NULL,
     .create = grouping_create_average,
     .reset = grouping_reset_average,
     .free = grouping_free_average,
     .add = grouping_add_average,
     .flush = grouping_flush_average,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "mean", // alias of 'average'
     .hash = 0,
     .value = RRDR_GROUPING_AVERAGE,
     .init = NULL,
     .create = grouping_create_average,
     .reset = grouping_reset_average,
     .free = grouping_free_average,
     .add = grouping_add_average,
     .flush = grouping_flush_average,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "incremental_sum",
     .hash = 0,
     .value = RRDR_GROUPING_INCREMENTAL_SUM,
     .init = NULL,
     .create = grouping_create_incremental_sum,
     .reset = grouping_reset_incremental_sum,
     .free = grouping_free_incremental_sum,
     .add = grouping_add_incremental_sum,
     .flush = grouping_flush_incremental_sum,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "incremental-sum",
     .hash = 0,
     .value = RRDR_GROUPING_INCREMENTAL_SUM,
     .init = NULL,
     .create = grouping_create_incremental_sum,
     .reset = grouping_reset_incremental_sum,
     .free = grouping_free_incremental_sum,
     .add = grouping_add_incremental_sum,
     .flush = grouping_flush_incremental_sum,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "median",
     .hash = 0,
     .value = RRDR_GROUPING_MEDIAN,
     .init = NULL,
     .create = grouping_create_median,
     .reset = grouping_reset_median,
     .free = grouping_free_median,
     .add = grouping_add_median,
     .flush = grouping_flush_median,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "min",
     .hash = 0,
     .value = RRDR_GROUPING_MIN,
     .init = NULL,
     .create = grouping_create_min,
     .reset = grouping_reset_min,
     .free = grouping_free_min,
     .add = grouping_add_min,
     .flush = grouping_flush_min,
     .tier_query_fetch = TIER_QUERY_FETCH_MIN
    },
    {.name = "max",
     .hash = 0,
     .value = RRDR_GROUPING_MAX,
     .init = NULL,
     .create = grouping_create_max,
     .reset = grouping_reset_max,
     .free = grouping_free_max,
     .add = grouping_add_max,
     .flush = grouping_flush_max,
     .tier_query_fetch = TIER_QUERY_FETCH_MAX
    },
    {.name = "sum",
     .hash = 0,
     .value = RRDR_GROUPING_SUM,
     .init = NULL,
     .create = grouping_create_sum,
     .reset = grouping_reset_sum,
     .free = grouping_free_sum,
     .add = grouping_add_sum,
     .flush = grouping_flush_sum,
     .tier_query_fetch = TIER_QUERY_FETCH_SUM
    },

    // standard deviation
    {.name = "stddev",
     .hash = 0,
     .value = RRDR_GROUPING_STDDEV,
     .init = NULL,
     .create = grouping_create_stddev,
     .reset = grouping_reset_stddev,
     .free = grouping_free_stddev,
     .add = grouping_add_stddev,
     .flush = grouping_flush_stddev,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "cv", // coefficient of variation is calculated by stddev
     .hash = 0,
     .value = RRDR_GROUPING_CV,
     .init = NULL,
     .create = grouping_create_stddev, // not an error, stddev calculates this too
     .reset = grouping_reset_stddev,   // not an error, stddev calculates this too
     .free = grouping_free_stddev,     // not an error, stddev calculates this too
     .add = grouping_add_stddev,       // not an error, stddev calculates this too
     .flush = grouping_flush_coefficient_of_variation,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "rsd", // alias of 'cv'
     .hash = 0,
     .value = RRDR_GROUPING_CV,
     .init = NULL,
     .create = grouping_create_stddev, // not an error, stddev calculates this too
     .reset = grouping_reset_stddev,   // not an error, stddev calculates this too
     .free = grouping_free_stddev,     // not an error, stddev calculates this too
     .add = grouping_add_stddev,       // not an error, stddev calculates this too
     .flush = grouping_flush_coefficient_of_variation,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },

    /*
    {.name = "mean", // same as average, no need to define it again
     .hash = 0,
     .value = RRDR_GROUPING_MEAN,
     .setup = NULL,
     .create = grouping_create_stddev,
     .reset = grouping_reset_stddev,
     .free = grouping_free_stddev,
     .add = grouping_add_stddev,
     .flush = grouping_flush_mean,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    */

    /*
    {.name = "variance", // meaningless to offer
     .hash = 0,
     .value = RRDR_GROUPING_VARIANCE,
     .setup = NULL,
     .create = grouping_create_stddev,
     .reset = grouping_reset_stddev,
     .free = grouping_free_stddev,
     .add = grouping_add_stddev,
     .flush = grouping_flush_variance,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    */

    // single exponential smoothing
    {.name = "ses",
     .hash = 0,
     .value = RRDR_GROUPING_SES,
     .init = grouping_init_ses,
     .create = grouping_create_ses,
     .reset = grouping_reset_ses,
     .free = grouping_free_ses,
     .add = grouping_add_ses,
     .flush = grouping_flush_ses,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "ema", // alias of 'ses'
     .hash = 0,
     .value = RRDR_GROUPING_SES,
     .init = NULL,
     .create = grouping_create_ses,
     .reset = grouping_reset_ses,
     .free = grouping_free_ses,
     .add = grouping_add_ses,
     .flush = grouping_flush_ses,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },
    {.name = "ewma", // alias of 'ses'
     .hash = 0,
     .value = RRDR_GROUPING_SES,
     .init = NULL,
     .create = grouping_create_ses,
     .reset = grouping_reset_ses,
     .free = grouping_free_ses,
     .add = grouping_add_ses,
     .flush = grouping_flush_ses,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },

    // double exponential smoothing
    {.name = "des",
     .hash = 0,
     .value = RRDR_GROUPING_DES,
     .init = grouping_init_des,
     .create = grouping_create_des,
     .reset = grouping_reset_des,
     .free = grouping_free_des,
     .add = grouping_add_des,
     .flush = grouping_flush_des,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },

    {.name = "countif",
     .hash = 0,
     .value = RRDR_GROUPING_COUNTIF,
     .init = NULL,
     .create = grouping_create_countif,
     .reset = grouping_reset_countif,
     .free = grouping_free_countif,
     .add = grouping_add_countif,
     .flush = grouping_flush_countif,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    },

    // terminator
    {.name = NULL,
     .hash = 0,
     .value = RRDR_GROUPING_UNDEFINED,
     .init = NULL,
     .create = grouping_create_average,
     .reset = grouping_reset_average,
     .free = grouping_free_average,
     .add = grouping_add_average,
     .flush = grouping_flush_average,
     .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
    }
};
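// Roughly, to register a new grouping method: add an entry above (before the
// NULL terminator) pointing at the module's create/reset/free/add/flush
// callbacks, add the corresponding RRDR_GROUPING_* value, and let
// web_client_api_v1_init_grouping() below compute its hash (and call its
// init(), if any) at startup.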
void web_client_api_v1_init_grouping(void) {
    int i;

    for(i = 0; api_v1_data_groups[i].name ; i++) {
        api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);

        if(api_v1_data_groups[i].init)
            api_v1_data_groups[i].init();
    }
}

const char *group_method2string(RRDR_GROUPING group) {
    int i;

    for(i = 0; api_v1_data_groups[i].name ; i++) {
        if(api_v1_data_groups[i].value == group) {
            return api_v1_data_groups[i].name;
        }
    }

    return "unknown-group-method";
}

RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
    int i;

    uint32_t hash = simple_hash(name);
    for(i = 0; api_v1_data_groups[i].name ; i++)
        if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
            return api_v1_data_groups[i].value;

    return def;
}

const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
    int i;

    for(i = 0; api_v1_data_groups[i].name ; i++)
        if(unlikely(group == api_v1_data_groups[i].value))
            return api_v1_data_groups[i].name;

    return "unknown";
}
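// For illustration only (the fallback value is an arbitrary choice here), this
// is how the name/enum lookups above round-trip a user-supplied group name:
//
//     RRDR_GROUPING method = web_client_api_request_v1_data_group("median", RRDR_GROUPING_AVERAGE);
//     const char *name = web_client_api_request_v1_data_group_to_string(method); // "median"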
static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
    int i, found = 0;
    for(i = 0; !found && api_v1_data_groups[i].name ; i++) {
        if(api_v1_data_groups[i].value == group_method) {
            r->internal.grouping_create = api_v1_data_groups[i].create;
            r->internal.grouping_reset = api_v1_data_groups[i].reset;
            r->internal.grouping_free = api_v1_data_groups[i].free;
            r->internal.grouping_add = api_v1_data_groups[i].add;
            r->internal.grouping_flush = api_v1_data_groups[i].flush;
            r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
            found = 1;
        }
    }
    if(!found) {
        errno = 0;
        internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
        r->internal.grouping_create = grouping_create_average;
        r->internal.grouping_reset = grouping_reset_average;
        r->internal.grouping_free = grouping_free_average;
        r->internal.grouping_add = grouping_add_average;
        r->internal.grouping_flush = grouping_flush_average;
        r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
    }
}

// ----------------------------------------------------------------------------

static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims,
                                                 struct context_param *context_param_list)
{
    RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
    int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));

    if (should_lock)
        rrdset_check_rdlock(r->st);

    if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return;

    int match_ids = 0, match_names = 0;

    if(unlikely(options & RRDR_OPTION_MATCH_IDS))
        match_ids = 1;
    if(unlikely(options & RRDR_OPTION_MATCH_NAMES))
        match_names = 1;

    if(likely(!match_ids && !match_names))
        match_ids = match_names = 1;

    SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);

    RRDDIM *d;
    long c, dims_selected = 0, dims_not_hidden_not_zero = 0;
    for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next) {
        if(   (match_ids && simple_pattern_matches(pattern, d->id))
           || (match_names && simple_pattern_matches(pattern, d->name))
        ) {
            r->od[c] |= RRDR_DIMENSION_SELECTED;
            if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN;
            dims_selected++;

            // since the user needs this dimension
            // make it appear as NONZERO, to return it
            // even if the dimension has only zeros
            // unless option non_zero is set
            if(unlikely(!(options & RRDR_OPTION_NONZERO)))
                r->od[c] |= RRDR_DIMENSION_NONZERO;

            // count the visible dimensions
            if(likely(r->od[c] & RRDR_DIMENSION_NONZERO))
                dims_not_hidden_not_zero++;
        }
        else {
            r->od[c] |= RRDR_DIMENSION_HIDDEN;
            if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
        }
    }
    simple_pattern_free(pattern);

    // check if all dimensions are hidden
    if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
        // there are a few selected dimensions,
        // but they are all zero
        // enable the selected ones
        // to avoid returning an empty chart
        for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next)
            if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED))
                r->od[c] |= RRDR_DIMENSION_NONZERO;
    }
}

// ----------------------------------------------------------------------------
// helpers to find our way in RRDR

static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
    return &r->o[ rrdr_line * r->d ];
}

static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
    return &r->v[ rrdr_line * r->d ];
}

static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
    rrdr_line++;

    internal_error(rrdr_line >= r->n,
                   "QUERY: requested to step above RRDR size for chart '%s'",
                   r->st->name);

    internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
                   "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'",
                   (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);

    // save the time
    r->t[rrdr_line] = t;

    return rrdr_line;
}

static inline void rrdr_done(RRDR *r, long rrdr_line) {
    r->rows = rrdr_line + 1;
}

// ----------------------------------------------------------------------------
// tier management

static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted, time_t before_wanted, long points_wanted) {
    if(unlikely(storage_tiers < 2))
        return 0;

    if(unlikely(after_wanted == before_wanted || points_wanted <= 0 || !rd || !rd->rrdset)) {
        if(!rd)
            internal_error(true, "QUERY: NULL dimension - invalid params to tier calculation");
        else
            internal_error(true, "QUERY: chart '%s' dimension '%s' invalid params to tier calculation",
                           (rd->rrdset)?rd->rrdset->name:"unknown", rd->name);
        return 0;
    }

    //BUFFER *wb = buffer_create(1000);
    //buffer_sprintf(wb, "Best tier for chart '%s', dim '%s', from %ld to %ld (dur %ld, every %d), points %ld",
    //               rd->rrdset->name, rd->name, after_wanted, before_wanted, before_wanted - after_wanted, rd->update_every, points_wanted);

    long weight[storage_tiers];

    for(int tier = 0; tier < storage_tiers ; tier++) {
        if(unlikely(!rd->tiers[tier])) {
            internal_error(true, "QUERY: tier %d of chart '%s' dimension '%s' not initialized",
                           tier, rd->rrdset->name, rd->name);
            // buffer_free(wb);
            return 0;
        }

        time_t first_t = rd->tiers[tier]->query_ops.oldest_time(rd->tiers[tier]->db_metric_handle);
        time_t last_t = rd->tiers[tier]->query_ops.latest_time(rd->tiers[tier]->db_metric_handle);

        time_t common_after = MAX(first_t, after_wanted);
        time_t common_before = MIN(last_t, before_wanted);

        long time_coverage = (common_before - common_after) * 1000 / (before_wanted - after_wanted);
        if(time_coverage < 0) time_coverage = 0;

        int update_every = (int)rd->tiers[tier]->tier_grouping * (int)rd->update_every;
        if(unlikely(update_every == 0)) {
            internal_error(true, "QUERY: update_every of tier %d for chart '%s' dimension '%s' is zero. tg = %d, ue = %d",
                           tier, rd->rrdset->name, rd->name, rd->tiers[tier]->tier_grouping, rd->update_every);
            // buffer_free(wb);
            return 0;
        }

        long points_available = (before_wanted - after_wanted) / update_every;
        long points_delta = points_available - points_wanted;
        long points_coverage = (points_delta < 0) ? points_available * 1000 / points_wanted: 1000;

        if(points_available <= 0)
            weight[tier] = -LONG_MAX;
        else
            weight[tier] = points_coverage;

        // buffer_sprintf(wb, ": tier %d, first %ld, last %ld (dur %ld, tg %d, every %d), points %ld, tcoverage %ld, pcoverage %ld, weight %ld",
        //                tier, first_t, last_t, last_t - first_t, rd->tiers[tier]->tier_grouping, update_every,
        //                points_available, time_coverage, points_coverage, weight[tier]);
    }

    int best_tier = 0;
    for(int tier = 1; tier < storage_tiers ; tier++) {
        if(weight[tier] >= weight[best_tier])
            best_tier = tier;
    }

    if(weight[best_tier] == -LONG_MAX)
        best_tier = 0;

    //buffer_sprintf(wb, ": final best tier %d", best_tier);
    //internal_error(true, "%s", buffer_tostring(wb));
    //buffer_free(wb);

    return best_tier;
}
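// Tier selection (rrddim_find_best_tier_for_timeframe() above), in short:
// each tier is scored with
//     points_available = (before_wanted - after_wanted) / (tier_grouping * update_every)
//     weight = points_available * 1000 / points_wanted    (capped at 1000 when enough points exist)
// so, for example, a tier that can serve only 300 of 600 wanted points scores 500,
// while any tier that can serve all of them scores 1000; ties are resolved in favor
// of the higher (coarser) tier because of the '>=' comparison above.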
static int rrdset_find_natural_update_every_for_timeframe(RRDSET *st, time_t after_wanted, time_t before_wanted, long points_wanted, RRDR_OPTIONS options, int tier) {
    int ret = st->update_every;

    if(unlikely(!st->dimensions))
        return ret;

    rrdset_rdlock(st);

    int best_tier;
    if(options & RRDR_OPTION_SELECTED_TIER && tier >= 0 && tier < storage_tiers)
        best_tier = tier;
    else
        best_tier = rrddim_find_best_tier_for_timeframe(st->dimensions, after_wanted, before_wanted, points_wanted);

    if(!st->dimensions->tiers[best_tier]) {
        internal_error(
            true,
            "QUERY: tier %d on chart '%s' is not initialized", best_tier, st->name);
    }
    else {
        ret = (int)st->dimensions->tiers[best_tier]->tier_grouping * (int)st->update_every;
        if(unlikely(!ret)) {
            internal_error(
                true,
                "QUERY: update_every calculated to be zero on chart '%s', tier_grouping %d, update_every %d",
                st->name, st->dimensions->tiers[best_tier]->tier_grouping, st->update_every);

            ret = st->update_every;
        }
    }

    rrdset_unlock(st);

    return ret;
}

// ----------------------------------------------------------------------------
// query ops

typedef struct query_point {
    time_t end_time;
    time_t start_time;
    NETDATA_DOUBLE value;
    size_t anomaly;
    SN_FLAGS flags;
#ifdef NETDATA_INTERNAL_CHECKS
    size_t id;
#endif
} QUERY_POINT;

QUERY_POINT QUERY_POINT_EMPTY = {
    .end_time = 0,
    .start_time = 0,
    .value = NAN,
    .anomaly = 0,
    .flags = SN_EMPTY_SLOT,
#ifdef NETDATA_INTERNAL_CHECKS
    .id = 0,
#endif
};

#ifdef NETDATA_INTERNAL_CHECKS
#define query_point_set_id(point, point_id) (point).id = point_id
#else
#define query_point_set_id(point, point_id) debug_dummy()
#endif

typedef struct query_plan_entry {
    size_t tier;
    time_t after;
    time_t before;
} QUERY_PLAN_ENTRY;

typedef struct query_plan {
    size_t entries;
    QUERY_PLAN_ENTRY data[RRD_STORAGE_TIERS*2];
} QUERY_PLAN;
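// A query plan is a list of (tier, after, before) windows, sorted by start time,
// that together cover the requested timeframe: the best tier for the bulk of the
// query plus, when that tier is missing data at either end, additional entries
// from other tiers to fill the head and the tail (hence up to RRD_STORAGE_TIERS*2
// entries). See query_plan() below.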
typedef struct query_engine_ops {
    // configuration
    RRDR *r;
    RRDDIM *rd;
    time_t view_update_every;
    time_t query_granularity;
    TIER_QUERY_FETCH tier_query_fetch;

    // query planner
    QUERY_PLAN plan;
    size_t current_plan;
    time_t current_plan_expire_time;

    // storage queries
    size_t tier;
    struct rrddim_tier *tier_ptr;
    struct rrddim_query_handle handle;
    STORAGE_POINT (*next_metric)(struct rrddim_query_handle *handle);
    int (*is_finished)(struct rrddim_query_handle *handle);
    void (*finalize)(struct rrddim_query_handle *handle);

    // aggregating points over time
    void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
    NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
    size_t group_points_non_zero;
    size_t group_points_added;
    size_t group_anomaly_rate;
    RRDR_VALUE_FLAGS group_value_flags;

    // statistics
    size_t db_total_points_read;
    size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
} QUERY_ENGINE_OPS;

// ----------------------------------------------------------------------------
// query planner

#define query_plan_should_switch_plan(ops, now) ((now) >= (ops).current_plan_expire_time)

static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after) {
    if(unlikely(plan_id >= ops->plan.entries))
        plan_id = ops->plan.entries - 1;

    time_t after = ops->plan.data[plan_id].after;
    time_t before = ops->plan.data[plan_id].before;

    if(overwrite_after > after && overwrite_after < before)
        after = overwrite_after;

    ops->tier = ops->plan.data[plan_id].tier;
    ops->tier_ptr = ops->rd->tiers[ops->tier];
    ops->tier_ptr->query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before, ops->r->internal.tier_query_fetch);
    ops->next_metric = ops->tier_ptr->query_ops.next_metric;
    ops->is_finished = ops->tier_ptr->query_ops.is_finished;
    ops->finalize = ops->tier_ptr->query_ops.finalize;
    ops->current_plan = plan_id;
    ops->current_plan_expire_time = ops->plan.data[plan_id].before;
}

static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
    internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
                   "QUERY: switching query plan too early!");

    time_t next_plan_before_time;
    do {
        ops->current_plan++;

        if (ops->current_plan >= ops->plan.entries) {
            ops->current_plan = ops->plan.entries - 1;
            return;
        }

        next_plan_before_time = ops->plan.data[ops->current_plan].before;
    } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);

    if(ops->finalize) {
        ops->finalize(&ops->handle);
        ops->finalize = NULL;
    }

    query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));

    // internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
}

static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
    QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
    QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
    return (p1->after < p2->after)?-1:1;
}

static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, long points_wanted) {
    RRDDIM *rd = ops->rd;

    //BUFFER *wb = buffer_create(1000);
    //buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);

    // put our selected tier as the first plan
    size_t selected_tier;
    if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER && ops->r->internal.query_tier >= 0 && ops->r->internal.query_tier < storage_tiers) {
        selected_tier = ops->r->internal.query_tier;
    }
    else {
        selected_tier = rrddim_find_best_tier_for_timeframe(rd, after_wanted, before_wanted, points_wanted);

        if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
            ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
    }

    ops->plan.entries = 1;
    ops->plan.data[0].tier = selected_tier;
    ops->plan.data[0].after = rd->tiers[selected_tier]->query_ops.oldest_time(rd->tiers[selected_tier]->db_metric_handle);
    ops->plan.data[0].before = rd->tiers[selected_tier]->query_ops.latest_time(rd->tiers[selected_tier]->db_metric_handle);

    if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
        // the selected tier
        time_t selected_tier_first_time_t = ops->plan.data[0].after;
        time_t selected_tier_last_time_t = ops->plan.data[0].before;

        //buffer_sprintf(wb, ": SELECTED tier %zu, from %ld to %ld", selected_tier, ops->plan.data[0].after, ops->plan.data[0].before);

        // check if our selected tier can start the query
        if (selected_tier_first_time_t > after_wanted) {
            // we need some help from other tiers
            for (int tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
                // find the first time of this tier
                time_t first_time_t = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);

                //buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);

                // can it help?
                if (first_time_t < selected_tier_first_time_t) {
                    // it can help us add detail at the beginning of the query
                    QUERY_PLAN_ENTRY t = {
                        .tier = tr,
                        .after = (first_time_t < after_wanted) ? after_wanted : first_time_t,
                        .before = selected_tier_first_time_t};
                    ops->plan.data[ops->plan.entries++] = t;

                    // prepare for the tier
                    selected_tier_first_time_t = t.after;

                    if (t.after <= after_wanted)
                        break;
                }
            }
        }

        // check if our selected tier can finish the query
        if (selected_tier_last_time_t < before_wanted) {
            // we need some help from other tiers
            for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
                // find the last time of this tier
                time_t last_time_t = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);

                //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);

                // can it help?
                if (last_time_t > selected_tier_last_time_t) {
                    // it can help us add detail at the end of the query
                    QUERY_PLAN_ENTRY t = {
                        .tier = tr,
                        .after = selected_tier_last_time_t,
                        .before = (last_time_t > before_wanted) ? before_wanted : last_time_t};
                    ops->plan.data[ops->plan.entries++] = t;

                    // prepare for the tier
                    selected_tier_last_time_t = t.before;

                    if (t.before >= before_wanted)
                        break;
                }
            }
        }
    }

    // sort the query plan
    if(ops->plan.entries > 1)
        qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);

    // make sure it has the whole timeframe we need
    ops->plan.data[0].after = after_wanted;
    ops->plan.data[ops->plan.entries - 1].before = before_wanted;

    //buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);

    //for(size_t i = 0; i < ops->plan.entries ;i++)
    //    buffer_sprintf(wb, ": STEP %zu = use tier %zu from %ld to %ld", i+1, ops->plan.data[i].tier, ops->plan.data[i].after, ops->plan.data[i].before);

    //internal_error(true, "%s", buffer_tostring(wb));

    query_planer_activate_plan(ops, 0, 0);
}

// ----------------------------------------------------------------------------
// dimension level query engine

#define query_interpolate_point(this_point, last_point, now) do {    \
    if(likely(                                                       \
        /* the point to interpolate is more than 1s wide */          \
        (this_point).end_time - (this_point).start_time > 1          \
                                                                     \
        /* the two points are exactly next to each other */          \
        && (last_point).end_time == (this_point).start_time          \
                                                                     \
        /* both points are valid numbers */                          \
        && netdata_double_isnumber((this_point).value)               \
        && netdata_double_isnumber((last_point).value)               \
                                                                     \
    )) {                                                             \
        (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
        (this_point).end_time = now;                                 \
    }                                                                \
} while(0)
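// The macro above linearly interpolates a db point that extends past 'now':
// with f = (end_time - now) / (end_time - start_time), the value becomes
//     last.value + (this.value - last.value) * (1 - f)
// i.e. proportional to the fraction of the point's interval elapsed by 'now',
// and the point's end_time is clipped to 'now'. It only applies when the point
// is wider than 1s, is adjacent to the previous point, and both values are
// valid numbers.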
#define query_add_point_to_group(r, point, ops) do {       \
    if(likely(netdata_double_isnumber((point).value))) {   \
        if(likely((point).value != 0.0))                   \
            (ops).group_points_non_zero++;                 \
                                                           \
        if(unlikely((point).flags & SN_EXISTS_RESET))      \
            (ops).group_value_flags |= RRDR_VALUE_RESET;   \
                                                           \
        (ops).grouping_add(r, (point).value);              \
    }                                                      \
                                                           \
    (ops).group_points_added++;                            \
    (ops).group_anomaly_rate += (point).anomaly;           \
} while(0)
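// rrd2rrdr_do_dimension() below fills one RRDR column: the outer loop walks
// the output timeline in steps of view_update_every, the nested while() reads
// db points up to the current output slot, and the inner for() keeps the last
// three points (last2, last1, new) so it can pick or interpolate the right
// value for each slot, then flushes the grouping module into r->v[], r->o[]
// and r->ar[].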
static inline void rrd2rrdr_do_dimension(
      RRDR *r
    , long points_wanted
    , RRDDIM *rd
    , long dim_id_in_rrdr
    , time_t after_wanted
    , time_t before_wanted
){
    time_t max_date = 0,
           min_date = 0;

    size_t points_added = 0;

    QUERY_ENGINE_OPS ops = {
        .r = r,
        .rd = rd,
        .grouping_add = r->internal.grouping_add,
        .grouping_flush = r->internal.grouping_flush,
        .tier_query_fetch = r->internal.tier_query_fetch,
        .view_update_every = r->update_every,
        .query_granularity = r->update_every / r->group,
        .group_value_flags = RRDR_VALUE_NOTHING
    };

    long rrdr_line = -1;
    bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;

    query_plan(&ops, after_wanted, before_wanted, points_wanted);

    NETDATA_DOUBLE min = r->min, max = r->max;

    QUERY_POINT last2_point = QUERY_POINT_EMPTY;
    QUERY_POINT last1_point = QUERY_POINT_EMPTY;
    QUERY_POINT new_point = QUERY_POINT_EMPTY;

    time_t now_start_time = after_wanted - ops.query_granularity;
    time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;

    // The main loop, based on the query granularity we need
    for( ; (long)points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
        if(query_plan_should_switch_plan(ops, now_end_time))
            query_planer_next_plan(&ops, now_end_time, new_point.end_time);

        // read all the points of the db, prior to the time we need (now_end_time)

        size_t count_same_end_time = 0;
        while(count_same_end_time < 100) {
            if(likely(count_same_end_time == 0)) {
                last2_point = last1_point;
                last1_point = new_point;
            }

            if(unlikely(ops.is_finished(&ops.handle))) {
                if(count_same_end_time != 0) {
                    last2_point = last1_point;
                    last1_point = new_point;
                }
                new_point = QUERY_POINT_EMPTY;
                new_point.start_time = last1_point.end_time;
                new_point.end_time = now_end_time;
                break;
            }

            // fetch the new point
            {
                STORAGE_POINT sp = ops.next_metric(&ops.handle);

                ops.db_points_read_per_tier[ops.tier]++;
                ops.db_total_points_read++;

                new_point.start_time = sp.start_time;
                new_point.end_time = sp.end_time;
                new_point.anomaly = sp.count ? sp.anomaly_count * 100 / sp.count : 0;
                query_point_set_id(new_point, ops.db_total_points_read);

                // set the right value to the point we got
                if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {
                    if(unlikely(use_anomaly_bit_as_value))
                        new_point.value = (NETDATA_DOUBLE)new_point.anomaly;
                    else {
                        switch (ops.tier_query_fetch) {
                            default:
                            case TIER_QUERY_FETCH_AVERAGE:
                                new_point.value = sp.sum / sp.count;
                                break;

                            case TIER_QUERY_FETCH_MIN:
                                new_point.value = sp.min;
                                break;

                            case TIER_QUERY_FETCH_MAX:
                                new_point.value = sp.max;
                                break;

                            case TIER_QUERY_FETCH_SUM:
                                new_point.value = sp.sum;
                                break;
                        };
                    }
                }
                else {
                    new_point.value = NAN;
                    new_point.flags = SN_EMPTY_SLOT;
                }
            }

            // check if the db is giving us zero duration points
            if(unlikely(new_point.start_time == new_point.end_time)) {
                internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu with start time %ld equal to its end time %ld",
                               rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time);

                new_point.start_time = new_point.end_time - ((time_t)ops.tier_ptr->tier_grouping * (time_t)ops.rd->update_every);
            }

            // check if the db is advancing the query
            if(unlikely(new_point.end_time <= last1_point.end_time)) {
                internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, before the last point %zu end time %ld, now is %ld to %ld",
                               rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time,
                               last1_point.id, last1_point.end_time, now_start_time, now_end_time);

                count_same_end_time++;
                continue;
            }
            count_same_end_time = 0;

            // decide how to use this point
            if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
                // this db point ends before our now_end_time

                if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
                    // this db point ends after our now_start time
                    query_add_point_to_group(r, new_point, ops);
                }
                else {
                    // we don't need this db point
                    // it is totally outside our current time-frame

                    // this is desirable for the first point of the query
                    // because it allows us to interpolate the next point
                    // at exactly the time we will want

                    // we only log if this is not point 1
                    internal_error(new_point.end_time < after_wanted && new_point.id > 1,
                                   "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
                                   rd->rrdset->name, rd->name,
                                   new_point.id, new_point.start_time, new_point.end_time,
                                   now_start_time, now_end_time,
                                   after_wanted, before_wanted);
                }
            }
            else {
                // the point ends in the future
                // so, we will interpolate it below, at the inner loop
                break;
            }
        }

        if(unlikely(count_same_end_time)) {
            internal_error(true,
                           "QUERY: the database does not advance the query, it returned an end time less than or equal to the end time of the last point we got %ld, %zu times",
                           last1_point.end_time, count_same_end_time);

            if(unlikely(new_point.end_time <= last1_point.end_time))
                new_point.end_time = now_end_time;
        }

        // the inner loop
        // we have 3 points in memory: last2, last1, new
        // we select the one to use based on their timestamps

        size_t iterations = 0;
        for ( ; now_end_time <= new_point.end_time && (long)points_added < points_wanted ;
                now_end_time += ops.view_update_every, iterations++) {

            // now_start_time is wrong in this loop
            // but, we don't need it

            QUERY_POINT current_point;

            if(likely(now_end_time > new_point.start_time)) {
                // it is time for our NEW point to be used
                current_point = new_point;
                query_interpolate_point(current_point, last1_point, now_end_time);

                internal_error(current_point.id > 0 && last1_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
                               "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
                               " interpolating point %zu (from %ld to %ld) at %ld, but it would have helped to have last_point1 in this query.",
                               rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
                               current_point.id, current_point.start_time, current_point.end_time, now_end_time);
            }
            else if(likely(now_end_time <= last1_point.end_time)) {
                // our LAST point is still valid
                current_point = last1_point;
                query_interpolate_point(current_point, last2_point, now_end_time);

                internal_error(current_point.id > 0 && last2_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
                               "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
                               " interpolating point %zu (from %ld to %ld) at %ld, but it would have helped to have last_point2 in this query.",
                               rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
                               current_point.id, current_point.start_time, current_point.end_time, now_end_time);
            }
            else {
                // a GAP, we don't have a value this time
                current_point = QUERY_POINT_EMPTY;
            }

            query_add_point_to_group(r, current_point, ops);

            rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
            size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;

            if(unlikely(!min_date)) min_date = now_end_time;
            max_date = now_end_time;

            // find the place to store our values
            RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];

            // update the dimension options
            if(likely(ops.group_points_non_zero))
                r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;

            // store the specific point options
            *rrdr_value_options_ptr = ops.group_value_flags;

            // store the group value
            NETDATA_DOUBLE group_value = ops.grouping_flush(r, rrdr_value_options_ptr);
            r->v[rrdr_o_v_index] = group_value;

            // we only store uint8_t anomaly rates,
            // so let's get double precision by storing
            // anomaly rates in the range 0 - 200
            ops.group_anomaly_rate = (ops.group_anomaly_rate << 1) / ops.group_points_added;
            r->ar[rrdr_o_v_index] = (uint8_t)ops.group_anomaly_rate;

            if(likely(points_added || dim_id_in_rrdr)) {
                // find the min/max across all dimensions
                if(unlikely(group_value < min)) min = group_value;
                if(unlikely(group_value > max)) max = group_value;
            }
            else {
                // runs only when dim_id_in_rrdr == 0 && points_added == 0
                // so, on the first point added for the query.
                min = max = group_value;
            }

            points_added++;
            ops.group_points_added = 0;
            ops.group_value_flags = RRDR_VALUE_NOTHING;
            ops.group_points_non_zero = 0;
            ops.group_anomaly_rate = 0;
        }

        // the loop above increased "now_end_time" by view_update_every,
        // but the main loop will increase it too,
        // so, let's undo the last iteration of this loop
        if(iterations)
            now_end_time -= ops.view_update_every;
    }
    ops.finalize(&ops.handle);

    r->internal.result_points_generated += points_added;
    r->internal.db_points_read += ops.db_total_points_read;
    for(int tr = 0; tr < storage_tiers ; tr++)
        r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];

    r->min = min;
    r->max = max;
    r->before = max_date;
    r->after = min_date - ops.view_update_every + ops.query_granularity;
    rrdr_done(r, rrdr_line);

    internal_error((long)points_added != points_wanted,
                   "QUERY: query on %s/%s requested %zu points, but RRDR added %zu (%zu db points read).",
                   r->st->name, rd->name, (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
}
// ----------------------------------------------------------------------------
// fill the gap of a tier

extern void store_metric_at_tier(RRDDIM *rd, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);

void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
    if(unlikely(tier < 0 || tier >= storage_tiers)) return;
    if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;

    struct rrddim_tier *t = rd->tiers[tier];
    if(unlikely(!t)) return;

    time_t latest_time_t = t->query_ops.latest_time(t->db_metric_handle);
    time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
    time_t time_diff = now - latest_time_t;

    // if the user wants only NEW backfilling, and we don't have any data
    if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_t <= 0) return;

    // there is really nothing we can do
    if(now <= latest_time_t || time_diff < granularity) return;

    struct rrddim_query_handle handle;

    size_t all_points_read = 0;

    // for each lower tier
    for(int tr = tier - 1; tr >= 0 ;tr--){
        time_t smaller_tier_first_time = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
        time_t smaller_tier_last_time = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
        if(smaller_tier_last_time <= latest_time_t) continue;  // it is as bad as we are

        long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
        long before_wanted = smaller_tier_last_time;

        struct rrddim_tier *tmp = rd->tiers[tr];
        tmp->query_ops.init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, TIER_QUERY_FETCH_AVERAGE);

        size_t points = 0;

        while(!tmp->query_ops.is_finished(&handle)) {
            STORAGE_POINT sp = tmp->query_ops.next_metric(&handle);

            if(sp.end_time > latest_time_t) {
                latest_time_t = sp.end_time;
                store_metric_at_tier(rd, t, sp, sp.end_time * USEC_PER_SEC);
                points++;
            }
        }

        all_points_read += points;
        tmp->query_ops.finalize(&handle);

        //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
        //               rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
    }

    rrdr_query_completed(all_points_read, all_points_read);
}

// ----------------------------------------------------------------------------
// fill RRDR for the whole chart

#ifdef NETDATA_INTERNAL_CHECKS
static void rrd2rrdr_log_request_response_metadata(RRDR *r
        , RRDR_OPTIONS options __maybe_unused
        , RRDR_GROUPING group_method
        , bool aligned
        , long group
        , long resampling_time
        , long resampling_group
        , time_t after_wanted
        , time_t after_requested
        , time_t before_wanted
        , time_t before_requested
        , long points_requested
        , long points_wanted
        //, size_t after_slot
        //, size_t before_slot
        , const char *msg
) {
    netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
    info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
         "after (got: %zu, want: %zu, req: %ld, db: %zu), "
         "before (got: %zu, want: %zu, req: %ld, db: %zu), "
         "duration (got: %zu, want: %zu, req: %ld, db: %zu), "
         //"slot (after: %zu, before: %zu, delta: %zu), "
         "points (got: %ld, want: %ld, req: %ld, db: %ld), "
         "%s"
         , r->st->name
         , r->st->update_every

         // grouping
         , (aligned) ? "aligned" : "unaligned"
         , group_method2string(group_method)
         , group
         , resampling_time
         , resampling_group

         // after
         , (size_t)r->after
         , (size_t)after_wanted
         , after_requested
         , (size_t)rrdset_first_entry_t_nolock(r->st)

         // before
         , (size_t)r->before
         , (size_t)before_wanted
         , before_requested
         , (size_t)rrdset_last_entry_t_nolock(r->st)

         // duration
         , (size_t)(r->before - r->after + r->st->update_every)
         , (size_t)(before_wanted - after_wanted + r->st->update_every)
         , before_requested - after_requested
         , (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)

         // slot
         /*
         , after_slot
         , before_slot
         , (after_slot > before_slot) ? (r->st->entries - after_slot + before_slot) : (before_slot - after_slot)
         */

         // points
         , r->rows
         , points_wanted
         , points_requested
         , r->st->entries

         // message
         , msg
    );
    netdata_rwlock_unlock(&r->st->rrdset_rwlock);
}
#endif // NETDATA_INTERNAL_CHECKS

// Returns 1 if an absolute period was requested or 0 if it was a relative period
int rrdr_relative_window_to_absolute(long long *after, long long *before) {
    time_t now = now_realtime_sec() - 1;

    int absolute_period_requested = -1;
    long long after_requested, before_requested;

    before_requested = *before;
    after_requested = *after;

    // allow relative for before (smaller than API_RELATIVE_TIME_MAX)
    if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
        // if the user asked for a positive relative time,
        // flip it to a negative
        if(before_requested > 0)
            before_requested = -before_requested;

        before_requested = now + before_requested;
        absolute_period_requested = 0;
    }

    // allow relative for after (smaller than API_RELATIVE_TIME_MAX)
    if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
        if(after_requested > 0)
            after_requested = -after_requested;

        // if the user didn't give an after, use the number of points
        // to give a sane default
        if(after_requested == 0)
            after_requested = -600;

        // since the query engine now returns inclusive timestamps
        // it is awkward to return 6 points when after=-5 is given
        // so for relative queries we add 1 second, to give
        // more predictable results to users.
        after_requested = before_requested + after_requested + 1;
        absolute_period_requested = 0;
    }

    if(absolute_period_requested == -1)
        absolute_period_requested = 1;

    // check if the parameters are flipped
    if(after_requested > before_requested) {
        long long t = before_requested;
        before_requested = after_requested;
        after_requested = t;
    }

    // if the query requests future data
    // shift the query back to be in the present time
    // (this may also happen because of the rules above)
    if(before_requested > now) {
        long long delta = before_requested - now;
        before_requested -= delta;
        after_requested -= delta;
    }

    *before = before_requested;
    *after = after_requested;

    return absolute_period_requested;
}
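// Example (illustrative values): with now = T (i.e. now_realtime_sec() - 1), a
// relative request of after = -600, before = 0 becomes before = T and
// after = T - 600 + 1 = T - 599, an inclusive window of exactly 600 points at
// 1s granularity, and the function returns 0 (relative). Absolute timestamps
// (larger than API_RELATIVE_TIME_MAX) are only swapped if flipped, or shifted
// back if they extend into the future, and the function returns 1.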
  1090. // #define DEBUG_QUERY_LOGIC 1
  1091. #ifdef DEBUG_QUERY_LOGIC
  1092. #define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
  1093. #define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
  1094. #define query_debug_log_fin() { \
  1095. info("QUERY: chart '%s', after:%lld, before:%lld, duration:%lld, points:%ld, res:%ld - wanted => after:%lld, before:%lld, points:%ld, group:%ld, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", st->name, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
  1096. buffer_free(debug_log); \
  1097. debug_log = NULL; \
  1098. }
  1099. #define query_debug_log_free() do { buffer_free(debug_log); } while(0)
  1100. #else
  1101. #define query_debug_log_init() debug_dummy()
  1102. #define query_debug_log(args...) debug_dummy()
  1103. #define query_debug_log_fin() debug_dummy()
  1104. #define query_debug_log_free() debug_dummy()
  1105. #endif
RRDR *rrd2rrdr(
          ONEWAYALLOC *owa
        , RRDSET *st
        , long points_requested
        , long long after_requested
        , long long before_requested
        , RRDR_GROUPING group_method
        , long resampling_time_requested
        , RRDR_OPTIONS options
        , const char *dimensions
        , struct context_param *context_param_list
        , const char *group_options
        , int timeout
        , int tier
) {
    // RULES
    // points_requested = 0
    // the user wants all the natural points the database has
    //
    // after_requested = 0
    // the user wants to start the query from the oldest point in our database
    //
    // before_requested = 0
    // the user wants the query to end at the latest point in our database
    //
    // when natural points are wanted, the query has to be aligned to the update_every
    // of the database

    long points_wanted = points_requested;
    long long after_wanted = after_requested;
    long long before_wanted = before_requested;

    int update_every = st->update_every;

    bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
    bool automatic_natural_points = (points_wanted == 0);
    bool relative_period_requested = false;
    bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
    bool before_is_aligned_to_db_end = false;

    query_debug_log_init();

    // make sure points_wanted is positive
    if(points_wanted < 0) {
        points_wanted = -points_wanted;
        query_debug_log(":-points_wanted %ld", points_wanted);
    }

    if(ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
        relative_period_requested = true;
        natural_points = true;
        options |= RRDR_OPTION_NATURAL_POINTS;
        query_debug_log(":relative+natural");
    }
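    // Example of the check above (illustrative values): a request with
    // after = -600 and before = 0 has both magnitudes below
    // API_RELATIVE_TIME_MAX, so it is flagged as a relative window ("the last
    // 600 seconds") and forced to natural points, while a request carrying
    // full unix timestamps exceeds the threshold and is treated as absolute.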
    // if the user wants virtual points, make sure we do it
    if(options & RRDR_OPTION_VIRTUAL_POINTS)
        natural_points = false;

    // set the right flag about natural and virtual points
    if(natural_points) {
        options |= RRDR_OPTION_NATURAL_POINTS;

        if(options & RRDR_OPTION_VIRTUAL_POINTS)
            options &= ~RRDR_OPTION_VIRTUAL_POINTS;
    }
    else {
        options |= RRDR_OPTION_VIRTUAL_POINTS;

        if(options & RRDR_OPTION_NATURAL_POINTS)
            options &= ~RRDR_OPTION_NATURAL_POINTS;
    }

    if(after_wanted == 0 || before_wanted == 0) {
        // for non-context queries we have to find the duration of the database
        // for context queries we will assume 600 seconds duration

        if(!context_param_list) {
            relative_period_requested = true;

            rrdset_rdlock(st);
            time_t first_entry_t = rrdset_first_entry_t_nolock(st);
            time_t last_entry_t = rrdset_last_entry_t_nolock(st);
            rrdset_unlock(st);

            if(first_entry_t == 0 || last_entry_t == 0) {
                internal_error(true, "QUERY: chart without data detected on '%s'", st->name);
                query_debug_log_free();
                return NULL;
            }

            query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);

            if (after_wanted == 0) {
                after_wanted = first_entry_t;
                query_debug_log(":zero after_wanted %lld", after_wanted);
            }

            if (before_wanted == 0) {
                before_wanted = last_entry_t;
                before_is_aligned_to_db_end = true;
                query_debug_log(":zero before_wanted %lld", before_wanted);
            }

            if(points_wanted == 0) {
                points_wanted = (last_entry_t - first_entry_t) / update_every;
                query_debug_log(":zero points_wanted %ld", points_wanted);
            }
        }

        // if they are still zero, assume 600
        if(after_wanted == 0) {
            after_wanted = -600;
            query_debug_log(":zero600 after_wanted %lld", after_wanted);
        }
    }

    if(points_wanted == 0) {
        points_wanted = 600;
        query_debug_log(":zero600 points_wanted %ld", points_wanted);
    }

    // convert our before_wanted and after_wanted to absolute
    rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
    query_debug_log(":relative2absolute after %lld, before %lld", after_wanted, before_wanted);

    if(natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
        update_every = rrdset_find_natural_update_every_for_timeframe(st, after_wanted, before_wanted, points_wanted, options, tier);
        if(update_every <= 0) update_every = st->update_every;
        query_debug_log(":natural update every %d", update_every);
    }

    // this is the update_every of the query
    // it may be different to the update_every of the database
    time_t query_granularity = (natural_points)?update_every:1;
    if(query_granularity <= 0) query_granularity = 1;
    query_debug_log(":query_granularity %ld", query_granularity);

    // align before_wanted and after_wanted to query_granularity
    if (before_wanted % query_granularity) {
        before_wanted -= before_wanted % query_granularity;
        query_debug_log(":granularity align before_wanted %lld", before_wanted);
    }

    if (after_wanted % query_granularity) {
        after_wanted -= after_wanted % query_granularity;
        query_debug_log(":granularity align after_wanted %lld", after_wanted);
    }
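    // Illustrative example (hypothetical numbers): with natural points and an
    // update_every of 10s, query_granularity is 10, so a before_wanted of
    // 1000000005 is floored to 1000000000 and an after_wanted of 999999403 is
    // floored to 999999400, putting both edges on database slot boundaries.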
    // automatic_natural_points is set when the user wants all the points available in the database
    if(automatic_natural_points) {
        points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
        if(unlikely(points_wanted <= 0)) points_wanted = 1;
        query_debug_log(":auto natural points_wanted %ld", points_wanted);
    }
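    // Continuing the example above: (1000000000 - 999999400 + 1) / 10 = 60, so
    // an automatic "all natural points" request over that window asks for 60
    // points.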
    time_t duration = before_wanted - after_wanted;

    // if the resampling time is too big, extend the duration to the past
    if (unlikely(resampling_time_requested > duration)) {
        after_wanted = before_wanted - resampling_time_requested;
        duration = before_wanted - after_wanted;
        query_debug_log(":resampling after_wanted %lld", after_wanted);
    }

    // if the duration is not aligned to resampling time
    // extend the duration to the past, to avoid a gap at the chart
    // only when the missing duration is above 1/10th of a point
    if(resampling_time_requested > query_granularity && duration % resampling_time_requested) {
        time_t delta = duration % resampling_time_requested;
        if(delta > resampling_time_requested / 10) {
            after_wanted -= resampling_time_requested - delta;
            duration = before_wanted - after_wanted;
            query_debug_log(":resampling2 after_wanted %lld", after_wanted);
        }
    }

    // the available points of the query
    long points_available = (duration + 1) / query_granularity;
    if(unlikely(points_available <= 0)) points_available = 1;
    query_debug_log(":points_available %ld", points_available);

    if(points_wanted > points_available) {
        points_wanted = points_available;
        query_debug_log(":max points_wanted %ld", points_wanted);
    }

    // calculate the desired grouping of source data points
    long group = points_available / points_wanted;
    if(group <= 0) group = 1;

    // round "group" to the closest integer
    if(points_available % points_wanted > points_wanted / 2)
        group++;

    query_debug_log(":group %ld", group);
    if(points_wanted * group * query_granularity < duration) {
        // the grouping we are going to do, is not enough
        // to cover the entire duration requested, so
        // we have to change the number of points, to make sure we will
        // respect the timeframe as closely as possible

        // let's see how many points are the optimal
        points_wanted = points_available / group;

        if(points_wanted * group < points_available)
            points_wanted++;

        if(unlikely(points_wanted <= 0))
            points_wanted = 1;

        query_debug_log(":optimal points %ld", points_wanted);
    }
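    // Illustrative example (hypothetical numbers): with 1000 available points,
    // 300 wanted points and a query_granularity of 1, group is 3 and
    // 300 * 3 = 900 is less than the ~999s duration, so points_wanted is
    // recomputed as 1000 / 3 = 333 and bumped to 334, so that 334 * 3 covers
    // every available point.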
    // resampling_time_requested enforces a certain grouping multiple
    NETDATA_DOUBLE resampling_divisor = 1.0;
    long resampling_group = 1;
    if(unlikely(resampling_time_requested > query_granularity)) {
        // the points we should group to satisfy gtime
        resampling_group = resampling_time_requested / query_granularity;
        if(unlikely(resampling_time_requested % query_granularity))
            resampling_group++;

        query_debug_log(":resampling group %ld", resampling_group);

        // adapt group according to resampling_group
        if(unlikely(group < resampling_group)) {
            group = resampling_group; // do not allow grouping below the desired one
            query_debug_log(":group less res %ld", group);
        }
        if(unlikely(group % resampling_group)) {
            group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
            query_debug_log(":group mod res %ld", group);
        }

        // resampling_divisor = group / resampling_group;
        resampling_divisor = (NETDATA_DOUBLE)(group * query_granularity) / (NETDATA_DOUBLE)resampling_time_requested;
        query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
    }
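    // Illustrative example (hypothetical numbers): with query_granularity 1
    // and a requested resampling time of 60s, resampling_group becomes 60; a
    // group of 3 is raised to 60 (divisor 60 / 60 = 1.0), while a group of 90
    // is padded up to 120, the next multiple of 60, giving a divisor of
    // 120 * 1 / 60 = 2.0.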
    // now that we have group, align the requested timeframe to fit it.
    if(aligned && before_wanted % (group * query_granularity)) {
        if(before_is_aligned_to_db_end)
            before_wanted -= before_wanted % (group * query_granularity);
        else
            before_wanted += (group * query_granularity) - before_wanted % (group * query_granularity);
        query_debug_log(":align before_wanted %lld", before_wanted);
    }

    after_wanted = before_wanted - (points_wanted * group * query_granularity) + query_granularity;
    query_debug_log(":final after_wanted %lld", after_wanted);

    duration = before_wanted - after_wanted;
    query_debug_log(":final duration %ld", duration + 1);
    // check the context query based on the starting time of the query
    if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
        rebuild_context_param_list(owa, context_param_list, after_wanted);
        st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;

        if(unlikely(!st))
            return NULL;
    }

    internal_error(points_wanted != duration / (query_granularity * group) + 1,
                   "QUERY: points_wanted %ld is not points %ld",
                   points_wanted, duration / (query_granularity * group) + 1);

    internal_error(group < resampling_group,
                   "QUERY: group %ld is less than the desired group points %ld",
                   group, resampling_group);

    internal_error(group > resampling_group && group % resampling_group,
                   "QUERY: group %ld is not a multiple of the desired group points %ld",
                   group, resampling_group);

    // -------------------------------------------------------------------------
    // initialize our result set
    // this also locks the chart for us

    RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
    if(unlikely(!r)) {
        internal_error(true, "QUERY: cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld",
                       st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
        return NULL;
    }

    if(unlikely(!r->d || !points_wanted)) {
        internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld",
                       st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
        return r;
    }

    if(relative_period_requested)
        r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
    else
        r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;

    // find how many dimensions we have
    long dimensions_count = r->d;

    // -------------------------------------------------------------------------
    // initialize RRDR

    r->group = group;
    r->update_every = (int)(group * query_granularity);
    r->before = before_wanted;
    r->after = after_wanted;
    r->internal.points_wanted = points_wanted;
    r->internal.resampling_group = resampling_group;
    r->internal.resampling_divisor = resampling_divisor;
    r->internal.query_options = options;
    r->internal.query_tier = tier;

    // -------------------------------------------------------------------------
    // assign the processor functions
    rrdr_set_grouping_function(r, group_method);

    // allocate any memory required by the grouping method
    r->internal.grouping_create(r, group_options);

    // -------------------------------------------------------------------------
    // disable the not-wanted dimensions

    if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
        rrdset_check_rdlock(st);

    if(dimensions)
        rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);

    query_debug_log_fin();

    // -------------------------------------------------------------------------
    // do the work for each dimension

    time_t max_after = 0, min_before = 0;
    long max_rows = 0;

    RRDDIM *first_rd = context_param_list ? context_param_list->rd : st->dimensions;
    RRDDIM *rd;
    long c, dimensions_used = 0, dimensions_nonzero = 0;
    struct timeval query_start_time;
    struct timeval query_current_time;
    if (timeout) now_realtime_timeval(&query_start_time);

    for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {

        // if we need a percentage, we need to calculate all dimensions
        if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
            if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
            continue;
        }
        r->od[c] |= RRDR_DIMENSION_SELECTED;

        // reset the grouping for the new dimension
        r->internal.grouping_reset(r);

        rrd2rrdr_do_dimension(r, points_wanted, rd, c, after_wanted, before_wanted);
        if (timeout)
            now_realtime_timeval(&query_current_time);

        if(r->od[c] & RRDR_DIMENSION_NONZERO)
            dimensions_nonzero++;

        // verify all dimensions are aligned
        if(unlikely(!dimensions_used)) {
            min_before = r->before;
            max_after = r->after;
            max_rows = r->rows;
        }
        else {
            if(r->after != max_after) {
                internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
                               st->name, (size_t)max_after, rd->name, (size_t)r->after);

                r->after = (r->after > max_after) ? r->after : max_after;
            }

            if(r->before != min_before) {
                internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
                               st->name, (size_t)min_before, rd->name, (size_t)r->before);

                r->before = (r->before < min_before) ? r->before : min_before;
            }

            if(r->rows != max_rows) {
                internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
                               st->name, (size_t)max_rows, rd->name, (size_t)r->rows);

                r->rows = (r->rows > max_rows) ? r->rows : max_rows;
            }
        }

        dimensions_used++;
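        // the timeout parameter is expressed in milliseconds, while dt_usec()
        // returns microseconds, hence the division by 1000.0 in the check below.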
  1422. if (timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
  1423. log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
  1424. (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
  1425. r->result_options |= RRDR_RESULT_OPTION_CANCEL;
  1426. break;
  1427. }
  1428. }
  1429. #ifdef NETDATA_INTERNAL_CHECKS
  1430. if (dimensions_used) {
  1431. if(r->internal.log)
  1432. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1433. after_wanted, after_requested, before_wanted, before_requested,
  1434. points_requested, points_wanted, /*after_slot, before_slot,*/
  1435. r->internal.log);
  1436. if(r->rows != points_wanted)
  1437. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1438. after_wanted, after_requested, before_wanted, before_requested,
  1439. points_requested, points_wanted, /*after_slot, before_slot,*/
  1440. "got 'points' is not wanted 'points'");
  1441. if(aligned && (r->before % (group * query_granularity)) != 0)
  1442. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1443. after_wanted, after_requested, before_wanted,before_wanted,
  1444. points_requested, points_wanted, /*after_slot, before_slot,*/
  1445. "'before' is not aligned but alignment is required");
        // 'after' should not be aligned, since we start inside the first group
        //if(aligned && (r->after % group) != 0)
        //    rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");

        if(r->before != before_wanted)
            rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
                                                   after_wanted, after_requested, before_wanted, before_requested,
                                                   points_requested, points_wanted, /*after_slot, before_slot,*/
                                                   "chart is not aligned to requested 'before'");

        if(r->before != before_wanted)
            rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
                                                   after_wanted, after_requested, before_wanted, before_requested,
                                                   points_requested, points_wanted, /*after_slot, before_slot,*/
                                                   "got 'before' is not wanted 'before'");

        // reported 'after' varies, depending on group
        if(r->after != after_wanted)
            rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
                                                   after_wanted, after_requested, before_wanted, before_requested,
                                                   points_requested, points_wanted, /*after_slot, before_slot,*/
                                                   "got 'after' is not wanted 'after'");
    }
#endif

    // free all resources used by the grouping method
    r->internal.grouping_free(r);

    // when all the dimensions are zero, we should return all of them
    if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
        // all the dimensions are zero
        // mark them as NONZERO to send them all
        for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
            if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
            r->od[c] |= RRDR_DIMENSION_NONZERO;
        }
    }

    rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
    return r;
}