query.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #include "query.h"
  3. #include "web/api/formatters/rrd2json.h"
  4. #include "rrdr.h"
  5. #include "average/average.h"
  6. #include "countif/countif.h"
  7. #include "incremental_sum/incremental_sum.h"
  8. #include "max/max.h"
  9. #include "median/median.h"
  10. #include "min/min.h"
  11. #include "sum/sum.h"
  12. #include "stddev/stddev.h"
  13. #include "ses/ses.h"
  14. #include "des/des.h"
  15. #include "percentile/percentile.h"
  16. #include "trimmed_mean/trimmed_mean.h"
  17. // ----------------------------------------------------------------------------
  18. static struct {
  19. const char *name;
  20. uint32_t hash;
  21. RRDR_GROUPING value;
  22. // One time initialization for the module.
  23. // This is called once, when netdata starts.
  24. void (*init)(void);
  25. // Allocate all required structures for a query.
  26. // This is called once for each netdata query.
  27. void (*create)(struct rrdresult *r, const char *options);
  28. // Cleanup collected values, but don't destroy the structures.
  29. // This is called when the query engine switches dimensions,
  30. // as part of the same query (so same chart, switching metric).
  31. void (*reset)(struct rrdresult *r);
  32. // Free all resources allocated for the query.
  33. void (*free)(struct rrdresult *r);
  34. // Add a single value into the calculation.
  35. // The module may decide to cache it, or use it in the fly.
  36. void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
  37. // Generate a single result for the values added so far.
  38. // More values and points may be requested later.
  39. // It is up to the module to reset its internal structures
  40. // when flushing it (so for a few modules it may be better to
  41. // continue after a flush as if nothing changed, for others a
  42. // cleanup of the internal structures may be required).
  43. NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
  44. TIER_QUERY_FETCH tier_query_fetch;
  45. } api_v1_data_groups[] = {
  46. {.name = "average",
  47. .hash = 0,
  48. .value = RRDR_GROUPING_AVERAGE,
  49. .init = NULL,
  50. .create= grouping_create_average,
  51. .reset = grouping_reset_average,
  52. .free = grouping_free_average,
  53. .add = grouping_add_average,
  54. .flush = grouping_flush_average,
  55. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  56. },
  57. {.name = "mean", // alias on 'average'
  58. .hash = 0,
  59. .value = RRDR_GROUPING_AVERAGE,
  60. .init = NULL,
  61. .create= grouping_create_average,
  62. .reset = grouping_reset_average,
  63. .free = grouping_free_average,
  64. .add = grouping_add_average,
  65. .flush = grouping_flush_average,
  66. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  67. },
  68. {.name = "trimmed-mean1",
  69. .hash = 0,
  70. .value = RRDR_GROUPING_TRIMMED_MEAN1,
  71. .init = NULL,
  72. .create= grouping_create_trimmed_mean1,
  73. .reset = grouping_reset_trimmed_mean,
  74. .free = grouping_free_trimmed_mean,
  75. .add = grouping_add_trimmed_mean,
  76. .flush = grouping_flush_trimmed_mean,
  77. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  78. },
  79. {.name = "trimmed-mean2",
  80. .hash = 0,
  81. .value = RRDR_GROUPING_TRIMMED_MEAN2,
  82. .init = NULL,
  83. .create= grouping_create_trimmed_mean2,
  84. .reset = grouping_reset_trimmed_mean,
  85. .free = grouping_free_trimmed_mean,
  86. .add = grouping_add_trimmed_mean,
  87. .flush = grouping_flush_trimmed_mean,
  88. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  89. },
  90. {.name = "trimmed-mean3",
  91. .hash = 0,
  92. .value = RRDR_GROUPING_TRIMMED_MEAN3,
  93. .init = NULL,
  94. .create= grouping_create_trimmed_mean3,
  95. .reset = grouping_reset_trimmed_mean,
  96. .free = grouping_free_trimmed_mean,
  97. .add = grouping_add_trimmed_mean,
  98. .flush = grouping_flush_trimmed_mean,
  99. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  100. },
  101. {.name = "trimmed-mean5",
  102. .hash = 0,
  103. .value = RRDR_GROUPING_TRIMMED_MEAN5,
  104. .init = NULL,
  105. .create= grouping_create_trimmed_mean5,
  106. .reset = grouping_reset_trimmed_mean,
  107. .free = grouping_free_trimmed_mean,
  108. .add = grouping_add_trimmed_mean,
  109. .flush = grouping_flush_trimmed_mean,
  110. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  111. },
  112. {.name = "trimmed-mean10",
  113. .hash = 0,
  114. .value = RRDR_GROUPING_TRIMMED_MEAN10,
  115. .init = NULL,
  116. .create= grouping_create_trimmed_mean10,
  117. .reset = grouping_reset_trimmed_mean,
  118. .free = grouping_free_trimmed_mean,
  119. .add = grouping_add_trimmed_mean,
  120. .flush = grouping_flush_trimmed_mean,
  121. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  122. },
  123. {.name = "trimmed-mean15",
  124. .hash = 0,
  125. .value = RRDR_GROUPING_TRIMMED_MEAN15,
  126. .init = NULL,
  127. .create= grouping_create_trimmed_mean15,
  128. .reset = grouping_reset_trimmed_mean,
  129. .free = grouping_free_trimmed_mean,
  130. .add = grouping_add_trimmed_mean,
  131. .flush = grouping_flush_trimmed_mean,
  132. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  133. },
  134. {.name = "trimmed-mean20",
  135. .hash = 0,
  136. .value = RRDR_GROUPING_TRIMMED_MEAN20,
  137. .init = NULL,
  138. .create= grouping_create_trimmed_mean20,
  139. .reset = grouping_reset_trimmed_mean,
  140. .free = grouping_free_trimmed_mean,
  141. .add = grouping_add_trimmed_mean,
  142. .flush = grouping_flush_trimmed_mean,
  143. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  144. },
  145. {.name = "trimmed-mean25",
  146. .hash = 0,
  147. .value = RRDR_GROUPING_TRIMMED_MEAN25,
  148. .init = NULL,
  149. .create= grouping_create_trimmed_mean25,
  150. .reset = grouping_reset_trimmed_mean,
  151. .free = grouping_free_trimmed_mean,
  152. .add = grouping_add_trimmed_mean,
  153. .flush = grouping_flush_trimmed_mean,
  154. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  155. },
  156. {.name = "trimmed-mean",
  157. .hash = 0,
  158. .value = RRDR_GROUPING_TRIMMED_MEAN5,
  159. .init = NULL,
  160. .create= grouping_create_trimmed_mean5,
  161. .reset = grouping_reset_trimmed_mean,
  162. .free = grouping_free_trimmed_mean,
  163. .add = grouping_add_trimmed_mean,
  164. .flush = grouping_flush_trimmed_mean,
  165. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  166. },
  167. {.name = "incremental_sum",
  168. .hash = 0,
  169. .value = RRDR_GROUPING_INCREMENTAL_SUM,
  170. .init = NULL,
  171. .create= grouping_create_incremental_sum,
  172. .reset = grouping_reset_incremental_sum,
  173. .free = grouping_free_incremental_sum,
  174. .add = grouping_add_incremental_sum,
  175. .flush = grouping_flush_incremental_sum,
  176. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  177. },
  178. {.name = "incremental-sum",
  179. .hash = 0,
  180. .value = RRDR_GROUPING_INCREMENTAL_SUM,
  181. .init = NULL,
  182. .create= grouping_create_incremental_sum,
  183. .reset = grouping_reset_incremental_sum,
  184. .free = grouping_free_incremental_sum,
  185. .add = grouping_add_incremental_sum,
  186. .flush = grouping_flush_incremental_sum,
  187. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  188. },
  189. {.name = "median",
  190. .hash = 0,
  191. .value = RRDR_GROUPING_MEDIAN,
  192. .init = NULL,
  193. .create= grouping_create_median,
  194. .reset = grouping_reset_median,
  195. .free = grouping_free_median,
  196. .add = grouping_add_median,
  197. .flush = grouping_flush_median,
  198. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  199. },
  200. {.name = "trimmed-median1",
  201. .hash = 0,
  202. .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
  203. .init = NULL,
  204. .create= grouping_create_trimmed_median1,
  205. .reset = grouping_reset_median,
  206. .free = grouping_free_median,
  207. .add = grouping_add_median,
  208. .flush = grouping_flush_median,
  209. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  210. },
  211. {.name = "trimmed-median2",
  212. .hash = 0,
  213. .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
  214. .init = NULL,
  215. .create= grouping_create_trimmed_median2,
  216. .reset = grouping_reset_median,
  217. .free = grouping_free_median,
  218. .add = grouping_add_median,
  219. .flush = grouping_flush_median,
  220. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  221. },
  222. {.name = "trimmed-median3",
  223. .hash = 0,
  224. .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
  225. .init = NULL,
  226. .create= grouping_create_trimmed_median3,
  227. .reset = grouping_reset_median,
  228. .free = grouping_free_median,
  229. .add = grouping_add_median,
  230. .flush = grouping_flush_median,
  231. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  232. },
  233. {.name = "trimmed-median5",
  234. .hash = 0,
  235. .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
  236. .init = NULL,
  237. .create= grouping_create_trimmed_median5,
  238. .reset = grouping_reset_median,
  239. .free = grouping_free_median,
  240. .add = grouping_add_median,
  241. .flush = grouping_flush_median,
  242. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  243. },
  244. {.name = "trimmed-median10",
  245. .hash = 0,
  246. .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
  247. .init = NULL,
  248. .create= grouping_create_trimmed_median10,
  249. .reset = grouping_reset_median,
  250. .free = grouping_free_median,
  251. .add = grouping_add_median,
  252. .flush = grouping_flush_median,
  253. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  254. },
  255. {.name = "trimmed-median15",
  256. .hash = 0,
  257. .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
  258. .init = NULL,
  259. .create= grouping_create_trimmed_median15,
  260. .reset = grouping_reset_median,
  261. .free = grouping_free_median,
  262. .add = grouping_add_median,
  263. .flush = grouping_flush_median,
  264. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  265. },
  266. {.name = "trimmed-median20",
  267. .hash = 0,
  268. .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
  269. .init = NULL,
  270. .create= grouping_create_trimmed_median20,
  271. .reset = grouping_reset_median,
  272. .free = grouping_free_median,
  273. .add = grouping_add_median,
  274. .flush = grouping_flush_median,
  275. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  276. },
  277. {.name = "trimmed-median25",
  278. .hash = 0,
  279. .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
  280. .init = NULL,
  281. .create= grouping_create_trimmed_median25,
  282. .reset = grouping_reset_median,
  283. .free = grouping_free_median,
  284. .add = grouping_add_median,
  285. .flush = grouping_flush_median,
  286. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  287. },
  288. {.name = "trimmed-median",
  289. .hash = 0,
  290. .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
  291. .init = NULL,
  292. .create= grouping_create_trimmed_median5,
  293. .reset = grouping_reset_median,
  294. .free = grouping_free_median,
  295. .add = grouping_add_median,
  296. .flush = grouping_flush_median,
  297. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  298. },
  299. {.name = "percentile25",
  300. .hash = 0,
  301. .value = RRDR_GROUPING_PERCENTILE25,
  302. .init = NULL,
  303. .create= grouping_create_percentile25,
  304. .reset = grouping_reset_percentile,
  305. .free = grouping_free_percentile,
  306. .add = grouping_add_percentile,
  307. .flush = grouping_flush_percentile,
  308. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  309. },
  310. {.name = "percentile50",
  311. .hash = 0,
  312. .value = RRDR_GROUPING_PERCENTILE50,
  313. .init = NULL,
  314. .create= grouping_create_percentile50,
  315. .reset = grouping_reset_percentile,
  316. .free = grouping_free_percentile,
  317. .add = grouping_add_percentile,
  318. .flush = grouping_flush_percentile,
  319. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  320. },
  321. {.name = "percentile75",
  322. .hash = 0,
  323. .value = RRDR_GROUPING_PERCENTILE75,
  324. .init = NULL,
  325. .create= grouping_create_percentile75,
  326. .reset = grouping_reset_percentile,
  327. .free = grouping_free_percentile,
  328. .add = grouping_add_percentile,
  329. .flush = grouping_flush_percentile,
  330. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  331. },
  332. {.name = "percentile80",
  333. .hash = 0,
  334. .value = RRDR_GROUPING_PERCENTILE80,
  335. .init = NULL,
  336. .create= grouping_create_percentile80,
  337. .reset = grouping_reset_percentile,
  338. .free = grouping_free_percentile,
  339. .add = grouping_add_percentile,
  340. .flush = grouping_flush_percentile,
  341. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  342. },
  343. {.name = "percentile90",
  344. .hash = 0,
  345. .value = RRDR_GROUPING_PERCENTILE90,
  346. .init = NULL,
  347. .create= grouping_create_percentile90,
  348. .reset = grouping_reset_percentile,
  349. .free = grouping_free_percentile,
  350. .add = grouping_add_percentile,
  351. .flush = grouping_flush_percentile,
  352. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  353. },
  354. {.name = "percentile95",
  355. .hash = 0,
  356. .value = RRDR_GROUPING_PERCENTILE95,
  357. .init = NULL,
  358. .create= grouping_create_percentile95,
  359. .reset = grouping_reset_percentile,
  360. .free = grouping_free_percentile,
  361. .add = grouping_add_percentile,
  362. .flush = grouping_flush_percentile,
  363. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  364. },
  365. {.name = "percentile97",
  366. .hash = 0,
  367. .value = RRDR_GROUPING_PERCENTILE97,
  368. .init = NULL,
  369. .create= grouping_create_percentile97,
  370. .reset = grouping_reset_percentile,
  371. .free = grouping_free_percentile,
  372. .add = grouping_add_percentile,
  373. .flush = grouping_flush_percentile,
  374. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  375. },
  376. {.name = "percentile98",
  377. .hash = 0,
  378. .value = RRDR_GROUPING_PERCENTILE98,
  379. .init = NULL,
  380. .create= grouping_create_percentile98,
  381. .reset = grouping_reset_percentile,
  382. .free = grouping_free_percentile,
  383. .add = grouping_add_percentile,
  384. .flush = grouping_flush_percentile,
  385. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  386. },
  387. {.name = "percentile99",
  388. .hash = 0,
  389. .value = RRDR_GROUPING_PERCENTILE99,
  390. .init = NULL,
  391. .create= grouping_create_percentile99,
  392. .reset = grouping_reset_percentile,
  393. .free = grouping_free_percentile,
  394. .add = grouping_add_percentile,
  395. .flush = grouping_flush_percentile,
  396. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  397. },
  398. {.name = "percentile",
  399. .hash = 0,
  400. .value = RRDR_GROUPING_PERCENTILE95,
  401. .init = NULL,
  402. .create= grouping_create_percentile95,
  403. .reset = grouping_reset_percentile,
  404. .free = grouping_free_percentile,
  405. .add = grouping_add_percentile,
  406. .flush = grouping_flush_percentile,
  407. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  408. },
  409. {.name = "min",
  410. .hash = 0,
  411. .value = RRDR_GROUPING_MIN,
  412. .init = NULL,
  413. .create= grouping_create_min,
  414. .reset = grouping_reset_min,
  415. .free = grouping_free_min,
  416. .add = grouping_add_min,
  417. .flush = grouping_flush_min,
  418. .tier_query_fetch = TIER_QUERY_FETCH_MIN
  419. },
  420. {.name = "max",
  421. .hash = 0,
  422. .value = RRDR_GROUPING_MAX,
  423. .init = NULL,
  424. .create= grouping_create_max,
  425. .reset = grouping_reset_max,
  426. .free = grouping_free_max,
  427. .add = grouping_add_max,
  428. .flush = grouping_flush_max,
  429. .tier_query_fetch = TIER_QUERY_FETCH_MAX
  430. },
  431. {.name = "sum",
  432. .hash = 0,
  433. .value = RRDR_GROUPING_SUM,
  434. .init = NULL,
  435. .create= grouping_create_sum,
  436. .reset = grouping_reset_sum,
  437. .free = grouping_free_sum,
  438. .add = grouping_add_sum,
  439. .flush = grouping_flush_sum,
  440. .tier_query_fetch = TIER_QUERY_FETCH_SUM
  441. },
  442. // standard deviation
  443. {.name = "stddev",
  444. .hash = 0,
  445. .value = RRDR_GROUPING_STDDEV,
  446. .init = NULL,
  447. .create= grouping_create_stddev,
  448. .reset = grouping_reset_stddev,
  449. .free = grouping_free_stddev,
  450. .add = grouping_add_stddev,
  451. .flush = grouping_flush_stddev,
  452. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  453. },
  454. {.name = "cv", // coefficient variation is calculated by stddev
  455. .hash = 0,
  456. .value = RRDR_GROUPING_CV,
  457. .init = NULL,
  458. .create= grouping_create_stddev, // not an error, stddev calculates this too
  459. .reset = grouping_reset_stddev, // not an error, stddev calculates this too
  460. .free = grouping_free_stddev, // not an error, stddev calculates this too
  461. .add = grouping_add_stddev, // not an error, stddev calculates this too
  462. .flush = grouping_flush_coefficient_of_variation,
  463. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  464. },
  465. {.name = "rsd", // alias of 'cv'
  466. .hash = 0,
  467. .value = RRDR_GROUPING_CV,
  468. .init = NULL,
  469. .create= grouping_create_stddev, // not an error, stddev calculates this too
  470. .reset = grouping_reset_stddev, // not an error, stddev calculates this too
  471. .free = grouping_free_stddev, // not an error, stddev calculates this too
  472. .add = grouping_add_stddev, // not an error, stddev calculates this too
  473. .flush = grouping_flush_coefficient_of_variation,
  474. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  475. },
  476. /*
  477. {.name = "mean", // same as average, no need to define it again
  478. .hash = 0,
  479. .value = RRDR_GROUPING_MEAN,
  480. .setup = NULL,
  481. .create= grouping_create_stddev,
  482. .reset = grouping_reset_stddev,
  483. .free = grouping_free_stddev,
  484. .add = grouping_add_stddev,
  485. .flush = grouping_flush_mean,
  486. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  487. },
  488. */
  489. /*
  490. {.name = "variance", // meaningless to offer
  491. .hash = 0,
  492. .value = RRDR_GROUPING_VARIANCE,
  493. .setup = NULL,
  494. .create= grouping_create_stddev,
  495. .reset = grouping_reset_stddev,
  496. .free = grouping_free_stddev,
  497. .add = grouping_add_stddev,
  498. .flush = grouping_flush_variance,
  499. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  500. },
  501. */
  502. // single exponential smoothing
  503. {.name = "ses",
  504. .hash = 0,
  505. .value = RRDR_GROUPING_SES,
  506. .init = grouping_init_ses,
  507. .create= grouping_create_ses,
  508. .reset = grouping_reset_ses,
  509. .free = grouping_free_ses,
  510. .add = grouping_add_ses,
  511. .flush = grouping_flush_ses,
  512. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  513. },
  514. {.name = "ema", // alias for 'ses'
  515. .hash = 0,
  516. .value = RRDR_GROUPING_SES,
  517. .init = NULL,
  518. .create= grouping_create_ses,
  519. .reset = grouping_reset_ses,
  520. .free = grouping_free_ses,
  521. .add = grouping_add_ses,
  522. .flush = grouping_flush_ses,
  523. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  524. },
  525. {.name = "ewma", // alias for ses
  526. .hash = 0,
  527. .value = RRDR_GROUPING_SES,
  528. .init = NULL,
  529. .create= grouping_create_ses,
  530. .reset = grouping_reset_ses,
  531. .free = grouping_free_ses,
  532. .add = grouping_add_ses,
  533. .flush = grouping_flush_ses,
  534. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  535. },
  536. // double exponential smoothing
  537. {.name = "des",
  538. .hash = 0,
  539. .value = RRDR_GROUPING_DES,
  540. .init = grouping_init_des,
  541. .create= grouping_create_des,
  542. .reset = grouping_reset_des,
  543. .free = grouping_free_des,
  544. .add = grouping_add_des,
  545. .flush = grouping_flush_des,
  546. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  547. },
  548. {.name = "countif",
  549. .hash = 0,
  550. .value = RRDR_GROUPING_COUNTIF,
  551. .init = NULL,
  552. .create= grouping_create_countif,
  553. .reset = grouping_reset_countif,
  554. .free = grouping_free_countif,
  555. .add = grouping_add_countif,
  556. .flush = grouping_flush_countif,
  557. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  558. },
  559. // terminator
  560. {.name = NULL,
  561. .hash = 0,
  562. .value = RRDR_GROUPING_UNDEFINED,
  563. .init = NULL,
  564. .create= grouping_create_average,
  565. .reset = grouping_reset_average,
  566. .free = grouping_free_average,
  567. .add = grouping_add_average,
  568. .flush = grouping_flush_average,
  569. .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
  570. }
  571. };
  572. void web_client_api_v1_init_grouping(void) {
  573. int i;
  574. for(i = 0; api_v1_data_groups[i].name ; i++) {
  575. api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
  576. if(api_v1_data_groups[i].init)
  577. api_v1_data_groups[i].init();
  578. }
  579. }
  580. const char *group_method2string(RRDR_GROUPING group) {
  581. int i;
  582. for(i = 0; api_v1_data_groups[i].name ; i++) {
  583. if(api_v1_data_groups[i].value == group) {
  584. return api_v1_data_groups[i].name;
  585. }
  586. }
  587. return "unknown-group-method";
  588. }
  589. RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
  590. int i;
  591. uint32_t hash = simple_hash(name);
  592. for(i = 0; api_v1_data_groups[i].name ; i++)
  593. if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
  594. return api_v1_data_groups[i].value;
  595. return def;
  596. }
  597. const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
  598. int i;
  599. for(i = 0; api_v1_data_groups[i].name ; i++)
  600. if(unlikely(group == api_v1_data_groups[i].value))
  601. return api_v1_data_groups[i].name;
  602. return "unknown";
  603. }
  604. static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
  605. int i, found = 0;
  606. for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
  607. if(api_v1_data_groups[i].value == group_method) {
  608. r->internal.grouping_create = api_v1_data_groups[i].create;
  609. r->internal.grouping_reset = api_v1_data_groups[i].reset;
  610. r->internal.grouping_free = api_v1_data_groups[i].free;
  611. r->internal.grouping_add = api_v1_data_groups[i].add;
  612. r->internal.grouping_flush = api_v1_data_groups[i].flush;
  613. r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
  614. found = 1;
  615. }
  616. }
  617. if(!found) {
  618. errno = 0;
  619. internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
  620. r->internal.grouping_create = grouping_create_average;
  621. r->internal.grouping_reset = grouping_reset_average;
  622. r->internal.grouping_free = grouping_free_average;
  623. r->internal.grouping_add = grouping_add_average;
  624. r->internal.grouping_flush = grouping_flush_average;
  625. r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
  626. }
  627. }
// ----------------------------------------------------------------------------

// Apply the user's dimensions filter 'dims' to the RRDR: dimensions matching
// the pattern are flagged SELECTED (and un-hidden), everything else is
// flagged HIDDEN. A NULL/empty/"*" filter selects everything and returns
// early. If every selected dimension turned out to be all-zero, they are
// force-flagged NONZERO so the chart is not returned empty.
static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims,
                                                 struct context_param *context_param_list)
{
    // when a context_param_list is given, iterate its dimension list instead
    // of the chart's own; archived contexts don't need the rdlock check
    RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
    int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));

    if (should_lock)
        rrdset_check_rdlock(r->st);

    // no filter (or the match-all "*" filter): nothing to disable
    if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return;

    int match_ids = 0, match_names = 0;

    if(unlikely(options & RRDR_OPTION_MATCH_IDS))
        match_ids = 1;
    if(unlikely(options & RRDR_OPTION_MATCH_NAMES))
        match_names = 1;

    // default: match against both ids and names
    if(likely(!match_ids && !match_names))
        match_ids = match_names = 1;

    SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);

    RRDDIM *d;
    long c, dims_selected = 0, dims_not_hidden_not_zero = 0;
    // r->od[c] is the per-dimension option flags, indexed in list order
    for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next) {
        if( (match_ids && simple_pattern_matches(pattern, d->id))
            || (match_names && simple_pattern_matches(pattern, d->name))
            ) {
            r->od[c] |= RRDR_DIMENSION_SELECTED;
            if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN;
            dims_selected++;

            // since the user needs this dimension
            // make it appear as NONZERO, to return it
            // even if the dimension has only zeros
            // unless option non_zero is set
            if(unlikely(!(options & RRDR_OPTION_NONZERO)))
                r->od[c] |= RRDR_DIMENSION_NONZERO;

            // count the visible dimensions
            if(likely(r->od[c] & RRDR_DIMENSION_NONZERO))
                dims_not_hidden_not_zero++;
        }
        else {
            r->od[c] |= RRDR_DIMENSION_HIDDEN;
            if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
        }
    }
    simple_pattern_free(pattern);

    // check if all dimensions are hidden
    if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
        // there are a few selected dimensions,
        // but they are all zero
        // enable the selected ones
        // to avoid returning an empty chart
        for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next)
            if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED))
                r->od[c] |= RRDR_DIMENSION_NONZERO;
    }
}
  681. // ----------------------------------------------------------------------------
  682. // helpers to find our way in RRDR
  683. static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
  684. return &r->o[ rrdr_line * r->d ];
  685. }
  686. static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
  687. return &r->v[ rrdr_line * r->d ];
  688. }
// Advance to the next RRDR output line and stamp it with timestamp 't'.
// Callers start with rrdr_line == -1, so the first call yields line 0.
// Returns the new line index.
static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
    rrdr_line++;

    // debug-build check: never step past the allocated RRDR rows (r->n)
    internal_error(rrdr_line >= r->n,
                   "QUERY: requested to step above RRDR size for chart '%s'",
                   r->st->name);

    // debug-build check: a line already stamped must keep the same timestamp
    internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
                   "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'",
                   (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);

    // save the time
    r->t[rrdr_line] = t;

    return rrdr_line;
}
// Record how many lines were actually written to the RRDR
// ('rrdr_line' is the index of the last line written, so rows = index + 1).
static inline void rrdr_done(RRDR *r, long rrdr_line) {
    r->rows = rrdr_line + 1;
}
  704. // ----------------------------------------------------------------------------
  705. // tier management
  706. static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted, time_t before_wanted, long points_wanted) {
  707. if(unlikely(storage_tiers < 2))
  708. return 0;
  709. if(unlikely(after_wanted == before_wanted || points_wanted <= 0 || !rd || !rd->rrdset)) {
  710. if(!rd)
  711. internal_error(true, "QUERY: NULL dimension - invalid params to tier calculation");
  712. else
  713. internal_error(true, "QUERY: chart '%s' dimension '%s' invalid params to tier calculation",
  714. (rd->rrdset)?rd->rrdset->name:"unknown", rd->name);
  715. return 0;
  716. }
  717. //BUFFER *wb = buffer_create(1000);
  718. //buffer_sprintf(wb, "Best tier for chart '%s', dim '%s', from %ld to %ld (dur %ld, every %d), points %ld",
  719. // rd->rrdset->name, rd->name, after_wanted, before_wanted, before_wanted - after_wanted, rd->update_every, points_wanted);
  720. long weight[storage_tiers];
  721. for(int tier = 0; tier < storage_tiers ; tier++) {
  722. if(unlikely(!rd->tiers[tier])) {
  723. internal_error(true, "QUERY: tier %d of chart '%s' dimension '%s' not initialized",
  724. tier, rd->rrdset->name, rd->name);
  725. // buffer_free(wb);
  726. return 0;
  727. }
  728. time_t first_t = rd->tiers[tier]->query_ops.oldest_time(rd->tiers[tier]->db_metric_handle);
  729. time_t last_t = rd->tiers[tier]->query_ops.latest_time(rd->tiers[tier]->db_metric_handle);
  730. time_t common_after = MAX(first_t, after_wanted);
  731. time_t common_before = MIN(last_t, before_wanted);
  732. long time_coverage = (common_before - common_after) * 1000 / (before_wanted - after_wanted);
  733. if(time_coverage < 0) time_coverage = 0;
  734. int update_every = (int)rd->tiers[tier]->tier_grouping * (int)rd->update_every;
  735. if(unlikely(update_every == 0)) {
  736. internal_error(true, "QUERY: update_every of tier %d for chart '%s' dimension '%s' is zero. tg = %d, ue = %d",
  737. tier, rd->rrdset->name, rd->name, rd->tiers[tier]->tier_grouping, rd->update_every);
  738. // buffer_free(wb);
  739. return 0;
  740. }
  741. long points_available = (before_wanted - after_wanted) / update_every;
  742. long points_delta = points_available - points_wanted;
  743. long points_coverage = (points_delta < 0) ? points_available * 1000 / points_wanted: 1000;
  744. if(points_available <= 0)
  745. weight[tier] = -LONG_MAX;
  746. else
  747. weight[tier] = points_coverage;
  748. // buffer_sprintf(wb, ": tier %d, first %ld, last %ld (dur %ld, tg %d, every %d), points %ld, tcoverage %ld, pcoverage %ld, weight %ld",
  749. // tier, first_t, last_t, last_t - first_t, rd->tiers[tier]->tier_grouping, update_every,
  750. // points_available, time_coverage, points_coverage, weight[tier]);
  751. }
  752. int best_tier = 0;
  753. for(int tier = 1; tier < storage_tiers ; tier++) {
  754. if(weight[tier] >= weight[best_tier])
  755. best_tier = tier;
  756. }
  757. if(weight[best_tier] == -LONG_MAX)
  758. best_tier = 0;
  759. //buffer_sprintf(wb, ": final best tier %d", best_tier);
  760. //internal_error(true, "%s", buffer_tostring(wb));
  761. //buffer_free(wb);
  762. return best_tier;
  763. }
  764. static int rrdset_find_natural_update_every_for_timeframe(RRDSET *st, time_t after_wanted, time_t before_wanted, long points_wanted, RRDR_OPTIONS options, int tier) {
  765. int ret = st->update_every;
  766. if(unlikely(!st->dimensions))
  767. return ret;
  768. rrdset_rdlock(st);
  769. int best_tier;
  770. if(options & RRDR_OPTION_SELECTED_TIER && tier >= 0 && tier < storage_tiers)
  771. best_tier = tier;
  772. else
  773. best_tier = rrddim_find_best_tier_for_timeframe(st->dimensions, after_wanted, before_wanted, points_wanted);
  774. if(!st->dimensions->tiers[best_tier]) {
  775. internal_error(
  776. true,
  777. "QUERY: tier %d on chart '%s', is not initialized", best_tier, st->name);
  778. }
  779. else {
  780. ret = (int)st->dimensions->tiers[best_tier]->tier_grouping * (int)st->update_every;
  781. if(unlikely(!ret)) {
  782. internal_error(
  783. true,
  784. "QUERY: update_every calculated to be zero on chart '%s', tier_grouping %d, update_every %d",
  785. st->name, st->dimensions->tiers[best_tier]->tier_grouping, st->update_every);
  786. ret = st->update_every;
  787. }
  788. }
  789. rrdset_unlock(st);
  790. return ret;
  791. }
  792. // ----------------------------------------------------------------------------
  793. // query ops
// One normalized database point, as the dimension query engine sees it.
typedef struct query_point {
    time_t end_time;        // end of the time slot this point covers
    time_t start_time;      // start of the time slot this point covers
    NETDATA_DOUBLE value;   // the sample value (NAN when empty/unset)
    NETDATA_DOUBLE anomaly; // anomaly rate of the point, as a percentage 0-100
    SN_FLAGS flags;         // storage-number flags (e.g. SN_FLAG_RESET)
#ifdef NETDATA_INTERNAL_CHECKS
    size_t id;              // sequence number of the db read that produced it (debug builds)
#endif
} QUERY_POINT;
// The "no data" point: zero times, NAN value, no flags.
// Used to initialize the sliding point window and to represent gaps.
QUERY_POINT QUERY_POINT_EMPTY = {
    .end_time = 0,
    .start_time = 0,
    .value = NAN,
    .anomaly = 0,
    .flags = SN_FLAG_NONE,
#ifdef NETDATA_INTERNAL_CHECKS
    .id = 0,
#endif
};
#ifdef NETDATA_INTERNAL_CHECKS
// Tag a QUERY_POINT with a sequence id, so debug log messages can identify
// which db read produced it.
#define query_point_set_id(point, point_id) (point).id = point_id
#else
// release builds have no 'id' member - compile to a no-op
#define query_point_set_id(point, point_id) debug_dummy()
#endif
// One step of a query plan: serve the window [after, before] from
// storage tier 'tier'.
typedef struct query_plan_entry {
    size_t tier;
    time_t after;
    time_t before;
} QUERY_PLAN_ENTRY;

// A complete query plan: up to RRD_STORAGE_TIERS*2 steps, kept sorted by
// start time, together covering the whole requested timeframe.
typedef struct query_plan {
    size_t entries;                            // number of valid entries in data[]
    QUERY_PLAN_ENTRY data[RRD_STORAGE_TIERS*2];
} QUERY_PLAN;
// Per-dimension query state: configuration, the tier plan, the currently
// open storage query, grouping accumulators and read statistics.
typedef struct query_engine_ops {
    // configuration
    RRDR *r;                                // the result being filled
    RRDDIM *rd;                             // the dimension being queried
    time_t view_update_every;               // seconds per output (view) point
    time_t query_granularity;               // r->update_every / r->group
    TIER_QUERY_FETCH tier_query_fetch;      // which aggregate to fetch from tiers (avg/min/max/sum)

    // query planer
    QUERY_PLAN plan;                        // the tier steps to execute, sorted by start time
    size_t current_plan;                    // index of the active plan entry
    time_t current_plan_expire_time;        // when to switch to the next plan entry

    // storage queries - bound to the active plan entry's tier
    size_t tier;
    struct rrddim_tier *tier_ptr;
    struct rrddim_query_handle handle;      // the open storage query (init/next/finalize)
    STORAGE_POINT (*next_metric)(struct rrddim_query_handle *handle);
    int (*is_finished)(struct rrddim_query_handle *handle);
    void (*finalize)(struct rrddim_query_handle *handle);

    // aggregating points over time - reset after each output point is flushed
    void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
    NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
    size_t group_points_non_zero;           // non-zero samples in the current group
    size_t group_points_added;              // samples in the current group (incl. empty)
    NETDATA_DOUBLE group_anomaly_rate;      // sum of per-point anomaly percentages in the group
    RRDR_VALUE_FLAGS group_value_flags;     // accumulated flags (e.g. RRDR_VALUE_RESET)

    // statistics
    size_t db_total_points_read;
    size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
} QUERY_ENGINE_OPS;
  857. // ----------------------------------------------------------------------------
  858. // query planer
  859. #define query_plan_should_switch_plan(ops, now) ((now) >= (ops).current_plan_expire_time)
// Make plan entry 'plan_id' the active one: open a storage query on its
// tier for its [after, before] window and wire the per-tier query ops
// (next_metric / is_finished / finalize) into 'ops'. 'overwrite_after',
// when inside the entry's window, moves the query start forward so already
// returned data is not re-read.
static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after) {
    // clamp out-of-range ids to the last plan entry
    if(unlikely(plan_id >= ops->plan.entries))
        plan_id = ops->plan.entries - 1;

    time_t after = ops->plan.data[plan_id].after;
    time_t before = ops->plan.data[plan_id].before;

    if(overwrite_after > after && overwrite_after < before)
        after = overwrite_after;

    ops->tier = ops->plan.data[plan_id].tier;
    ops->tier_ptr = ops->rd->tiers[ops->tier];
    ops->tier_ptr->query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before, ops->r->internal.tier_query_fetch);
    ops->next_metric = ops->tier_ptr->query_ops.next_metric;
    ops->is_finished = ops->tier_ptr->query_ops.is_finished;
    ops->finalize = ops->tier_ptr->query_ops.finalize;
    ops->current_plan = plan_id;
    ops->current_plan_expire_time = ops->plan.data[plan_id].before;
}
// Switch to the next plan entry whose 'before' is still ahead of both 'now'
// and the end time of the last point we received (entries already fully in
// the past are skipped). If no such entry exists, the current (last) plan
// stays active and its storage query remains open. Otherwise the old query
// is finalized and the new entry is activated, starting no earlier than the
// data we already have.
static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
    // debug-build check: we should only be called once the active entry expired
    internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
                   "QUERY: switching query plan too early!");

    time_t next_plan_before_time;
    do {
        ops->current_plan++;

        if (ops->current_plan >= ops->plan.entries) {
            // no more entries - keep using the last one (query stays open)
            ops->current_plan = ops->plan.entries - 1;
            return;
        }

        next_plan_before_time = ops->plan.data[ops->current_plan].before;
    } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);

    // close the storage query of the previous plan entry
    if(ops->finalize) {
        ops->finalize(&ops->handle);
        ops->finalize = NULL;
    }

    query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));

    // internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
}
  895. static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
  896. QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
  897. QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
  898. return (p1->after < p2->after)?-1:1;
  899. }
// Build and activate the query plan for dimension ops->rd over
// [after_wanted, before_wanted]: start with the selected (or best) tier,
// then - unless the user pinned a tier - extend the plan with finer/coarser
// tiers to cover any part of the timeframe the selected tier is missing at
// its start or end. The plan is sorted by start time and stretched to cover
// the full requested window, then its first entry is activated.
static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, long points_wanted) {
    RRDDIM *rd = ops->rd;

    //BUFFER *wb = buffer_create(1000);
    //buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);

    // put our selected tier as the first plan
    size_t selected_tier;
    if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER && ops->r->internal.query_tier >= 0 && ops->r->internal.query_tier < storage_tiers) {
        // the user pinned a valid tier - use it exclusively
        selected_tier = ops->r->internal.query_tier;
    }
    else {
        selected_tier = rrddim_find_best_tier_for_timeframe(rd, after_wanted, before_wanted, points_wanted);

        // the pinned tier was invalid - drop the option so other tiers can help
        if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
            ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
    }

    ops->plan.entries = 1;
    ops->plan.data[0].tier = selected_tier;
    ops->plan.data[0].after = rd->tiers[selected_tier]->query_ops.oldest_time(rd->tiers[selected_tier]->db_metric_handle);
    ops->plan.data[0].before = rd->tiers[selected_tier]->query_ops.latest_time(rd->tiers[selected_tier]->db_metric_handle);

    if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
        // the selected tier
        time_t selected_tier_first_time_t = ops->plan.data[0].after;
        time_t selected_tier_last_time_t = ops->plan.data[0].before;

        //buffer_sprintf(wb, ": SELECTED tier %zu, from %ld to %ld", selected_tier, ops->plan.data[0].after, ops->plan.data[0].before);

        // check if our selected tier can start the query
        if (selected_tier_first_time_t > after_wanted) {
            // we need some help from other tiers
            for (int tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
                // find the first time of this tier
                time_t first_time_t = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);

                //buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);

                // can it help?
                if (first_time_t < selected_tier_first_time_t) {
                    // it can help us add detail at the beginning of the query
                    QUERY_PLAN_ENTRY t = {
                        .tier = tr,
                        .after = (first_time_t < after_wanted) ? after_wanted : first_time_t,
                        .before = selected_tier_first_time_t};
                    ops->plan.data[ops->plan.entries++] = t;

                    // prepare for the tier
                    selected_tier_first_time_t = t.after;

                    if (t.after <= after_wanted)
                        break;
                }
            }
        }

        // check if our selected tier can finish the query
        if (selected_tier_last_time_t < before_wanted) {
            // we need some help from other tiers
            for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
                // find the last time of this tier
                time_t last_time_t = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);

                //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);

                // can it help?
                if (last_time_t > selected_tier_last_time_t) {
                    // it can help us add detail at the end of the query
                    QUERY_PLAN_ENTRY t = {
                        .tier = tr,
                        .after = selected_tier_last_time_t,
                        .before = (last_time_t > before_wanted) ? before_wanted : last_time_t};
                    ops->plan.data[ops->plan.entries++] = t;

                    // prepare for the tier
                    selected_tier_last_time_t = t.before;

                    if (t.before >= before_wanted)
                        break;
                }
            }
        }
    }

    // sort the query plan
    if(ops->plan.entries > 1)
        qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);

    // make sure it has the whole timeframe we need
    ops->plan.data[0].after = after_wanted;
    ops->plan.data[ops->plan.entries - 1].before = before_wanted;

    //buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);
    //for(size_t i = 0; i < ops->plan.entries ;i++)
    //    buffer_sprintf(wb, ": STEP %zu = use tier %zu from %ld to %ld", i+1, ops->plan.data[i].tier, ops->plan.data[i].after, ops->plan.data[i].before);
    //internal_error(true, "%s", buffer_tostring(wb));

    query_planer_activate_plan(ops, 0, 0);
}
  980. // ----------------------------------------------------------------------------
  981. // dimension level query engine
// Linearly interpolate 'this_point' at time 'now': when the point covers
// more than 1 second, sits exactly after 'last_point', and both values are
// valid numbers, its value is adjusted to the fraction of its period that
// has elapsed by 'now', and its end_time is moved back to 'now'.
// Otherwise the point is left untouched.
#define query_interpolate_point(this_point, last_point, now) do { \
if(likely( \
/* the point to interpolate is more than 1s wide */ \
(this_point).end_time - (this_point).start_time > 1 \
\
/* the two points are exactly next to each other */ \
&& (last_point).end_time == (this_point).start_time \
\
/* both points are valid numbers */ \
&& netdata_double_isnumber((this_point).value) \
&& netdata_double_isnumber((last_point).value) \
\
)) { \
(this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
(this_point).end_time = now; \
} \
} while(0)
// Feed one QUERY_POINT into the current grouping window of 'ops':
// valid values are passed to the grouping function (counting non-zeros and
// propagating the RESET flag); empty points (NAN) still count towards
// group_points_added and the anomaly accumulator, so gaps dilute the
// group's anomaly rate.
#define query_add_point_to_group(r, point, ops) do { \
if(likely(netdata_double_isnumber((point).value))) { \
if(likely(fpclassify((point).value) != FP_ZERO)) \
(ops).group_points_non_zero++; \
\
if(unlikely((point).flags & SN_FLAG_RESET)) \
(ops).group_value_flags |= RRDR_VALUE_RESET; \
\
(ops).grouping_add(r, (point).value); \
} \
\
(ops).group_points_added++; \
(ops).group_anomaly_rate += (point).anomaly; \
} while(0)
// Execute the query for one dimension and fill column 'dim_id_in_rrdr' of
// the RRDR with 'points_wanted' aggregated points covering the window
// after_wanted..before_wanted. Database points are read through the query
// plan (switching tiers as needed), interpolated onto the view grid,
// grouped with the configured grouping functions, and flushed one output
// point at a time. Also updates r->min/max/after/before and read statistics.
static inline void rrd2rrdr_do_dimension(
    RRDR *r
    , long points_wanted
    , RRDDIM *rd
    , long dim_id_in_rrdr
    , time_t after_wanted
    , time_t before_wanted
){
    // first/last output timestamps actually produced
    time_t max_date = 0,
           min_date = 0;

    size_t points_added = 0;

    QUERY_ENGINE_OPS ops = {
        .r = r,
        .rd = rd,
        .grouping_add = r->internal.grouping_add,
        .grouping_flush = r->internal.grouping_flush,
        .tier_query_fetch = r->internal.tier_query_fetch,
        .view_update_every = r->update_every,
        .query_granularity = r->update_every / r->group,
        .group_value_flags = RRDR_VALUE_NOTHING
    };

    long rrdr_line = -1;
    bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;

    query_plan(&ops, after_wanted, before_wanted, points_wanted);

    NETDATA_DOUBLE min = r->min, max = r->max;

    // sliding window of the last 3 db points: last2 <- last1 <- new
    QUERY_POINT last2_point = QUERY_POINT_EMPTY;
    QUERY_POINT last1_point = QUERY_POINT_EMPTY;
    QUERY_POINT new_point = QUERY_POINT_EMPTY;

    time_t now_start_time = after_wanted - ops.query_granularity;
    time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;

    // The main loop, based on the query granularity we need
    for( ; (long)points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {

        if(query_plan_should_switch_plan(ops, now_end_time))
            query_planer_next_plan(&ops, now_end_time, new_point.end_time);

        // read all the points of the db, prior to the time we need (now_end_time)

        size_t count_same_end_time = 0;
        while(count_same_end_time < 100) {  // bail out after 100 non-advancing db points
            if(likely(count_same_end_time == 0)) {
                last2_point = last1_point;
                last1_point = new_point;
            }

            if(unlikely(ops.is_finished(&ops.handle))) {
                // db exhausted: synthesize an empty point up to now_end_time
                if(count_same_end_time != 0) {
                    last2_point = last1_point;
                    last1_point = new_point;
                }
                new_point = QUERY_POINT_EMPTY;
                new_point.start_time = last1_point.end_time;
                new_point.end_time = now_end_time;
                break;
            }

            // fetch the new point
            {
                STORAGE_POINT sp = ops.next_metric(&ops.handle);

                ops.db_points_read_per_tier[ops.tier]++;
                ops.db_total_points_read++;

                new_point.start_time = sp.start_time;
                new_point.end_time = sp.end_time;
                // anomaly rate of the storage point, as a percentage 0-100
                new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
                query_point_set_id(new_point, ops.db_total_points_read);

                // set the right value to the point we got
                if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {

                    if(unlikely(use_anomaly_bit_as_value))
                        new_point.value = new_point.anomaly;

                    else {
                        switch (ops.tier_query_fetch) {
                            default:
                            case TIER_QUERY_FETCH_AVERAGE:
                                new_point.value = sp.sum / sp.count;
                                break;

                            case TIER_QUERY_FETCH_MIN:
                                new_point.value = sp.min;
                                break;

                            case TIER_QUERY_FETCH_MAX:
                                new_point.value = sp.max;
                                break;

                            case TIER_QUERY_FETCH_SUM:
                                new_point.value = sp.sum;
                                break;
                        };
                    }
                }
                else {
                    new_point.value = NAN;
                    new_point.flags = SN_FLAG_NONE;
                }
            }

            // check if the db is giving us zero duration points
            if(unlikely(new_point.start_time == new_point.end_time)) {
                internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu start time %ld, end time %ld, that are both equal",
                               rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time);

                // repair it: assume the point spans one tier granularity
                new_point.start_time = new_point.end_time - ((time_t)ops.tier_ptr->tier_grouping * (time_t)ops.rd->update_every);
            }

            // check if the db is advancing the query
            if(unlikely(new_point.end_time <= last1_point.end_time)) {
                internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, before the last point %zu end time %ld, now is %ld to %ld",
                               rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time,
                               last1_point.id, last1_point.end_time, now_start_time, now_end_time);

                count_same_end_time++;
                continue;
            }
            count_same_end_time = 0;

            // decide how to use this point
            if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
                // this db point ends before our now_end_time

                if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
                    // this db point ends after our now_start time

                    query_add_point_to_group(r, new_point, ops);
                }
                else {
                    // we don't need this db point
                    // it is totally outside our current time-frame

                    // this is desirable for the first point of the query
                    // because it allows us to interpolate the next point
                    // at exactly the time we will want

                    // we only log if this is not point 1
                    internal_error(new_point.end_time < after_wanted && new_point.id > 1,
                                   "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
                                   rd->rrdset->name, rd->name,
                                   new_point.id, new_point.start_time, new_point.end_time,
                                   now_start_time, now_end_time,
                                   after_wanted, before_wanted);
                }
            }
            else {
                // the point ends in the future
                // so, we will interpolate it below, at the inner loop
                break;
            }
        }

        if(unlikely(count_same_end_time)) {
            internal_error(true,
                           "QUERY: the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
                           last1_point.end_time, count_same_end_time);

            // force progress so the outer loop cannot spin forever
            if(unlikely(new_point.end_time <= last1_point.end_time))
                new_point.end_time = now_end_time;
        }

        // the inner loop
        // we have 3 points in memory: last2, last1, new
        // we select the one to use based on their timestamps

        size_t iterations = 0;
        for ( ; now_end_time <= new_point.end_time && (long)points_added < points_wanted ;
                now_end_time += ops.view_update_every, iterations++) {
            // now_start_time is wrong in this loop
            // but, we don't need it

            QUERY_POINT current_point;

            if(likely(now_end_time > new_point.start_time)) {
                // it is time for our NEW point to be used
                current_point = new_point;
                query_interpolate_point(current_point, last1_point, now_end_time);

                internal_error(current_point.id > 0 && last1_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
                               "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
                               " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point1 in this query.",
                               rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
                               current_point.id, current_point.start_time, current_point.end_time, now_end_time);
            }
            else if(likely(now_end_time <= last1_point.end_time)) {
                // our LAST point is still valid
                current_point = last1_point;
                query_interpolate_point(current_point, last2_point, now_end_time);

                internal_error(current_point.id > 0 && last2_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
                               "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
                               " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point2 in this query.",
                               rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
                               current_point.id, current_point.start_time, current_point.end_time, now_end_time);
            }
            else {
                // a GAP, we don't have a value this time
                current_point = QUERY_POINT_EMPTY;
            }

            query_add_point_to_group(r, current_point, ops);

            rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
            size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;

            if(unlikely(!min_date)) min_date = now_end_time;
            max_date = now_end_time;

            // find the place to store our values
            RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];

            // update the dimension options
            if(likely(ops.group_points_non_zero))
                r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;

            // store the specific point options
            *rrdr_value_options_ptr = ops.group_value_flags;

            // store the group value
            NETDATA_DOUBLE group_value = ops.grouping_flush(r, rrdr_value_options_ptr);
            r->v[rrdr_o_v_index] = group_value;

            // we only store uint8_t anomaly rates,
            // so let's get double precision by storing
            // anomaly rates in the range 0 - 200
            // (group_points_added is always >= 1 here - query_add_point_to_group
            // above incremented it)
            r->ar[rrdr_o_v_index] = ops.group_anomaly_rate / (NETDATA_DOUBLE)ops.group_points_added;

            if(likely(points_added || dim_id_in_rrdr)) {
                // find the min/max across all dimensions

                if(unlikely(group_value < min)) min = group_value;
                if(unlikely(group_value > max)) max = group_value;

            }
            else {
                // runs only when dim_id_in_rrdr == 0 && points_added == 0
                // so, on the first point added for the query.
                min = max = group_value;
            }

            points_added++;
            ops.group_points_added = 0;
            ops.group_value_flags = RRDR_VALUE_NOTHING;
            ops.group_points_non_zero = 0;
            ops.group_anomaly_rate = 0;
        }

        // the loop above increased "now" by query_granularity,
        // but the main loop will increase it too,
        // so, let's undo the last iteration of this loop
        if(iterations)
            now_end_time -= ops.view_update_every;
    }
    ops.finalize(&ops.handle);

    r->internal.result_points_generated += points_added;
    r->internal.db_points_read += ops.db_total_points_read;
    for(int tr = 0; tr < storage_tiers ; tr++)
        r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];

    r->min = min;
    r->max = max;
    r->before = max_date;
    r->after = min_date - ops.view_update_every + ops.query_granularity;
    rrdr_done(r, rrdr_line);

    internal_error((long)points_added != points_wanted,
                   "QUERY: query on %s/%s requested %zu points, but RRDR added %zu (%zu db points read).",
                   r->st->name, rd->name, (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
}
  1238. // ----------------------------------------------------------------------------
  1239. // fill the gap of a tier
  1240. extern void store_metric_at_tier(RRDDIM *rd, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
  1241. void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
  1242. if(unlikely(tier < 0 || tier >= storage_tiers)) return;
  1243. if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
  1244. struct rrddim_tier *t = rd->tiers[tier];
  1245. if(unlikely(!t)) return;
  1246. time_t latest_time_t = t->query_ops.latest_time(t->db_metric_handle);
  1247. time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
  1248. time_t time_diff = now - latest_time_t;
  1249. // if the user wants only NEW backfilling, and we don't have any data
  1250. if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_t <= 0) return;
  1251. // there is really nothing we can do
  1252. if(now <= latest_time_t || time_diff < granularity) return;
  1253. struct rrddim_query_handle handle;
  1254. size_t all_points_read = 0;
  1255. // for each lower tier
  1256. for(int tr = tier - 1; tr >= 0 ;tr--){
  1257. time_t smaller_tier_first_time = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
  1258. time_t smaller_tier_last_time = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
  1259. if(smaller_tier_last_time <= latest_time_t) continue; // it is as bad as we are
  1260. long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
  1261. long before_wanted = smaller_tier_last_time;
  1262. struct rrddim_tier *tmp = rd->tiers[tr];
  1263. tmp->query_ops.init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, TIER_QUERY_FETCH_AVERAGE);
  1264. size_t points = 0;
  1265. while(!tmp->query_ops.is_finished(&handle)) {
  1266. STORAGE_POINT sp = tmp->query_ops.next_metric(&handle);
  1267. if(sp.end_time > latest_time_t) {
  1268. latest_time_t = sp.end_time;
  1269. store_metric_at_tier(rd, t, sp, sp.end_time * USEC_PER_SEC);
  1270. points++;
  1271. }
  1272. }
  1273. all_points_read += points;
  1274. tmp->query_ops.finalize(&handle);
  1275. //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
  1276. // rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
  1277. }
  1278. rrdr_query_completed(all_points_read, all_points_read);
  1279. }
  1280. // ----------------------------------------------------------------------------
  1281. // fill RRDR for the whole chart
  1282. #ifdef NETDATA_INTERNAL_CHECKS
// Debug helper (internal-checks builds only): log a single line comparing
// what the caller requested, what the query engine decided to use, and what
// the RRDR actually contains (after/before/duration/points), plus what the
// database holds, followed by a caller-supplied message 'msg'.
// Takes the chart read lock around the db first/last entry lookups.
static void rrd2rrdr_log_request_response_metadata(RRDR *r
        , RRDR_OPTIONS options __maybe_unused
        , RRDR_GROUPING group_method
        , bool aligned
        , long group
        , long resampling_time
        , long resampling_group
        , time_t after_wanted
        , time_t after_requested
        , time_t before_wanted
        , time_t before_requested
        , long points_requested
        , long points_wanted
        //, size_t after_slot
        //, size_t before_slot
        , const char *msg
        ) {
    netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
    info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
         "after (got: %zu, want: %zu, req: %ld, db: %zu), "
         "before (got: %zu, want: %zu, req: %ld, db: %zu), "
         "duration (got: %zu, want: %zu, req: %ld, db: %zu), "
         //"slot (after: %zu, before: %zu, delta: %zu), "
         "points (got: %ld, want: %ld, req: %ld, db: %ld), "
         "%s"
         , r->st->name
         , r->st->update_every

         // grouping
         , (aligned) ? "aligned" : "unaligned"
         , group_method2string(group_method)
         , group
         , resampling_time
         , resampling_group

         // after
         , (size_t)r->after
         , (size_t)after_wanted
         , after_requested
         , (size_t)rrdset_first_entry_t_nolock(r->st)

         // before
         , (size_t)r->before
         , (size_t)before_wanted
         , before_requested
         , (size_t)rrdset_last_entry_t_nolock(r->st)

         // duration
         , (size_t)(r->before - r->after + r->st->update_every)
         , (size_t)(before_wanted - after_wanted + r->st->update_every)
         , before_requested - after_requested
         , (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)

         // slot
         /*
         , after_slot
         , before_slot
         , (after_slot > before_slot) ? (r->st->entries - after_slot + before_slot) : (before_slot - after_slot)
         */

         // points
         , r->rows
         , points_wanted
         , points_requested
         , r->st->entries

         // message
         , msg
    );
    netdata_rwlock_unlock(&r->st->rrdset_rwlock);
}
  1347. #endif // NETDATA_INTERNAL_CHECKS
  1348. // Returns 1 if an absolute period was requested or 0 if it was a relative period
  1349. int rrdr_relative_window_to_absolute(long long *after, long long *before) {
  1350. time_t now = now_realtime_sec() - 1;
  1351. int absolute_period_requested = -1;
  1352. long long after_requested, before_requested;
  1353. before_requested = *before;
  1354. after_requested = *after;
  1355. // allow relative for before (smaller than API_RELATIVE_TIME_MAX)
  1356. if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
  1357. // if the user asked for a positive relative time,
  1358. // flip it to a negative
  1359. if(before_requested > 0)
  1360. before_requested = -before_requested;
  1361. before_requested = now + before_requested;
  1362. absolute_period_requested = 0;
  1363. }
  1364. // allow relative for after (smaller than API_RELATIVE_TIME_MAX)
  1365. if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
  1366. if(after_requested > 0)
  1367. after_requested = -after_requested;
  1368. // if the user didn't give an after, use the number of points
  1369. // to give a sane default
  1370. if(after_requested == 0)
  1371. after_requested = -600;
  1372. // since the query engine now returns inclusive timestamps
  1373. // it is awkward to return 6 points when after=-5 is given
  1374. // so for relative queries we add 1 second, to give
  1375. // more predictable results to users.
  1376. after_requested = before_requested + after_requested + 1;
  1377. absolute_period_requested = 0;
  1378. }
  1379. if(absolute_period_requested == -1)
  1380. absolute_period_requested = 1;
  1381. // check if the parameters are flipped
  1382. if(after_requested > before_requested) {
  1383. long long t = before_requested;
  1384. before_requested = after_requested;
  1385. after_requested = t;
  1386. }
  1387. // if the query requests future data
  1388. // shift the query back to be in the present time
  1389. // (this may also happen because of the rules above)
  1390. if(before_requested > now) {
  1391. long long delta = before_requested - now;
  1392. before_requested -= delta;
  1393. after_requested -= delta;
  1394. }
  1395. *before = before_requested;
  1396. *after = after_requested;
  1397. return absolute_period_requested;
  1398. }
// #define DEBUG_QUERY_LOGIC 1

#ifdef DEBUG_QUERY_LOGIC
// Query-planner tracing: each planning decision appends a short ":tag value"
// to a BUFFER, which query_debug_log_fin() flushes as one info() line.
// NOTE: these macros expand inside rrd2rrdr() and reference its local
// variables (st, after_requested, before_requested, points_requested,
// resampling_time_requested, after_wanted, before_wanted, points_wanted,
// group, query_granularity, resampling_group, resampling_divisor).
#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
#define query_debug_log_fin() { \
        info("QUERY: chart '%s', after:%lld, before:%lld, duration:%lld, points:%ld, res:%ld - wanted => after:%lld, before:%lld, points:%ld, group:%ld, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", st->name, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
        buffer_free(debug_log); \
        debug_log = NULL; \
}
// query_debug_log_free() is for early-exit paths that never reach _fin()
#define query_debug_log_free() do { buffer_free(debug_log); } while(0)
#else
// tracing disabled: all the logging macros collapse to no-ops
#define query_debug_log_init() debug_dummy()
#define query_debug_log(args...) debug_dummy()
#define query_debug_log_fin() debug_dummy()
#define query_debug_log_free() debug_dummy()
#endif
  1415. RRDR *rrd2rrdr(
  1416. ONEWAYALLOC *owa
  1417. , RRDSET *st
  1418. , long points_requested
  1419. , long long after_requested
  1420. , long long before_requested
  1421. , RRDR_GROUPING group_method
  1422. , long resampling_time_requested
  1423. , RRDR_OPTIONS options
  1424. , const char *dimensions
  1425. , struct context_param *context_param_list
  1426. , const char *group_options
  1427. , int timeout
  1428. , int tier
  1429. ) {
  1430. // RULES
  1431. // points_requested = 0
  1432. // the user wants all the natural points the database has
  1433. //
  1434. // after_requested = 0
  1435. // the user wants to start the query from the oldest point in our database
  1436. //
  1437. // before_requested = 0
  1438. // the user wants the query to end to the latest point in our database
  1439. //
  1440. // when natural points are wanted, the query has to be aligned to the update_every
  1441. // of the database
  1442. long points_wanted = points_requested;
  1443. long long after_wanted = after_requested;
  1444. long long before_wanted = before_requested;
  1445. int update_every = st->update_every;
  1446. bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
  1447. bool automatic_natural_points = (points_wanted == 0);
  1448. bool relative_period_requested = false;
  1449. bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
  1450. bool before_is_aligned_to_db_end = false;
  1451. query_debug_log_init();
  1452. // make sure points_wanted is positive
  1453. if(points_wanted < 0) {
  1454. points_wanted = -points_wanted;
  1455. query_debug_log(":-points_wanted %ld", points_wanted);
  1456. }
  1457. if(ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
  1458. relative_period_requested = true;
  1459. natural_points = true;
  1460. options |= RRDR_OPTION_NATURAL_POINTS;
  1461. query_debug_log(":relative+natural");
  1462. }
  1463. // if the user wants virtual points, make sure we do it
  1464. if(options & RRDR_OPTION_VIRTUAL_POINTS)
  1465. natural_points = false;
  1466. // set the right flag about natural and virtual points
  1467. if(natural_points) {
  1468. options |= RRDR_OPTION_NATURAL_POINTS;
  1469. if(options & RRDR_OPTION_VIRTUAL_POINTS)
  1470. options &= ~RRDR_OPTION_VIRTUAL_POINTS;
  1471. }
  1472. else {
  1473. options |= RRDR_OPTION_VIRTUAL_POINTS;
  1474. if(options & RRDR_OPTION_NATURAL_POINTS)
  1475. options &= ~RRDR_OPTION_NATURAL_POINTS;
  1476. }
  1477. if(after_wanted == 0 || before_wanted == 0) {
  1478. // for non-context queries we have to find the duration of the database
  1479. // for context queries we will assume 600 seconds duration
  1480. if(!context_param_list) {
  1481. relative_period_requested = true;
  1482. rrdset_rdlock(st);
  1483. time_t first_entry_t = rrdset_first_entry_t_nolock(st);
  1484. time_t last_entry_t = rrdset_last_entry_t_nolock(st);
  1485. rrdset_unlock(st);
  1486. if(first_entry_t == 0 || last_entry_t == 0) {
  1487. internal_error(true, "QUERY: chart without data detected on '%s'", st->name);
  1488. query_debug_log_free();
  1489. return NULL;
  1490. }
  1491. query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
  1492. if (after_wanted == 0) {
  1493. after_wanted = first_entry_t;
  1494. query_debug_log(":zero after_wanted %lld", after_wanted);
  1495. }
  1496. if (before_wanted == 0) {
  1497. before_wanted = last_entry_t;
  1498. before_is_aligned_to_db_end = true;
  1499. query_debug_log(":zero before_wanted %lld", before_wanted);
  1500. }
  1501. if(points_wanted == 0) {
  1502. points_wanted = (last_entry_t - first_entry_t) / update_every;
  1503. query_debug_log(":zero points_wanted %ld", points_wanted);
  1504. }
  1505. }
  1506. // if they are still zero, assume 600
  1507. if(after_wanted == 0) {
  1508. after_wanted = -600;
  1509. query_debug_log(":zero600 after_wanted %lld", after_wanted);
  1510. }
  1511. }
  1512. if(points_wanted == 0) {
  1513. points_wanted = 600;
  1514. query_debug_log(":zero600 points_wanted %ld", points_wanted);
  1515. }
  1516. // convert our before_wanted and after_wanted to absolute
  1517. rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
  1518. query_debug_log(":relative2absolute after %lld, before %lld", after_wanted, before_wanted);
  1519. if(natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
  1520. update_every = rrdset_find_natural_update_every_for_timeframe(st, after_wanted, before_wanted, points_wanted, options, tier);
  1521. if(update_every <= 0) update_every = st->update_every;
  1522. query_debug_log(":natural update every %d", update_every);
  1523. }
  1524. // this is the update_every of the query
  1525. // it may be different to the update_every of the database
  1526. time_t query_granularity = (natural_points)?update_every:1;
  1527. if(query_granularity <= 0) query_granularity = 1;
  1528. query_debug_log(":query_granularity %ld", query_granularity);
  1529. // align before_wanted and after_wanted to query_granularity
  1530. if (before_wanted % query_granularity) {
  1531. before_wanted -= before_wanted % query_granularity;
  1532. query_debug_log(":granularity align before_wanted %lld", before_wanted);
  1533. }
  1534. if (after_wanted % query_granularity) {
  1535. after_wanted -= after_wanted % query_granularity;
  1536. query_debug_log(":granularity align after_wanted %lld", after_wanted);
  1537. }
  1538. // automatic_natural_points is set when the user wants all the points available in the database
  1539. if(automatic_natural_points) {
  1540. points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
  1541. if(unlikely(points_wanted <= 0)) points_wanted = 1;
  1542. query_debug_log(":auto natural points_wanted %ld", points_wanted);
  1543. }
  1544. time_t duration = before_wanted - after_wanted;
  1545. // if the resampling time is too big, extend the duration to the past
  1546. if (unlikely(resampling_time_requested > duration)) {
  1547. after_wanted = before_wanted - resampling_time_requested;
  1548. duration = before_wanted - after_wanted;
  1549. query_debug_log(":resampling after_wanted %lld", after_wanted);
  1550. }
  1551. // if the duration is not aligned to resampling time
  1552. // extend the duration to the past, to avoid a gap at the chart
  1553. // only when the missing duration is above 1/10th of a point
  1554. if(resampling_time_requested > query_granularity && duration % resampling_time_requested) {
  1555. time_t delta = duration % resampling_time_requested;
  1556. if(delta > resampling_time_requested / 10) {
  1557. after_wanted -= resampling_time_requested - delta;
  1558. duration = before_wanted - after_wanted;
  1559. query_debug_log(":resampling2 after_wanted %lld", after_wanted);
  1560. }
  1561. }
  1562. // the available points of the query
  1563. long points_available = (duration + 1) / query_granularity;
  1564. if(unlikely(points_available <= 0)) points_available = 1;
  1565. query_debug_log(":points_available %ld", points_available);
  1566. if(points_wanted > points_available) {
  1567. points_wanted = points_available;
  1568. query_debug_log(":max points_wanted %ld", points_wanted);
  1569. }
  1570. // calculate the desired grouping of source data points
  1571. long group = points_available / points_wanted;
  1572. if(group <= 0) group = 1;
  1573. // round "group" to the closest integer
  1574. if(points_available % points_wanted > points_wanted / 2)
  1575. group++;
  1576. query_debug_log(":group %ld", group);
  1577. if(points_wanted * group * query_granularity < duration) {
  1578. // the grouping we are going to do, is not enough
  1579. // to cover the entire duration requested, so
  1580. // we have to change the number of points, to make sure we will
  1581. // respect the timeframe as closely as possibly
  1582. // let's see how many points are the optimal
  1583. points_wanted = points_available / group;
  1584. if(points_wanted * group < points_available)
  1585. points_wanted++;
  1586. if(unlikely(points_wanted <= 0))
  1587. points_wanted = 1;
  1588. query_debug_log(":optimal points %ld", points_wanted);
  1589. }
  1590. // resampling_time_requested enforces a certain grouping multiple
  1591. NETDATA_DOUBLE resampling_divisor = 1.0;
  1592. long resampling_group = 1;
  1593. if(unlikely(resampling_time_requested > query_granularity)) {
  1594. // the points we should group to satisfy gtime
  1595. resampling_group = resampling_time_requested / query_granularity;
  1596. if(unlikely(resampling_time_requested % query_granularity))
  1597. resampling_group++;
  1598. query_debug_log(":resampling group %ld", resampling_group);
  1599. // adapt group according to resampling_group
  1600. if(unlikely(group < resampling_group)) {
  1601. group = resampling_group; // do not allow grouping below the desired one
  1602. query_debug_log(":group less res %ld", group);
  1603. }
  1604. if(unlikely(group % resampling_group)) {
  1605. group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
  1606. query_debug_log(":group mod res %ld", group);
  1607. }
  1608. // resampling_divisor = group / resampling_group;
  1609. resampling_divisor = (NETDATA_DOUBLE)(group * query_granularity) / (NETDATA_DOUBLE)resampling_time_requested;
  1610. query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
  1611. }
  1612. // now that we have group, align the requested timeframe to fit it.
  1613. if(aligned && before_wanted % (group * query_granularity)) {
  1614. if(before_is_aligned_to_db_end)
  1615. before_wanted -= before_wanted % (group * query_granularity);
  1616. else
  1617. before_wanted += (group * query_granularity) - before_wanted % (group * query_granularity);
  1618. query_debug_log(":align before_wanted %lld", before_wanted);
  1619. }
  1620. after_wanted = before_wanted - (points_wanted * group * query_granularity) + query_granularity;
  1621. query_debug_log(":final after_wanted %lld", after_wanted);
  1622. duration = before_wanted - after_wanted;
  1623. query_debug_log(":final duration %ld", duration + 1);
  1624. // check the context query based on the starting time of the query
  1625. if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
  1626. rebuild_context_param_list(owa, context_param_list, after_wanted);
  1627. st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
  1628. if(unlikely(!st))
  1629. return NULL;
  1630. }
  1631. internal_error(points_wanted != duration / (query_granularity * group) + 1,
  1632. "QUERY: points_wanted %ld is not points %ld",
  1633. points_wanted, duration / (query_granularity * group) + 1);
  1634. internal_error(group < resampling_group,
  1635. "QUERY: group %ld is less than the desired group points %ld",
  1636. group, resampling_group);
  1637. internal_error(group > resampling_group && group % resampling_group,
  1638. "QUERY: group %ld is not a multiple of the desired group points %ld",
  1639. group, resampling_group);
  1640. // -------------------------------------------------------------------------
  1641. // initialize our result set
  1642. // this also locks the chart for us
  1643. RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
  1644. if(unlikely(!r)) {
  1645. internal_error(true, "QUERY: cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld",
  1646. st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
  1647. return NULL;
  1648. }
  1649. if(unlikely(!r->d || !points_wanted)) {
  1650. internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld",
  1651. st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
  1652. return r;
  1653. }
  1654. if(relative_period_requested)
  1655. r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
  1656. else
  1657. r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
  1658. // find how many dimensions we have
  1659. long dimensions_count = r->d;
  1660. // -------------------------------------------------------------------------
  1661. // initialize RRDR
  1662. r->group = group;
  1663. r->update_every = (int)(group * query_granularity);
  1664. r->before = before_wanted;
  1665. r->after = after_wanted;
  1666. r->internal.points_wanted = points_wanted;
  1667. r->internal.resampling_group = resampling_group;
  1668. r->internal.resampling_divisor = resampling_divisor;
  1669. r->internal.query_options = options;
  1670. r->internal.query_tier = tier;
  1671. // -------------------------------------------------------------------------
  1672. // assign the processor functions
  1673. rrdr_set_grouping_function(r, group_method);
  1674. // allocate any memory required by the grouping method
  1675. r->internal.grouping_create(r, group_options);
  1676. // -------------------------------------------------------------------------
  1677. // disable the not-wanted dimensions
  1678. if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
  1679. rrdset_check_rdlock(st);
  1680. if(dimensions)
  1681. rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
  1682. query_debug_log_fin();
  1683. // -------------------------------------------------------------------------
  1684. // do the work for each dimension
  1685. time_t max_after = 0, min_before = 0;
  1686. long max_rows = 0;
  1687. RRDDIM *first_rd = context_param_list ? context_param_list->rd : st->dimensions;
  1688. RRDDIM *rd;
  1689. long c, dimensions_used = 0, dimensions_nonzero = 0;
  1690. struct timeval query_start_time;
  1691. struct timeval query_current_time;
  1692. if (timeout) now_realtime_timeval(&query_start_time);
  1693. for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
  1694. // if we need a percentage, we need to calculate all dimensions
  1695. if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
  1696. if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
  1697. continue;
  1698. }
  1699. r->od[c] |= RRDR_DIMENSION_SELECTED;
  1700. // reset the grouping for the new dimension
  1701. r->internal.grouping_reset(r);
  1702. rrd2rrdr_do_dimension(r, points_wanted, rd, c, after_wanted, before_wanted);
  1703. if (timeout)
  1704. now_realtime_timeval(&query_current_time);
  1705. if(r->od[c] & RRDR_DIMENSION_NONZERO)
  1706. dimensions_nonzero++;
  1707. // verify all dimensions are aligned
  1708. if(unlikely(!dimensions_used)) {
  1709. min_before = r->before;
  1710. max_after = r->after;
  1711. max_rows = r->rows;
  1712. }
  1713. else {
  1714. if(r->after != max_after) {
  1715. internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
  1716. st->name, (size_t)max_after, rd->name, (size_t)r->after);
  1717. r->after = (r->after > max_after) ? r->after : max_after;
  1718. }
  1719. if(r->before != min_before) {
  1720. internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
  1721. st->name, (size_t)min_before, rd->name, (size_t)r->before);
  1722. r->before = (r->before < min_before) ? r->before : min_before;
  1723. }
  1724. if(r->rows != max_rows) {
  1725. internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
  1726. st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
  1727. r->rows = (r->rows > max_rows) ? r->rows : max_rows;
  1728. }
  1729. }
  1730. dimensions_used++;
  1731. if (timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
  1732. log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
  1733. (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
  1734. r->result_options |= RRDR_RESULT_OPTION_CANCEL;
  1735. break;
  1736. }
  1737. }
  1738. #ifdef NETDATA_INTERNAL_CHECKS
  1739. if (dimensions_used) {
  1740. if(r->internal.log)
  1741. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1742. after_wanted, after_requested, before_wanted, before_requested,
  1743. points_requested, points_wanted, /*after_slot, before_slot,*/
  1744. r->internal.log);
  1745. if(r->rows != points_wanted)
  1746. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1747. after_wanted, after_requested, before_wanted, before_requested,
  1748. points_requested, points_wanted, /*after_slot, before_slot,*/
  1749. "got 'points' is not wanted 'points'");
  1750. if(aligned && (r->before % (group * query_granularity)) != 0)
  1751. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1752. after_wanted, after_requested, before_wanted,before_wanted,
  1753. points_requested, points_wanted, /*after_slot, before_slot,*/
  1754. "'before' is not aligned but alignment is required");
  1755. // 'after' should not be aligned, since we start inside the first group
  1756. //if(aligned && (r->after % group) != 0)
  1757. // rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
  1758. if(r->before != before_wanted)
  1759. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1760. after_wanted, after_requested, before_wanted, before_requested,
  1761. points_requested, points_wanted, /*after_slot, before_slot,*/
  1762. "chart is not aligned to requested 'before'");
  1763. if(r->before != before_wanted)
  1764. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1765. after_wanted, after_requested, before_wanted, before_requested,
  1766. points_requested, points_wanted, /*after_slot, before_slot,*/
  1767. "got 'before' is not wanted 'before'");
  1768. // reported 'after' varies, depending on group
  1769. if(r->after != after_wanted)
  1770. rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
  1771. after_wanted, after_requested, before_wanted, before_requested,
  1772. points_requested, points_wanted, /*after_slot, before_slot,*/
  1773. "got 'after' is not wanted 'after'");
  1774. }
  1775. #endif
  1776. // free all resources used by the grouping method
  1777. r->internal.grouping_free(r);
  1778. // when all the dimensions are zero, we should return all of them
  1779. if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
  1780. // all the dimensions are zero
  1781. // mark them as NONZERO to send them all
  1782. for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
  1783. if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
  1784. r->od[c] |= RRDR_DIMENSION_NONZERO;
  1785. }
  1786. }
  1787. rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
  1788. return r;
  1789. }