- // SPDX-License-Identifier: GPL-3.0-or-later
- #include "query.h"
- #include "web/api/formatters/rrd2json.h"
- #include "rrdr.h"
- #include "average/average.h"
- #include "countif/countif.h"
- #include "incremental_sum/incremental_sum.h"
- #include "max/max.h"
- #include "median/median.h"
- #include "min/min.h"
- #include "sum/sum.h"
- #include "stddev/stddev.h"
- #include "ses/ses.h"
- #include "des/des.h"
- #include "percentile/percentile.h"
- #include "trimmed_mean/trimmed_mean.h"
- #define POINTS_TO_EXPAND_QUERY 5
- // ----------------------------------------------------------------------------
- static struct {
- const char *name;
- uint32_t hash;
- RRDR_GROUPING value;
- // One time initialization for the module.
- // This is called once, when netdata starts.
- void (*init)(void);
- // Allocate all required structures for a query.
- // This is called once for each netdata query.
- void (*create)(struct rrdresult *r, const char *options);
- // Cleanup collected values, but don't destroy the structures.
- // This is called when the query engine switches dimensions,
- // as part of the same query (so same chart, switching metric).
- void (*reset)(struct rrdresult *r);
- // Free all resources allocated for the query.
- void (*free)(struct rrdresult *r);
- // Add a single value into the calculation.
- // The module may decide to cache it, or use it on the fly.
- void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
- // Generate a single result for the values added so far.
- // More values and points may be requested later.
- // It is up to the module to reset its internal structures
- // when flushing (for some modules it may be better to
- // continue after a flush as if nothing changed; for others,
- // a cleanup of their internal structures may be required).
- NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
- TIER_QUERY_FETCH tier_query_fetch;
- } api_v1_data_groups[] = {
- {.name = "average",
- .hash = 0,
- .value = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "mean", // alias on 'average'
- .hash = 0,
- .value = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean1",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN1,
- .init = NULL,
- .create= grouping_create_trimmed_mean1,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean2",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN2,
- .init = NULL,
- .create= grouping_create_trimmed_mean2,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean3",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN3,
- .init = NULL,
- .create= grouping_create_trimmed_mean3,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean5",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN5,
- .init = NULL,
- .create= grouping_create_trimmed_mean5,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean10",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN10,
- .init = NULL,
- .create= grouping_create_trimmed_mean10,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean15",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN15,
- .init = NULL,
- .create= grouping_create_trimmed_mean15,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean20",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN20,
- .init = NULL,
- .create= grouping_create_trimmed_mean20,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean25",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN25,
- .init = NULL,
- .create= grouping_create_trimmed_mean25,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN5,
- .init = NULL,
- .create= grouping_create_trimmed_mean5,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "incremental_sum",
- .hash = 0,
- .value = RRDR_GROUPING_INCREMENTAL_SUM,
- .init = NULL,
- .create= grouping_create_incremental_sum,
- .reset = grouping_reset_incremental_sum,
- .free = grouping_free_incremental_sum,
- .add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "incremental-sum",
- .hash = 0,
- .value = RRDR_GROUPING_INCREMENTAL_SUM,
- .init = NULL,
- .create= grouping_create_incremental_sum,
- .reset = grouping_reset_incremental_sum,
- .free = grouping_free_incremental_sum,
- .add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "median",
- .hash = 0,
- .value = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= grouping_create_median,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median1",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
- .init = NULL,
- .create= grouping_create_trimmed_median1,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median2",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
- .init = NULL,
- .create= grouping_create_trimmed_median2,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median3",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
- .init = NULL,
- .create= grouping_create_trimmed_median3,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median5",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
- .init = NULL,
- .create= grouping_create_trimmed_median5,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median10",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
- .init = NULL,
- .create= grouping_create_trimmed_median10,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median15",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
- .init = NULL,
- .create= grouping_create_trimmed_median15,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median20",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
- .init = NULL,
- .create= grouping_create_trimmed_median20,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median25",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
- .init = NULL,
- .create= grouping_create_trimmed_median25,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
- .init = NULL,
- .create= grouping_create_trimmed_median5,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile25",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE25,
- .init = NULL,
- .create= grouping_create_percentile25,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile50",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE50,
- .init = NULL,
- .create= grouping_create_percentile50,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile75",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE75,
- .init = NULL,
- .create= grouping_create_percentile75,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile80",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE80,
- .init = NULL,
- .create= grouping_create_percentile80,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile90",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE90,
- .init = NULL,
- .create= grouping_create_percentile90,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile95",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE95,
- .init = NULL,
- .create= grouping_create_percentile95,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile97",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE97,
- .init = NULL,
- .create= grouping_create_percentile97,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile98",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE98,
- .init = NULL,
- .create= grouping_create_percentile98,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile99",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE99,
- .init = NULL,
- .create= grouping_create_percentile99,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE95,
- .init = NULL,
- .create= grouping_create_percentile95,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "min",
- .hash = 0,
- .value = RRDR_GROUPING_MIN,
- .init = NULL,
- .create= grouping_create_min,
- .reset = grouping_reset_min,
- .free = grouping_free_min,
- .add = grouping_add_min,
- .flush = grouping_flush_min,
- .tier_query_fetch = TIER_QUERY_FETCH_MIN
- },
- {.name = "max",
- .hash = 0,
- .value = RRDR_GROUPING_MAX,
- .init = NULL,
- .create= grouping_create_max,
- .reset = grouping_reset_max,
- .free = grouping_free_max,
- .add = grouping_add_max,
- .flush = grouping_flush_max,
- .tier_query_fetch = TIER_QUERY_FETCH_MAX
- },
- {.name = "sum",
- .hash = 0,
- .value = RRDR_GROUPING_SUM,
- .init = NULL,
- .create= grouping_create_sum,
- .reset = grouping_reset_sum,
- .free = grouping_free_sum,
- .add = grouping_add_sum,
- .flush = grouping_flush_sum,
- .tier_query_fetch = TIER_QUERY_FETCH_SUM
- },
- // standard deviation
- {.name = "stddev",
- .hash = 0,
- .value = RRDR_GROUPING_STDDEV,
- .init = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_stddev,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "cv", // coefficient variation is calculated by stddev
- .hash = 0,
- .value = RRDR_GROUPING_CV,
- .init = NULL,
- .create= grouping_create_stddev, // not an error, stddev calculates this too
- .reset = grouping_reset_stddev, // not an error, stddev calculates this too
- .free = grouping_free_stddev, // not an error, stddev calculates this too
- .add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "rsd", // alias of 'cv'
- .hash = 0,
- .value = RRDR_GROUPING_CV,
- .init = NULL,
- .create= grouping_create_stddev, // not an error, stddev calculates this too
- .reset = grouping_reset_stddev, // not an error, stddev calculates this too
- .free = grouping_free_stddev, // not an error, stddev calculates this too
- .add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- /*
- {.name = "mean", // same as average, no need to define it again
- .hash = 0,
- .value = RRDR_GROUPING_MEAN,
- .setup = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_mean,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- */
- /*
- {.name = "variance", // meaningless to offer
- .hash = 0,
- .value = RRDR_GROUPING_VARIANCE,
- .setup = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_variance,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- */
- // single exponential smoothing
- {.name = "ses",
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .init = grouping_init_ses,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "ema", // alias for 'ses'
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .init = NULL,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "ewma", // alias for ses
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .init = NULL,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- // double exponential smoothing
- {.name = "des",
- .hash = 0,
- .value = RRDR_GROUPING_DES,
- .init = grouping_init_des,
- .create= grouping_create_des,
- .reset = grouping_reset_des,
- .free = grouping_free_des,
- .add = grouping_add_des,
- .flush = grouping_flush_des,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "countif",
- .hash = 0,
- .value = RRDR_GROUPING_COUNTIF,
- .init = NULL,
- .create= grouping_create_countif,
- .reset = grouping_reset_countif,
- .free = grouping_free_countif,
- .add = grouping_add_countif,
- .flush = grouping_flush_countif,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- // terminator
- {.name = NULL,
- .hash = 0,
- .value = RRDR_GROUPING_UNDEFINED,
- .init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- }
- };
- void web_client_api_v1_init_grouping(void) {
- int i;
- for(i = 0; api_v1_data_groups[i].name ; i++) {
- api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
- if(api_v1_data_groups[i].init)
- api_v1_data_groups[i].init();
- }
- }
- const char *group_method2string(RRDR_GROUPING group) {
- int i;
- for(i = 0; api_v1_data_groups[i].name ; i++) {
- if(api_v1_data_groups[i].value == group) {
- return api_v1_data_groups[i].name;
- }
- }
- return "unknown-group-method";
- }
- RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
- int i;
- uint32_t hash = simple_hash(name);
- for(i = 0; api_v1_data_groups[i].name ; i++)
- if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
- return api_v1_data_groups[i].value;
- return def;
- }
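- // Usage sketch (illustrative only, not called from here): mapping the
- // user-supplied "group" parameter of the data API to a grouping method,
- // with a fallback default when the name is unknown.
- //
- //     RRDR_GROUPING g;
- //     g = web_client_api_request_v1_data_group("median", RRDR_GROUPING_AVERAGE);
- //     // g == RRDR_GROUPING_MEDIAN (exact name match in api_v1_data_groups[])
- //     g = web_client_api_request_v1_data_group("p99", RRDR_GROUPING_AVERAGE);
- //     // "p99" is not in the table, so g falls back to RRDR_GROUPING_AVERAGE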
- const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
- int i;
- for(i = 0; api_v1_data_groups[i].name ; i++)
- if(unlikely(group == api_v1_data_groups[i].value))
- return api_v1_data_groups[i].name;
- return "unknown";
- }
- static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create = api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
- found = 1;
- }
- }
- if(!found) {
- errno = 0;
- internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
- r->internal.grouping_create = grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
- }
- }
- // ----------------------------------------------------------------------------
- // helpers to find our way in RRDR
- static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
- return &r->o[ rrdr_line * r->d ];
- }
- static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
- return &r->v[ rrdr_line * r->d ];
- }
- static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
- rrdr_line++;
- internal_error(rrdr_line >= (long)r->n,
- "QUERY: requested to step above RRDR size for query '%s'",
- r->internal.qt->id);
- internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
- "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of query '%s'",
- (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->internal.qt->id);
- // save the time
- r->t[rrdr_line] = t;
- return rrdr_line;
- }
- static inline void rrdr_done(RRDR *r, long rrdr_line) {
- r->rows = rrdr_line + 1;
- }
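- // RRDR layout note (derived from the indexing helpers above and their use
- // later in this file): values, options and anomaly rates are stored row-major,
- // one row per output point and one column per dimension, so the cell for
- // output line 'rrdr_line' and dimension 'dim' sits at rrdr_line * r->d + dim.
- //
- //     // illustrative example with r->d == 3 dimensions:
- //     // line 0 -> indexes 0,1,2   line 1 -> indexes 3,4,5   line 2 -> indexes 6,7,8
- //     // so r->v[2 * r->d + 1] is the value of dimension 1 at output line 2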
- // ----------------------------------------------------------------------------
- // tier management
- static bool query_metric_is_valid_tier(QUERY_METRIC *qm, size_t tier) {
- if(!qm->tiers[tier].db_metric_handle || !qm->tiers[tier].db_first_time_s || !qm->tiers[tier].db_last_time_s || !qm->tiers[tier].db_update_every_s)
- return false;
- return true;
- }
- static size_t query_metric_first_working_tier(QUERY_METRIC *qm) {
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
- // find the db time-range for this tier for all metrics
- STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
- if(!db_metric_handle || !first_time_s || !last_time_s || !update_every_s)
- continue;
- return tier;
- }
- return 0;
- }
- static long query_plan_points_coverage_weight(time_t db_first_time_s, time_t db_last_time_s, time_t db_update_every_s, time_t after_wanted, time_t before_wanted, size_t points_wanted, size_t tier __maybe_unused) {
- if(db_first_time_s == 0 ||
- db_last_time_s == 0 ||
- db_update_every_s == 0 ||
- db_first_time_s > before_wanted ||
- db_last_time_s < after_wanted)
- return -LONG_MAX;
- long long common_first_t = MAX(db_first_time_s, after_wanted);
- long long common_last_t = MIN(db_last_time_s, before_wanted);
- long long time_coverage = (common_last_t - common_first_t) * 1000000LL / (before_wanted - after_wanted);
- long long points_wanted_in_coverage = (long long)points_wanted * time_coverage / 1000000LL;
- long long points_available = (common_last_t - common_first_t) / db_update_every_s;
- long long points_delta = (long)(points_available - points_wanted_in_coverage);
- long long points_coverage = (points_delta < 0) ? (long)(points_available * time_coverage / points_wanted_in_coverage) : time_coverage;
- // a way to benefit higher tiers
- // points_coverage += (long)tier * 10000;
- if(points_available <= 0)
- return -LONG_MAX;
- return (long)(points_coverage + (25000LL * tier)); // 2.5% benefit for each higher tier
- }
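- // Worked example of the weight above (hypothetical numbers, for illustration):
- // query window [1000, 2000], points_wanted = 500.
- //   tier 0: first 1000, last 2000, update_every 1s
- //     time_coverage = 1000 * 1000000 / 1000 = 1000000
- //     points_wanted_in_coverage = 500, points_available = 1000, delta >= 0
- //     weight = 1000000 + 25000 * 0 = 1000000
- //   tier 1: first 1000, last 2000, update_every 60s
- //     points_available = 1000 / 60 = 16, delta < 0
- //     points_coverage = 16 * 1000000 / 500 = 32000
- //     weight = 32000 + 25000 * 1 = 57000
- // so tier 0 wins: it covers the whole window and has enough points for the query.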
- static size_t query_metric_best_tier_for_timeframe(QUERY_METRIC *qm, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- if(unlikely(storage_tiers < 2))
- return 0;
- if(unlikely(after_wanted == before_wanted || points_wanted <= 0))
- return query_metric_first_working_tier(qm);
- time_t min_first_time_s = 0;
- time_t max_last_time_s = 0;
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- if(!min_first_time_s || (first_time_s && first_time_s < min_first_time_s))
- min_first_time_s = first_time_s;
- if(!max_last_time_s || (last_time_s && last_time_s > max_last_time_s))
- max_last_time_s = last_time_s;
- }
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
- // find the db time-range for this tier for all metrics
- STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
- if( !db_metric_handle ||
- !first_time_s ||
- !last_time_s ||
- !update_every_s ||
- first_time_s > before_wanted ||
- last_time_s < after_wanted
- ) {
- qm->tiers[tier].weight = -LONG_MAX;
- continue;
- }
- internal_fatal(first_time_s > before_wanted || last_time_s < after_wanted, "QUERY: invalid db durations");
- qm->tiers[tier].weight = query_plan_points_coverage_weight(
- min_first_time_s, max_last_time_s, update_every_s,
- after_wanted, before_wanted, points_wanted, tier);
- }
- size_t best_tier = 0;
- for(size_t tier = 1; tier < storage_tiers ; tier++) {
- if(qm->tiers[tier].weight >= qm->tiers[best_tier].weight)
- best_tier = tier;
- }
- return best_tier;
- }
- static size_t rrddim_find_best_tier_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- if(unlikely(storage_tiers < 2))
- return 0;
- if(unlikely(after_wanted == before_wanted || points_wanted <= 0)) {
- internal_error(true, "QUERY: '%s' has invalid params to tier calculation", qt->id);
- return 0;
- }
- long weight[storage_tiers];
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
- time_t common_first_time_s = 0;
- time_t common_last_time_s = 0;
- time_t common_update_every_s = 0;
- // find the db time-range for this tier for all metrics
- for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = &qt->query.array[i];
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
- if(!first_time_s || !last_time_s || !update_every_s)
- continue;
- if(!common_first_time_s)
- common_first_time_s = first_time_s;
- else
- common_first_time_s = MIN(first_time_s, common_first_time_s);
- if(!common_last_time_s)
- common_last_time_s = last_time_s;
- else
- common_last_time_s = MAX(last_time_s, common_last_time_s);
- if(!common_update_every_s)
- common_update_every_s = update_every_s;
- else
- common_update_every_s = MIN(update_every_s, common_update_every_s);
- }
- weight[tier] = query_plan_points_coverage_weight(common_first_time_s, common_last_time_s, common_update_every_s, after_wanted, before_wanted, points_wanted, tier);
- }
- size_t best_tier = 0;
- for(size_t tier = 1; tier < storage_tiers ; tier++) {
- if(weight[tier] >= weight[best_tier])
- best_tier = tier;
- }
- if(weight[best_tier] == -LONG_MAX)
- best_tier = 0;
- return best_tier;
- }
- static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted, RRDR_OPTIONS options, size_t tier) {
- size_t best_tier;
- if((options & RRDR_OPTION_SELECTED_TIER) && tier < storage_tiers)
- best_tier = tier;
- else
- best_tier = rrddim_find_best_tier_for_timeframe(qt, after_wanted, before_wanted, points_wanted);
- // find the db minimum update every for this tier for all metrics
- time_t common_update_every_s = default_rrd_update_every;
- for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = &qt->query.array[i];
- time_t update_every_s = qm->tiers[best_tier].db_update_every_s;
- if(!i)
- common_update_every_s = update_every_s;
- else
- common_update_every_s = MIN(update_every_s, common_update_every_s);
- }
- return common_update_every_s;
- }
- // ----------------------------------------------------------------------------
- // query ops
- typedef struct query_point {
- time_t end_time;
- time_t start_time;
- NETDATA_DOUBLE value;
- NETDATA_DOUBLE anomaly;
- SN_FLAGS flags;
- #ifdef NETDATA_INTERNAL_CHECKS
- size_t id;
- #endif
- } QUERY_POINT;
- QUERY_POINT QUERY_POINT_EMPTY = {
- .end_time = 0,
- .start_time = 0,
- .value = NAN,
- .anomaly = 0,
- .flags = SN_FLAG_NONE,
- #ifdef NETDATA_INTERNAL_CHECKS
- .id = 0,
- #endif
- };
- #ifdef NETDATA_INTERNAL_CHECKS
- #define query_point_set_id(point, point_id) (point).id = point_id
- #else
- #define query_point_set_id(point, point_id) debug_dummy()
- #endif
- typedef struct query_engine_ops {
- // configuration
- RRDR *r;
- QUERY_METRIC *qm;
- time_t view_update_every;
- time_t query_granularity;
- TIER_QUERY_FETCH tier_query_fetch;
- // query planner
- size_t current_plan;
- time_t current_plan_expire_time;
- time_t plan_expanded_after;
- time_t plan_expanded_before;
- // storage queries
- size_t tier;
- struct query_metric_tier *tier_ptr;
- struct storage_engine_query_handle *handle;
- STORAGE_POINT (*next_metric)(struct storage_engine_query_handle *handle);
- int (*is_finished)(struct storage_engine_query_handle *handle);
- void (*finalize)(struct storage_engine_query_handle *handle);
- // aggregating points over time
- void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
- NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
- size_t group_points_non_zero;
- size_t group_points_added;
- NETDATA_DOUBLE group_anomaly_rate;
- RRDR_VALUE_FLAGS group_value_flags;
- // statistics
- size_t db_total_points_read;
- size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
- } QUERY_ENGINE_OPS;
- // ----------------------------------------------------------------------------
- // query planner
- #define query_plan_should_switch_plan(ops, now) ((now) >= (ops)->current_plan_expire_time)
- static size_t query_planer_expand_duration_in_points(time_t this_update_every, time_t next_update_every) {
- time_t delta = this_update_every - next_update_every;
- if(delta < 0) delta = -delta;
- size_t points;
- if(delta < this_update_every * POINTS_TO_EXPAND_QUERY)
- points = POINTS_TO_EXPAND_QUERY;
- else
- points = (delta + this_update_every - 1) / this_update_every;
- return points;
- }
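- // Illustrative examples of the expansion above (hypothetical update-every values):
- //   switching from a 60s tier to a 1s tier:  delta = 59 <  60 * 5, so 5 points
- //     (POINTS_TO_EXPAND_QUERY), i.e. the plans overlap by 5 x 60s = 300s
- //   switching from a 1s tier to a 60s tier:  delta = 59 >= 1 * 5,  so
- //     (59 + 1 - 1) / 1 = 59 points, i.e. one point per second of the difference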
- static void query_planer_initialize_plans(QUERY_ENGINE_OPS *ops) {
- QUERY_METRIC *qm = ops->qm;
- for(size_t p = 0; p < qm->plan.used ; p++) {
- size_t tier = qm->plan.array[p].tier;
- time_t update_every = qm->tiers[tier].db_update_every_s;
- size_t points_to_add_to_after;
- if(p > 0) {
- // there is another plan before this one
- size_t tier0 = qm->plan.array[p - 1].tier;
- time_t update_every0 = qm->tiers[tier0].db_update_every_s;
- points_to_add_to_after = query_planer_expand_duration_in_points(update_every, update_every0);
- }
- else
- points_to_add_to_after = (tier == 0) ? 0 : POINTS_TO_EXPAND_QUERY;
- size_t points_to_add_to_before;
- if(p + 1 < qm->plan.used) {
- // there is another plan after this one
- size_t tier1 = qm->plan.array[p+1].tier;
- time_t update_every1 = qm->tiers[tier1].db_update_every_s;
- points_to_add_to_before = query_planer_expand_duration_in_points(update_every, update_every1);
- }
- else
- points_to_add_to_before = POINTS_TO_EXPAND_QUERY;
- time_t after = qm->plan.array[p].after - (time_t)(update_every * points_to_add_to_after);
- time_t before = qm->plan.array[p].before + (time_t)(update_every * points_to_add_to_before);
- qm->plan.array[p].expanded_after = after;
- qm->plan.array[p].expanded_before = before;
- struct query_metric_tier *tier_ptr = &qm->tiers[tier];
- tier_ptr->eng->api.query_ops.init(
- tier_ptr->db_metric_handle,
- &qm->plan.array[p].handle,
- after, before,
- ops->r->internal.qt->request.priority);
- qm->plan.array[p].next_metric = tier_ptr->eng->api.query_ops.next_metric;
- qm->plan.array[p].is_finished = tier_ptr->eng->api.query_ops.is_finished;
- qm->plan.array[p].finalize = tier_ptr->eng->api.query_ops.finalize;
- qm->plan.array[p].initialized = true;
- qm->plan.array[p].finalized = false;
- }
- }
- static void query_planer_finalize_plan(QUERY_ENGINE_OPS *ops, size_t plan_id) {
- QUERY_METRIC *qm = ops->qm;
- if(qm->plan.array[plan_id].initialized && !qm->plan.array[plan_id].finalized) {
- qm->plan.array[plan_id].finalize(&qm->plan.array[plan_id].handle);
- qm->plan.array[plan_id].initialized = false;
- qm->plan.array[plan_id].finalized = true;
- qm->plan.array[plan_id].next_metric = NULL;
- qm->plan.array[plan_id].is_finished = NULL;
- qm->plan.array[plan_id].finalize = NULL;
- if(ops->current_plan == plan_id) {
- ops->next_metric = NULL;
- ops->is_finished = NULL;
- ops->finalize = NULL;
- }
- }
- }
- static void query_planer_finalize_remaining_plans(QUERY_ENGINE_OPS *ops) {
- QUERY_METRIC *qm = ops->qm;
- for(size_t p = 0; p < qm->plan.used ; p++)
- query_planer_finalize_plan(ops, p);
- }
- static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after __maybe_unused) {
- QUERY_METRIC *qm = ops->qm;
- internal_fatal(plan_id >= qm->plan.used, "QUERY: invalid plan_id given");
- internal_fatal(!qm->plan.array[plan_id].initialized, "QUERY: plan has not been initialized");
- internal_fatal(qm->plan.array[plan_id].finalized, "QUERY: plan has been finalized");
- internal_fatal(qm->plan.array[plan_id].after > qm->plan.array[plan_id].before, "QUERY: flipped after/before");
- ops->tier = qm->plan.array[plan_id].tier;
- ops->tier_ptr = &qm->tiers[ops->tier];
- ops->handle = &qm->plan.array[plan_id].handle;
- ops->next_metric = qm->plan.array[plan_id].next_metric;
- ops->is_finished = qm->plan.array[plan_id].is_finished;
- ops->finalize = qm->plan.array[plan_id].finalize;
- ops->current_plan = plan_id;
- if(plan_id + 1 < qm->plan.used && qm->plan.array[plan_id + 1].after < qm->plan.array[plan_id].before)
- ops->current_plan_expire_time = qm->plan.array[plan_id + 1].after;
- else
- ops->current_plan_expire_time = qm->plan.array[plan_id].before;
- ops->plan_expanded_after = qm->plan.array[plan_id].expanded_after;
- ops->plan_expanded_before = qm->plan.array[plan_id].expanded_before;
- }
- static bool query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
- QUERY_METRIC *qm = ops->qm;
- size_t old_plan = ops->current_plan;
- time_t next_plan_before_time;
- do {
- ops->current_plan++;
- if (ops->current_plan >= qm->plan.used) {
- ops->current_plan = old_plan;
- ops->current_plan_expire_time = ops->r->internal.qt->window.before;
- // let the query run with current plan
- // we will not switch it
- return false;
- }
- next_plan_before_time = qm->plan.array[ops->current_plan].before;
- } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
- if(!query_metric_is_valid_tier(qm, qm->plan.array[ops->current_plan].tier)) {
- ops->current_plan = old_plan;
- ops->current_plan_expire_time = ops->r->internal.qt->window.before;
- return false;
- }
- query_planer_finalize_plan(ops, old_plan);
- query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
- return true;
- }
- static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
- QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
- QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
- return (p1->after < p2->after)?-1:1;
- }
- static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- QUERY_METRIC *qm = ops->qm;
- // put our selected tier as the first plan
- size_t selected_tier;
- if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER
- && ops->r->internal.qt->window.tier < storage_tiers
- && query_metric_is_valid_tier(qm, ops->r->internal.qt->window.tier)) {
- selected_tier = ops->r->internal.qt->window.tier;
- }
- else {
- selected_tier = query_metric_best_tier_for_timeframe(qm, after_wanted, before_wanted, points_wanted);
- if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
- ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
- if(!query_metric_is_valid_tier(qm, selected_tier))
- return false;
- if(qm->tiers[selected_tier].db_first_time_s > before_wanted ||
- qm->tiers[selected_tier].db_last_time_s < after_wanted)
- return false;
- }
- qm->plan.used = 1;
- qm->plan.array[0].tier = selected_tier;
- qm->plan.array[0].after = (qm->tiers[selected_tier].db_first_time_s < after_wanted) ? after_wanted : qm->tiers[selected_tier].db_first_time_s;
- qm->plan.array[0].before = (qm->tiers[selected_tier].db_last_time_s > before_wanted) ? before_wanted : qm->tiers[selected_tier].db_last_time_s;
- if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
- // the selected tier
- time_t selected_tier_first_time_s = qm->plan.array[0].after;
- time_t selected_tier_last_time_s = qm->plan.array[0].before;
- // check if our selected tier can start the query
- if (selected_tier_first_time_s > after_wanted) {
- // we need some help from other tiers
- for (size_t tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
- if(!query_metric_is_valid_tier(qm, tr))
- continue;
- // find the first time of this tier
- time_t tier_first_time_s = qm->tiers[tr].db_first_time_s;
- // can it help?
- if (tier_first_time_s < selected_tier_first_time_s) {
- // it can help us add detail at the beginning of the query
- QUERY_PLAN_ENTRY t = {
- .tier = tr,
- .after = (tier_first_time_s < after_wanted) ? after_wanted : tier_first_time_s,
- .before = selected_tier_first_time_s,
- .initialized = false,
- .finalized = false,
- };
- qm->plan.array[qm->plan.used++] = t;
- internal_fatal(!t.after || !t.before, "QUERY: invalid plan selected");
- // prepare for the tier
- selected_tier_first_time_s = t.after;
- if (t.after <= after_wanted)
- break;
- }
- }
- }
- // check if our selected tier can finish the query
- if (selected_tier_last_time_s < before_wanted) {
- // we need some help from other tiers
- for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
- if(!query_metric_is_valid_tier(qm, tr))
- continue;
- // find the last time of this tier
- time_t tier_last_time_s = qm->tiers[tr].db_last_time_s;
- //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_s);
- // can it help?
- if (tier_last_time_s > selected_tier_last_time_s) {
- // it can help us add detail at the end of the query
- QUERY_PLAN_ENTRY t = {
- .tier = tr,
- .after = selected_tier_last_time_s,
- .before = (tier_last_time_s > before_wanted) ? before_wanted : tier_last_time_s,
- .initialized = false,
- .finalized = false,
- };
- qm->plan.array[qm->plan.used++] = t;
- // prepare for the tier
- selected_tier_last_time_s = t.before;
- internal_fatal(!t.after || !t.before, "QUERY: invalid plan selected");
- if (t.before >= before_wanted)
- break;
- }
- }
- }
- }
- // sort the query plan
- if(qm->plan.used > 1)
- qsort(&qm->plan.array, qm->plan.used, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
- if(!query_metric_is_valid_tier(qm, qm->plan.array[0].tier))
- return false;
- #ifdef NETDATA_INTERNAL_CHECKS
- for(size_t p = 0; p < qm->plan.used ;p++) {
- internal_fatal(qm->plan.array[p].after > qm->plan.array[p].before, "QUERY: flipped after/before");
- internal_fatal(qm->plan.array[p].after < after_wanted, "QUERY: too small plan first time");
- internal_fatal(qm->plan.array[p].before > before_wanted, "QUERY: too big plan last time");
- }
- #endif
- query_planer_initialize_plans(ops);
- query_planer_activate_plan(ops, 0, 0);
- return true;
- }
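- // Worked example of the planning above (hypothetical tiers and times):
- // the query wants [1000, 2000]; the selected tier 0 only has data for
- // [1500, 2000], while tier 1 has [800, 2000].
- //   plan[0] (selected): tier 0, after 1500, before 2000
- //   tier 0 cannot start the query (1500 > 1000), so tier 1 is added:
- //   plan[1]: tier 1, after 1000, before 1500
- // after sorting by start time the query runs on tier 1 for [1000, 1500]
- // and switches to tier 0 for [1500, 2000].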
- // ----------------------------------------------------------------------------
- // dimension level query engine
- #define query_interpolate_point(this_point, last_point, now) do { \
- if(likely( \
- /* the point to interpolate is more than 1s wide */ \
- (this_point).end_time - (this_point).start_time > 1 \
- \
- /* the two points are exactly next to each other */ \
- && (last_point).end_time == (this_point).start_time \
- \
- /* both points are valid numbers */ \
- && netdata_double_isnumber((this_point).value) \
- && netdata_double_isnumber((last_point).value) \
- \
- )) { \
- (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
- (this_point).end_time = now; \
- } \
- } while(0)
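- // Worked example of the interpolation above (hypothetical points, for illustration):
- // last_point ends at 100 with value 10; this_point covers [100, 110] with value 20;
- // now = 105. The conditions hold (width 10 > 1, points adjacent, both are numbers), so:
- //   value = 10 + (20 - 10) * (1 - (110 - 105) / (110 - 100)) = 10 + 10 * 0.5 = 15
- //   end_time becomes 105, i.e. the db point is cut to the view point boundary.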
- #define query_add_point_to_group(r, point, ops) do { \
- if(likely(netdata_double_isnumber((point).value))) { \
- if(likely(fpclassify((point).value) != FP_ZERO)) \
- (ops)->group_points_non_zero++; \
- \
- if(unlikely((point).flags & SN_FLAG_RESET)) \
- (ops)->group_value_flags |= RRDR_VALUE_RESET; \
- \
- (ops)->grouping_add(r, (point).value); \
- } \
- \
- (ops)->group_points_added++; \
- (ops)->group_anomaly_rate += (point).anomaly; \
- } while(0)
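- // Note on the accumulators above: group_anomaly_rate and group_points_added are
- // consumed in rrd2rrdr_query_execute(), which stores
- // group_anomaly_rate / group_points_added in r->ar[]. Illustrative example:
- // three db points with anomaly percentages 0, 50 and 100 grouped into one
- // output point give an anomaly rate of (0 + 50 + 100) / 3 = 50.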
- static QUERY_ENGINE_OPS *rrd2rrdr_query_prep(RRDR *r, size_t dim_id_in_rrdr) {
- QUERY_TARGET *qt = r->internal.qt;
- QUERY_ENGINE_OPS *ops = onewayalloc_mallocz(r->internal.owa, sizeof(QUERY_ENGINE_OPS));
- *ops = (QUERY_ENGINE_OPS) {
- .r = r,
- .qm = &qt->query.array[dim_id_in_rrdr],
- .grouping_add = r->internal.grouping_add,
- .grouping_flush = r->internal.grouping_flush,
- .tier_query_fetch = r->internal.tier_query_fetch,
- .view_update_every = r->update_every,
- .query_granularity = (time_t)(r->update_every / r->group),
- .group_value_flags = RRDR_VALUE_NOTHING,
- };
- if(!query_plan(ops, qt->window.after, qt->window.before, qt->window.points))
- return NULL;
- return ops;
- }
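- // Illustrative example for the setup above, assuming r->group is the number of
- // db points aggregated into each view point: with a view update_every of 60s
- // built out of 60 db points, query_granularity is 60 / 60 = 1s, so the execute
- // loop below starts with now_start_time = after_wanted - 1 and
- // now_end_time = after_wanted + 59.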
- static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_OPS *ops) {
- QUERY_TARGET *qt = r->internal.qt;
- QUERY_METRIC *qm = &qt->query.array[dim_id_in_rrdr]; (void)qm;
- size_t points_wanted = qt->window.points;
- time_t after_wanted = qt->window.after;
- time_t before_wanted = qt->window.before; (void)before_wanted;
- // bool debug_this = false;
- // if(strcmp("user", string2str(rd->id)) == 0 && strcmp("system.cpu", string2str(rd->rrdset->id)) == 0)
- // debug_this = true;
- time_t max_date = 0,
- min_date = 0;
- size_t points_added = 0;
- long rrdr_line = -1;
- bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
- NETDATA_DOUBLE min = r->min, max = r->max;
- QUERY_POINT last2_point = QUERY_POINT_EMPTY;
- QUERY_POINT last1_point = QUERY_POINT_EMPTY;
- QUERY_POINT new_point = QUERY_POINT_EMPTY;
- // ONE POINT READ-AHEAD
- // when we switch plans, we read-ahead a point from the next plan
- // to join them smoothly at the exact time the next plan begins
- STORAGE_POINT next1_point = STORAGE_POINT_UNSET;
- time_t now_start_time = after_wanted - ops->query_granularity;
- time_t now_end_time = after_wanted + ops->view_update_every - ops->query_granularity;
- size_t db_points_read_since_plan_switch = 0; (void)db_points_read_since_plan_switch;
- // The main loop, based on the query granularity we need
- for( ; points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops->view_update_every) {
- if(unlikely(query_plan_should_switch_plan(ops, now_end_time))) {
- query_planer_next_plan(ops, now_end_time, new_point.end_time);
- db_points_read_since_plan_switch = 0;
- }
- // read all the points of the db, prior to the time we need (now_end_time)
- size_t count_same_end_time = 0;
- while(count_same_end_time < 100) {
- if(likely(count_same_end_time == 0)) {
- last2_point = last1_point;
- last1_point = new_point;
- }
- if(unlikely(ops->is_finished(ops->handle))) {
- if(count_same_end_time != 0) {
- last2_point = last1_point;
- last1_point = new_point;
- }
- new_point = QUERY_POINT_EMPTY;
- new_point.start_time = last1_point.end_time;
- new_point.end_time = now_end_time;
- //
- // if(debug_this) info("QUERY: is finished() returned true");
- //
- break;
- }
- // fetch the new point
- {
- STORAGE_POINT sp;
- if(likely(storage_point_is_unset(next1_point))) {
- db_points_read_since_plan_switch++;
- sp = ops->next_metric(ops->handle);
- }
- else {
- // ONE POINT READ-AHEAD
- sp = next1_point;
- storage_point_unset(next1_point);
- db_points_read_since_plan_switch = 1;
- }
- // ONE POINT READ-AHEAD
- if(unlikely(query_plan_should_switch_plan(ops, sp.end_time_s) &&
- query_planer_next_plan(ops, now_end_time, new_point.end_time))) {
- // The end time of the current point crosses our plans (tiers),
- // so we switched plan (tier).
- //
- // There are 2 cases now:
- //
- // A. the entire point of the previous plan is in the future of the point from the next plan
- // B. part of the point of the previous plan overlaps with the point from the next plan
- STORAGE_POINT sp2 = ops->next_metric(ops->handle);
- if(sp.start_time_s > sp2.start_time_s)
- // the point from the previous plan is useless
- sp = sp2;
- else
- // let the query run from the previous plan
- // but setting this will also cut off the interpolation
- // of the point from the previous plan
- next1_point = sp2;
- }
- ops->db_points_read_per_tier[ops->tier]++;
- ops->db_total_points_read++;
- new_point.start_time = sp.start_time_s;
- new_point.end_time = sp.end_time_s;
- new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
- query_point_set_id(new_point, ops->db_total_points_read);
- // if(debug_this)
- // info("QUERY: got point %zu, from time %ld to %ld // now from %ld to %ld // query from %ld to %ld",
- // new_point.id, new_point.start_time, new_point.end_time, now_start_time, now_end_time, after_wanted, before_wanted);
- //
- // get the right value from the point we got
- if(likely(!storage_point_is_unset(sp) && !storage_point_is_gap(sp))) {
- if(unlikely(use_anomaly_bit_as_value))
- new_point.value = new_point.anomaly;
- else {
- switch (ops->tier_query_fetch) {
- default:
- case TIER_QUERY_FETCH_AVERAGE:
- new_point.value = sp.sum / sp.count;
- break;
- case TIER_QUERY_FETCH_MIN:
- new_point.value = sp.min;
- break;
- case TIER_QUERY_FETCH_MAX:
- new_point.value = sp.max;
- break;
- case TIER_QUERY_FETCH_SUM:
- new_point.value = sp.sum;
- break;
- };
- }
- }
- else {
- new_point.value = NAN;
- new_point.flags = SN_FLAG_NONE;
- }
- }
- // check if the db is giving us zero duration points
- if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.start_time == new_point.end_time)) {
- internal_error(true, "QUERY: '%s', dimension '%s' next_metric() returned "
- "point %zu from %ld to %ld, that are both equal",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time);
- new_point.start_time = new_point.end_time - ops->tier_ptr->db_update_every_s;
- }
- // check if the db is advancing the query
- if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.end_time <= last1_point.end_time)) {
- internal_error(true,
- "QUERY: '%s', dimension '%s' next_metric() returned "
- "point %zu from %ld to %ld, before the "
- "last point %zu from %ld to %ld, "
- "now is %ld to %ld",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time,
- last1_point.id, last1_point.start_time, last1_point.end_time,
- now_start_time, now_end_time);
- count_same_end_time++;
- continue;
- }
- count_same_end_time = 0;
- // decide how to use this point
- if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
- // this db point ends before our now_end_time
- if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
- // this db point ends after our now_start time
- query_add_point_to_group(r, new_point, ops);
- }
- else {
- // we don't need this db point
- // it is totally outside our current time-frame
- // this is desirable for the first point of the query
- // because it allows us to interpolate the next point
- // at exactly the time we will want
- // we only log if this is not point 1
- internal_error(new_point.end_time < ops->plan_expanded_after &&
- db_points_read_since_plan_switch > 1,
- "QUERY: '%s', dimension '%s' next_metric() "
- "returned point %zu from %ld time %ld, "
- "which is entirely before our current timeframe %ld to %ld "
- "(and before the entire query, after %ld, before %ld)",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time,
- now_start_time, now_end_time,
- ops->plan_expanded_after, ops->plan_expanded_before);
- }
- }
- else {
- // the point ends in the future
- // so, we will interpolate it below, at the inner loop
- break;
- }
- }
- if(unlikely(count_same_end_time)) {
- internal_error(true,
- "QUERY: '%s', dimension '%s', the database does not advance the query,"
- " it returned an end time less or equal to the end time of the last "
- "point we got %ld, %zu times",
- qt->id, string2str(qm->dimension.id),
- last1_point.end_time, count_same_end_time);
- if(unlikely(new_point.end_time <= last1_point.end_time))
- new_point.end_time = now_end_time;
- }
- time_t stop_time = new_point.end_time;
- if(unlikely(!storage_point_is_unset(next1_point))) {
- // ONE POINT READ-AHEAD
- // the point crosses the start time of the
- // read ahead storage point we have read
- stop_time = next1_point.start_time_s;
- }
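- // stop_time bounds the interpolation loop that follows: when a read-ahead
- // point is available, we stop at its start time so the next outer iteration
- // can pick it up as the NEW point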
- // the inner loop
- // we have 3 points in memory: last2, last1, new
- // we select the one to use based on their timestamps
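- // selection order per output slot: use the NEW point once now_end_time has
- // passed its start time; otherwise keep last1 while it still covers
- // now_end_time; otherwise emit an empty point (a gap)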
- size_t iterations = 0;
- for ( ; now_end_time <= stop_time && points_added < points_wanted ;
- now_end_time += ops->view_update_every, iterations++) {
- // now_start_time is not updated in this loop,
- // but we don't need it here
- QUERY_POINT current_point;
- if(likely(now_end_time > new_point.start_time)) {
- // it is time for our NEW point to be used
- current_point = new_point;
- query_interpolate_point(current_point, last1_point, now_end_time);
- // internal_error(current_point.id > 0
- // && last1_point.id == 0
- // && current_point.end_time > after_wanted
- // && current_point.end_time > now_end_time,
- // "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
- // " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
- // " but we could really favor by having last_point1 in this query.",
- // qt->id, string2str(qm->dimension.id),
- // after_wanted, before_wanted,
- // ops.view_update_every, ops.query_granularity,
- // current_point.id, current_point.start_time, current_point.end_time,
- // now_end_time);
- }
- else if(likely(now_end_time <= last1_point.end_time)) {
- // our LAST point is still valid
- current_point = last1_point;
- query_interpolate_point(current_point, last2_point, now_end_time);
- // internal_error(current_point.id > 0
- // && last2_point.id == 0
- // && current_point.end_time > after_wanted
- // && current_point.end_time > now_end_time,
- // "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
- // " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
- // " but we could really favor by having last_point2 in this query.",
- // qt->id, string2str(qm->dimension.id),
- // after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
- // current_point.id, current_point.start_time, current_point.end_time,
- // now_end_time);
- }
- else {
- // a GAP, we don't have a value this time
- current_point = QUERY_POINT_EMPTY;
- }
- query_add_point_to_group(r, current_point, ops);
- rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
- size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
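- // the RRDR arrays (v, o, ar) are laid out row-major:
- // one row per output timestamp, r->d columns (one per dimension)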
- if(unlikely(!min_date)) min_date = now_end_time;
- max_date = now_end_time;
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
- // update the dimension options
- if(likely(ops->group_points_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
- // store the specific point options
- *rrdr_value_options_ptr = ops->group_value_flags;
- // store the group value
- NETDATA_DOUBLE group_value = ops->grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_o_v_index] = group_value;
- // we only store uint8_t anomaly rates,
- // so let's get double precision by storing
- // anomaly rates in the range 0 - 200
- r->ar[rrdr_o_v_index] = ops->group_anomaly_rate / (NETDATA_DOUBLE)ops->group_points_added;
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
- if(unlikely(group_value < min)) min = group_value;
- if(unlikely(group_value > max)) max = group_value;
- }
- else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = group_value;
- }
- points_added++;
- ops->group_points_added = 0;
- ops->group_value_flags = RRDR_VALUE_NOTHING;
- ops->group_points_non_zero = 0;
- ops->group_anomaly_rate = 0;
- }
- // the loop above increased "now" by query_granularity,
- // but the main loop will increase it too,
- // so, let's undo the last iteration of this loop
- if(iterations)
- now_end_time -= ops->view_update_every;
- }
- query_planer_finalize_remaining_plans(ops);
- r->internal.result_points_generated += points_added;
- r->internal.db_points_read += ops->db_total_points_read;
- for(size_t tr = 0; tr < storage_tiers ; tr++)
- r->internal.tier_points_read[tr] += ops->db_points_read_per_tier[tr];
- r->min = min;
- r->max = max;
- r->before = max_date;
- r->after = min_date - ops->view_update_every + ops->query_granularity;
- rrdr_done(r, rrdr_line);
- internal_error(points_added != points_wanted,
- "QUERY: '%s', dimension '%s', requested %zu points, but RRDR added %zu (%zu db points read).",
- qt->id, string2str(qm->dimension.id),
- (size_t)points_wanted, (size_t)points_added, ops->db_total_points_read);
- }
- // ----------------------------------------------------------------------------
- // fill the gap of a tier
- void store_metric_at_tier(RRDDIM *rd, size_t tier, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
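- // Summary of the logic below: walk the lower (higher resolution) tiers from
- // tier-1 down to tier 0 and store every point that is newer than this tier's
- // latest time into this tier, so the gap is filled with the best resolution
- // data available.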
- void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s) {
- if(unlikely(tier >= storage_tiers)) return;
- if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
- struct rrddim_tier *t = &rd->tiers[tier];
- if(unlikely(!t)) return;
- time_t latest_time_s = t->query_ops->latest_time_s(t->db_metric_handle);
- time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
- time_t time_diff = now_s - latest_time_s;
- // if the user wants only NEW backfilling, and we don't have any data
- if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_s <= 0) return;
- // there is really nothing we can do
- if(now_s <= latest_time_s || time_diff < granularity) return;
- struct storage_engine_query_handle handle;
- // for each lower tier
- for(int read_tier = (int)tier - 1; read_tier >= 0 ; read_tier--){
- time_t smaller_tier_first_time = rd->tiers[read_tier].query_ops->oldest_time_s(rd->tiers[read_tier].db_metric_handle);
- time_t smaller_tier_last_time = rd->tiers[read_tier].query_ops->latest_time_s(rd->tiers[read_tier].db_metric_handle);
- if(smaller_tier_last_time <= latest_time_s) continue; // this tier has nothing newer than what we already have
- long after_wanted = (latest_time_s < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_s;
- long before_wanted = smaller_tier_last_time;
- struct rrddim_tier *tmp = &rd->tiers[read_tier];
- tmp->query_ops->init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, STORAGE_PRIORITY_HIGH);
- size_t points_read = 0;
- while(!tmp->query_ops->is_finished(&handle)) {
- STORAGE_POINT sp = tmp->query_ops->next_metric(&handle);
- points_read++;
- if(sp.end_time_s > latest_time_s) {
- latest_time_s = sp.end_time_s;
- store_metric_at_tier(rd, tier, t, sp, sp.end_time_s * USEC_PER_SEC);
- }
- }
- tmp->query_ops->finalize(&handle);
- store_metric_collection_completed();
- global_statistics_backfill_query_completed(points_read);
- //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
- // rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
- }
- }
- // ----------------------------------------------------------------------------
- // fill RRDR for the whole chart
- #ifdef NETDATA_INTERNAL_CHECKS
- static void rrd2rrdr_log_request_response_metadata(RRDR *r
- , RRDR_OPTIONS options __maybe_unused
- , RRDR_GROUPING group_method
- , bool aligned
- , size_t group
- , time_t resampling_time
- , size_t resampling_group
- , time_t after_wanted
- , time_t after_requested
- , time_t before_wanted
- , time_t before_requested
- , size_t points_requested
- , size_t points_wanted
- //, size_t after_slot
- //, size_t before_slot
- , const char *msg
- ) {
- time_t first_entry_s = r->internal.qt->db.first_time_s;
- time_t last_entry_s = r->internal.qt->db.last_time_s;
- internal_error(
- true,
- "rrd2rrdr() on %s update every %ld with %s grouping %s (group: %zu, resampling_time: %ld, resampling_group: %zu), "
- "after (got: %ld, want: %ld, req: %ld, db: %ld), "
- "before (got: %ld, want: %ld, req: %ld, db: %ld), "
- "duration (got: %ld, want: %ld, req: %ld, db: %ld), "
- "points (got: %zu, want: %zu, req: %zu), "
- "%s"
- , r->internal.qt->id
- , r->internal.qt->window.query_granularity
- // grouping
- , (aligned) ? "aligned" : "unaligned"
- , group_method2string(group_method)
- , group
- , resampling_time
- , resampling_group
- // after
- , r->after
- , after_wanted
- , after_requested
- , first_entry_s
- // before
- , r->before
- , before_wanted
- , before_requested
- , last_entry_s
- // duration
- , (long)(r->before - r->after + r->internal.qt->window.query_granularity)
- , (long)(before_wanted - after_wanted + r->internal.qt->window.query_granularity)
- , (long)before_requested - after_requested
- , (long)((last_entry_s - first_entry_s) + r->internal.qt->window.query_granularity)
- // points
- , r->rows
- , points_wanted
- , points_requested
- // message
- , msg
- );
- }
- #endif // NETDATA_INTERNAL_CHECKS
- // Returns true if a relative period was requested, false if an absolute period was requested
- bool rrdr_relative_window_to_absolute(time_t *after, time_t *before) {
- time_t now = now_realtime_sec() - 1;
- int absolute_period_requested = -1;
- long long after_requested, before_requested;
- before_requested = *before;
- after_requested = *after;
- // allow relative for before (absolute value up to API_RELATIVE_TIME_MAX)
- if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
- // if the user asked for a positive relative time,
- // flip it to a negative
- if(before_requested > 0)
- before_requested = -before_requested;
- before_requested = now + before_requested;
- absolute_period_requested = 0;
- }
- // allow relative for after (absolute value up to API_RELATIVE_TIME_MAX)
- if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
- if(after_requested > 0)
- after_requested = -after_requested;
- // if the user didn't give an after, default to the
- // last 600 seconds
- if(after_requested == 0)
- after_requested = -600;
- // since the query engine now returns inclusive timestamps
- // it is awkward to return 6 points when after=-5 is given
- // so for relative queries we add 1 second, to give
- // more predictable results to users.
- after_requested = before_requested + after_requested + 1;
- absolute_period_requested = 0;
- }
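- // worked example (hypothetical values): now=1000000, *after=-600, *before=0
- // -> before_requested = 1000000, after_requested = 1000000 - 600 + 1 = 999401,
- //    i.e. an inclusive 600-second window ending at "now"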
- if(absolute_period_requested == -1)
- absolute_period_requested = 1;
- // check if the parameters are flipped
- if(after_requested > before_requested) {
- long long t = before_requested;
- before_requested = after_requested;
- after_requested = t;
- }
- // if the query requests future data
- // shift the query back to be in the present time
- // (this may also happen because of the rules above)
- if(before_requested > now) {
- long long delta = before_requested - now;
- before_requested -= delta;
- after_requested -= delta;
- }
- time_t absolute_minimum_time = now - (10 * 365 * 86400);
- time_t absolute_maximum_time = now + (1 * 365 * 86400);
- if (after_requested < absolute_minimum_time && !unittest_running)
- after_requested = absolute_minimum_time;
- if (after_requested > absolute_maximum_time && !unittest_running)
- after_requested = absolute_maximum_time;
- if (before_requested < absolute_minimum_time && !unittest_running)
- before_requested = absolute_minimum_time;
- if (before_requested > absolute_maximum_time && !unittest_running)
- before_requested = absolute_maximum_time;
- *before = before_requested;
- *after = after_requested;
- return (absolute_period_requested != 1);
- }
- // #define DEBUG_QUERY_LOGIC 1
- #ifdef DEBUG_QUERY_LOGIC
- #define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
- #define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
- #define query_debug_log_fin() { \
- info("QUERY: '%s', after:%ld, before:%ld, duration:%ld, points:%zu, res:%ld - wanted => after:%ld, before:%ld, points:%zu, group:%zu, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", qt->id, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
- buffer_free(debug_log); \
- debug_log = NULL; \
- }
- #define query_debug_log_free() do { buffer_free(debug_log); } while(0)
- #else
- #define query_debug_log_init() debug_dummy()
- #define query_debug_log(args...) debug_dummy()
- #define query_debug_log_fin() debug_dummy()
- #define query_debug_log_free() debug_dummy()
- #endif
- bool query_target_calculate_window(QUERY_TARGET *qt) {
- if (unlikely(!qt)) return false;
- size_t points_requested = (long)qt->request.points;
- time_t after_requested = qt->request.after;
- time_t before_requested = qt->request.before;
- RRDR_GROUPING group_method = qt->request.group_method;
- time_t resampling_time_requested = qt->request.resampling_time;
- RRDR_OPTIONS options = qt->request.options;
- size_t tier = qt->request.tier;
- time_t update_every = qt->db.minimum_latest_update_every_s;
- // RULES
- // points_requested = 0
- // the user wants all the natural points the database has
- //
- // after_requested = 0
- // the user wants to start the query from the oldest point in our database
- //
- // before_requested = 0
- // the user wants the query to end to the latest point in our database
- //
- // when natural points are wanted, the query has to be aligned to the update_every
- // of the database
- size_t points_wanted = points_requested;
- time_t after_wanted = after_requested;
- time_t before_wanted = before_requested;
- bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
- bool automatic_natural_points = (points_wanted == 0);
- bool relative_period_requested = false;
- bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
- bool before_is_aligned_to_db_end = false;
- query_debug_log_init();
- if (ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
- relative_period_requested = true;
- natural_points = true;
- options |= RRDR_OPTION_NATURAL_POINTS;
- query_debug_log(":relative+natural");
- }
- // if the user wants virtual points, make sure we do it
- if (options & RRDR_OPTION_VIRTUAL_POINTS)
- natural_points = false;
- // set the right flag about natural and virtual points
- if (natural_points) {
- options |= RRDR_OPTION_NATURAL_POINTS;
- if (options & RRDR_OPTION_VIRTUAL_POINTS)
- options &= ~RRDR_OPTION_VIRTUAL_POINTS;
- }
- else {
- options |= RRDR_OPTION_VIRTUAL_POINTS;
- if (options & RRDR_OPTION_NATURAL_POINTS)
- options &= ~RRDR_OPTION_NATURAL_POINTS;
- }
- if (after_wanted == 0 || before_wanted == 0) {
- relative_period_requested = true;
- time_t first_entry_s = qt->db.first_time_s;
- time_t last_entry_s = qt->db.last_time_s;
- if (first_entry_s == 0 || last_entry_s == 0) {
- internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld", qt->id, first_entry_s, last_entry_s);
- query_debug_log_free();
- return false;
- }
- query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_s, last_entry_s);
- if (after_wanted == 0) {
- after_wanted = first_entry_s;
- query_debug_log(":zero after_wanted %ld", after_wanted);
- }
- if (before_wanted == 0) {
- before_wanted = last_entry_s;
- before_is_aligned_to_db_end = true;
- query_debug_log(":zero before_wanted %ld", before_wanted);
- }
- if (points_wanted == 0) {
- points_wanted = (last_entry_s - first_entry_s) / update_every;
- query_debug_log(":zero points_wanted %zu", points_wanted);
- }
- }
- if (points_wanted == 0) {
- points_wanted = 600;
- query_debug_log(":zero600 points_wanted %zu", points_wanted);
- }
- // convert our before_wanted and after_wanted to absolute
- rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
- query_debug_log(":relative2absolute after %ld, before %ld", after_wanted, before_wanted);
- if (natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
- update_every = rrdset_find_natural_update_every_for_timeframe(
- qt, after_wanted, before_wanted, points_wanted, options, tier);
- if (update_every <= 0) update_every = qt->db.minimum_latest_update_every_s;
- query_debug_log(":natural update every %ld", update_every);
- }
- // this is the update_every of the query
- // it may be different to the update_every of the database
- time_t query_granularity = (natural_points) ? update_every : 1;
- if (query_granularity <= 0) query_granularity = 1;
- query_debug_log(":query_granularity %ld", query_granularity);
- // align before_wanted and after_wanted to query_granularity
- if (before_wanted % query_granularity) {
- before_wanted -= before_wanted % query_granularity;
- query_debug_log(":granularity align before_wanted %ld", before_wanted);
- }
- if (after_wanted % query_granularity) {
- after_wanted -= after_wanted % query_granularity;
- query_debug_log(":granularity align after_wanted %ld", after_wanted);
- }
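- // e.g. (hypothetical values): with query_granularity=10, before_wanted=1000123
- // becomes 1000120 and after_wanted=999457 becomes 999450 (both snapped down)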
- // automatic_natural_points is set when the user wants all the points available in the database
- if (automatic_natural_points) {
- points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
- if (unlikely(points_wanted <= 0)) points_wanted = 1;
- query_debug_log(":auto natural points_wanted %zu", points_wanted);
- }
- time_t duration = before_wanted - after_wanted;
- // if the resampling time is too big, extend the duration to the past
- if (unlikely(resampling_time_requested > duration)) {
- after_wanted = before_wanted - resampling_time_requested;
- duration = before_wanted - after_wanted;
- query_debug_log(":resampling after_wanted %ld", after_wanted);
- }
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap at the chart
- // only when the missing duration is above 1/10th of a point
- if (resampling_time_requested > query_granularity && duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if (delta > resampling_time_requested / 10) {
- after_wanted -= resampling_time_requested - delta;
- duration = before_wanted - after_wanted;
- query_debug_log(":resampling2 after_wanted %ld", after_wanted);
- }
- }
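- // e.g. (hypothetical values): duration=3595, resampling_time_requested=60,
- // query_granularity=1 -> delta=55 (more than 60/10), so after_wanted moves
- // 5 seconds further into the past and duration becomes 3600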
- // the available points of the query
- size_t points_available = (duration + 1) / query_granularity;
- if (unlikely(points_available <= 0)) points_available = 1;
- query_debug_log(":points_available %zu", points_available);
- if (points_wanted > points_available) {
- points_wanted = points_available;
- query_debug_log(":max points_wanted %zu", points_wanted);
- }
- if(points_wanted > 86400 && !unittest_running) {
- points_wanted = 86400;
- query_debug_log(":absolute max points_wanted %zu", points_wanted);
- }
- // calculate the desired grouping of source data points
- size_t group = points_available / points_wanted;
- if (group == 0) group = 1;
- // round "group" to the closest integer
- if (points_available % points_wanted > points_wanted / 2)
- group++;
- query_debug_log(":group %zu", group);
- if (points_wanted * group * query_granularity < (size_t)duration) {
- // the grouping we are going to do, is not enough
- // to cover the entire duration requested, so
- // we have to change the number of points, to make sure we will
- // respect the timeframe as closely as possible
- // let's see how many points are the optimal
- points_wanted = points_available / group;
- if (points_wanted * group < points_available)
- points_wanted++;
- if (unlikely(points_wanted == 0))
- points_wanted = 1;
- query_debug_log(":optimal points %zu", points_wanted);
- }
- // resampling_time_requested enforces a certain grouping multiple
- NETDATA_DOUBLE resampling_divisor = 1.0;
- size_t resampling_group = 1;
- if (unlikely(resampling_time_requested > query_granularity)) {
- // the number of points we should group together to satisfy the requested resampling time
- resampling_group = resampling_time_requested / query_granularity;
- if (unlikely(resampling_time_requested % query_granularity))
- resampling_group++;
- query_debug_log(":resampling group %zu", resampling_group);
- // adapt group according to resampling_group
- if (unlikely(group < resampling_group)) {
- group = resampling_group; // do not allow grouping below the desired one
- query_debug_log(":group less res %zu", group);
- }
- if (unlikely(group % resampling_group)) {
- group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
- query_debug_log(":group mod res %zu", group);
- }
- // resampling_divisor = group / resampling_group;
- resampling_divisor = (NETDATA_DOUBLE) (group * query_granularity) / (NETDATA_DOUBLE) resampling_time_requested;
- query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
- }
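- // e.g. (hypothetical values): query_granularity=1, resampling_time_requested=60
- // and group=2 -> resampling_group=60, group is raised to 60 and
- // resampling_divisor = (60 * 1) / 60 = 1.0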
- // now that we have group, align the requested timeframe to fit it.
- if (aligned && before_wanted % (group * query_granularity)) {
- if (before_is_aligned_to_db_end)
- before_wanted -= before_wanted % (time_t)(group * query_granularity);
- else
- before_wanted += (time_t)(group * query_granularity) - before_wanted % (time_t)(group * query_granularity);
- query_debug_log(":align before_wanted %ld", before_wanted);
- }
- after_wanted = before_wanted - (time_t)(points_wanted * group * query_granularity) + query_granularity;
- query_debug_log(":final after_wanted %ld", after_wanted);
- duration = before_wanted - after_wanted;
- query_debug_log(":final duration %ld", duration + 1);
- query_debug_log_fin();
- internal_error(points_wanted != duration / (query_granularity * group) + 1,
- "QUERY: points_wanted %zu is not points %zu",
- points_wanted, (size_t)(duration / (query_granularity * group) + 1));
- internal_error(group < resampling_group,
- "QUERY: group %zu is less than the desired group points %zu",
- group, resampling_group);
- internal_error(group > resampling_group && group % resampling_group,
- "QUERY: group %zu is not a multiple of the desired group points %zu",
- group, resampling_group);
- // -------------------------------------------------------------------------
- // update QUERY_TARGET with our calculations
- qt->window.after = after_wanted;
- qt->window.before = before_wanted;
- qt->window.relative = relative_period_requested;
- qt->window.points = points_wanted;
- qt->window.group = group;
- qt->window.group_method = group_method;
- qt->window.group_options = qt->request.group_options;
- qt->window.query_granularity = query_granularity;
- qt->window.resampling_group = resampling_group;
- qt->window.resampling_divisor = resampling_divisor;
- qt->window.options = options;
- qt->window.tier = tier;
- qt->window.aligned = aligned;
- return true;
- }
- RRDR *rrd2rrdr_legacy(
- ONEWAYALLOC *owa,
- RRDSET *st, size_t points, time_t after, time_t before,
- RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
- const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source,
- STORAGE_PRIORITY priority) {
- QUERY_TARGET_REQUEST qtr = {
- .st = st,
- .points = points,
- .after = after,
- .before = before,
- .group_method = group_method,
- .resampling_time = resampling_time,
- .options = options,
- .dimensions = dimensions,
- .group_options = group_options,
- .timeout = timeout,
- .tier = tier,
- .query_source = query_source,
- .priority = priority,
- };
- return rrd2rrdr(owa, query_target_create(&qtr));
- }
- RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
- if(!qt)
- return NULL;
- if(!owa) {
- query_target_release(qt);
- return NULL;
- }
- // qt.window members are the WANTED ones.
- // qt.request members are the REQUESTED ones.
- RRDR *r = rrdr_create(owa, qt);
- if(unlikely(!r)) {
- internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, points=%zu",
- qt->id, qt->window.after, qt->window.before, qt->window.points);
- return NULL;
- }
- if(unlikely(!r->d || !qt->window.points)) {
- internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%ld, before=%ld, points=%zu",
- qt->id, qt->window.after, qt->window.before, qt->window.points);
- return r;
- }
- if(qt->window.relative)
- r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
- else
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- // -------------------------------------------------------------------------
- // initialize RRDR
- r->group = qt->window.group;
- r->update_every = (int) (qt->window.group * qt->window.query_granularity);
- r->before = qt->window.before;
- r->after = qt->window.after;
- r->internal.points_wanted = qt->window.points;
- r->internal.resampling_group = qt->window.resampling_group;
- r->internal.resampling_divisor = qt->window.resampling_divisor;
- r->internal.query_options = qt->window.options;
- // -------------------------------------------------------------------------
- // assign the processor functions
- rrdr_set_grouping_function(r, qt->window.group_method);
- // allocate any memory required by the grouping method
- r->internal.grouping_create(r, qt->window.group_options);
- // -------------------------------------------------------------------------
- // do the work for each dimension
- time_t max_after = 0, min_before = 0;
- size_t max_rows = 0;
- long dimensions_used = 0, dimensions_nonzero = 0;
- struct timeval query_start_time;
- struct timeval query_current_time;
- if (qt->request.timeout)
- now_realtime_timeval(&query_start_time);
- size_t last_db_points_read = 0;
- size_t last_result_points_generated = 0;
- QUERY_ENGINE_OPS **ops = onewayalloc_callocz(r->internal.owa, qt->query.used, sizeof(QUERY_ENGINE_OPS *));
- size_t capacity = libuv_worker_threads * 2;
- size_t max_queries_to_prepare = (qt->query.used > (capacity - 1)) ? (capacity - 1) : qt->query.used;
- size_t queries_prepared = 0;
- while(queries_prepared < max_queries_to_prepare) {
- // preload another query
- ops[queries_prepared] = rrd2rrdr_query_prep(r, queries_prepared);
- queries_prepared++;
- }
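- // read-ahead: up to (2 * libuv_worker_threads - 1) queries are prepared before
- // the main loop starts; the loop below prepares one more per dimension it
- // executes, so preparation stays ahead of execution until all are prepared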
- for(size_t c = 0, max = qt->query.used; c < max ; c++) {
- if(queries_prepared < max) {
- // preload another query
- ops[queries_prepared] = rrd2rrdr_query_prep(r, queries_prepared);
- queries_prepared++;
- }
- // set the query target dimension options to rrdr
- r->od[c] = qt->query.array[c].dimension.options;
- // reset the grouping for the new dimension
- r->internal.grouping_reset(r);
- if(ops[c]) {
- r->od[c] |= RRDR_DIMENSION_QUERIED;
- rrd2rrdr_query_execute(r, c, ops[c]);
- }
- else
- continue;
- global_statistics_rrdr_query_completed(
- 1,
- r->internal.db_points_read - last_db_points_read,
- r->internal.result_points_generated - last_result_points_generated,
- qt->request.query_source);
- last_db_points_read = r->internal.db_points_read;
- last_result_points_generated = r->internal.result_points_generated;
- if (qt->request.timeout)
- now_realtime_timeval(&query_current_time);
- if(r->od[c] & RRDR_DIMENSION_NONZERO)
- dimensions_nonzero++;
- // verify all dimensions are aligned
- if(unlikely(!dimensions_used)) {
- min_before = r->before;
- max_after = r->after;
- max_rows = r->rows;
- }
- else {
- if(r->after != max_after) {
- internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)max_after, string2str(qt->query.array[c].dimension.name), (size_t)r->after);
- r->after = (r->after > max_after) ? r->after : max_after;
- }
- if(r->before != min_before) {
- internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)min_before, string2str(qt->query.array[c].dimension.name), (size_t)r->before);
- r->before = (r->before < min_before) ? r->before : min_before;
- }
- if(r->rows != max_rows) {
- internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)max_rows, string2str(qt->query.array[c].dimension.name), (size_t)r->rows);
- r->rows = (r->rows > max_rows) ? r->rows : max_rows;
- }
- }
- dimensions_used++;
- if (qt->request.timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout) {
- log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %lld ms)",
- (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, (long long)qt->request.timeout);
- r->result_options |= RRDR_RESULT_OPTION_CANCEL;
- for(size_t i = c + 1; i < queries_prepared ; i++) {
- if(ops[i])
- query_planer_finalize_remaining_plans(ops[i]);
- }
- break;
- }
- }
- #ifdef NETDATA_INTERNAL_CHECKS
- if (dimensions_used) {
- if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- r->internal.log);
- if(r->rows != qt->window.points)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'points' is not wanted 'points'");
- if(qt->window.aligned && (r->before % (qt->window.group * qt->window.query_granularity)) != 0)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before,qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "'before' is not aligned but alignment is required");
- // 'after' should not be aligned, since we start inside the first group
- //if(qt->window.aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group, qt->window.after, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
- if(r->before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "chart is not aligned to requested 'before'");
- if(r->before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'before' is not wanted 'before'");
- // reported 'after' varies, depending on group
- if(r->after != qt->window.after)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'after' is not wanted 'after'");
- }
- #endif
- // free all resources used by the grouping method
- r->internal.grouping_free(r);
- if(likely(dimensions_used)) {
- // when all the dimensions are zero, we should return all of them
- if (unlikely((qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero &&
- !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
- // all the dimensions are zero
- // mark them as NONZERO to send them all
- for (size_t c = 0, max = qt->query.used; c < max; c++) {
- if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if (unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- r->od[c] |= RRDR_DIMENSION_NONZERO;
- }
- }
- return r;
- }
- // we couldn't query any dimension
- rrdr_free(owa, r);
- return NULL;
- }