12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526 |
import uuid
from datetime import datetime, timezone
from unittest import mock

import pytest
import urllib3

from sentry.testutils.helpers import parse_link_header
from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase
class OrganizationEventsSpanIndexedEndpointTest(OrganizationEventsEndpointTestBase):
    """Test the indexed spans dataset.

    To run this locally you may need to set the ENABLE_SPANS_CONSUMER flag to True in Snuba.
    A way to do this is
    1. run: `sentry devservices down snuba`
    2. clone snuba locally
    3. run: `export ENABLE_SPANS_CONSUMER=True`
    4. run snuba
    At this point tests should work locally

    Once span ingestion is on by default this will no longer need to be done
    """

    # NOTE: the docstring above was previously placed *after* these flags,
    # which made it a stray expression statement instead of the class __doc__.
    # Toggled by subclasses to run the same suite against the EAP dataset.
    is_eap = False
    # Toggled by subclasses to route queries through the RPC-based executor.
    use_rpc = False
- @property
- def dataset(self):
- if self.is_eap:
- return "spans"
- else:
- return "spansIndexed"
    def do_request(self, query, features=None, **kwargs):
        # Every request carries the useRpc flag so subclasses exercising the
        # RPC-backed query path only need to flip `use_rpc`.
        query["useRpc"] = "1" if self.use_rpc else "0"
        return super().do_request(query, features, **kwargs)
    def setUp(self):
        # Enable the starfish feature flag required by the spans endpoints.
        super().setUp()
        self.features = {
            "organizations:starfish-view": True,
        }
- @pytest.mark.querybuilder
- def test_simple(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.status": "invalid_argument",
- "description": "bar",
- "count()": 1,
- },
- {
- "span.status": "ok",
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_spm(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "spm()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "description": "foo",
- "spm()": 1 / (90 * 24 * 60),
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_id_fields(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["id", "span_id"],
- "query": "",
- "orderby": "id",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- for obj in data:
- assert obj["id"] == obj["span_id"]
- assert meta["dataset"] == self.dataset
- def test_sentry_tags_vs_tags(self):
- self.store_spans(
- [
- self.create_span(
- {"sentry_tags": {"transaction.method": "foo"}}, start_ts=self.ten_mins_ago
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["transaction.method", "count()"],
- "query": "",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["transaction.method"] == "foo"
- assert meta["dataset"] == self.dataset
- def test_sentry_tags_syntax(self):
- self.store_spans(
- [
- self.create_span(
- {"sentry_tags": {"transaction.method": "foo"}}, start_ts=self.ten_mins_ago
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["sentry_tags[transaction.method]", "count()"],
- "query": "",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["sentry_tags[transaction.method]"] == "foo"
- assert meta["dataset"] == self.dataset
    def test_module_alias(self):
        # Delegates `span.module` to `sentry_tags[category]`. Maps `"db.redis"` spans to the `"cache"` module
        self.store_spans(
            [
                self.create_span(
                    {
                        "op": "db.redis",
                        "description": "EXEC *",
                        "sentry_tags": {
                            "description": "EXEC *",
                            "category": "db",
                            "op": "db.redis",
                            "transaction": "/app/index",
                        },
                    },
                    start_ts=self.ten_mins_ago,
                ),
            ],
            is_eap=self.is_eap,
        )
        # Filtering on the aliased module value must match the redis span.
        response = self.do_request(
            {
                "field": ["span.module", "span.description"],
                "query": "span.module:cache",
                "project": self.project.id,
                "dataset": self.dataset,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.module"] == "cache"
        assert data[0]["span.description"] == "EXEC *"
        assert meta["dataset"] == self.dataset
- def test_device_class_filter_unknown(self):
- self.store_spans(
- [
- self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["device.class", "count()"],
- "query": "device.class:Unknown",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["device.class"] == "Unknown"
- assert meta["dataset"] == self.dataset
- def test_span_module(self):
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "op": "http",
- "category": "http",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "sentry_tags": {
- "op": "alternative",
- "category": "other",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "sentry_tags": {
- "op": "alternative",
- "category": "other",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.module", "count()"],
- "query": "",
- "orderby": "-count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data[0]["span.module"] == "other"
- assert data[1]["span.module"] == "http"
- assert meta["dataset"] == self.dataset
    def test_network_span(self):
        """HTTP client spans can be filtered by `span.status_code`."""
        self.store_spans(
            [
                self.create_span(
                    {
                        "sentry_tags": {
                            "action": "GET",
                            "category": "http",
                            "description": "GET https://*.resource.com",
                            "domain": "*.resource.com",
                            "op": "http.client",
                            "status_code": "200",
                            "transaction": "/api/0/data/",
                            "transaction.method": "GET",
                            "transaction.op": "http.server",
                        }
                    },
                    start_ts=self.ten_mins_ago,
                ),
            ],
            is_eap=self.is_eap,
        )
        response = self.do_request(
            {
                "field": ["span.op", "span.status_code"],
                "query": "span.status_code:200",
                "project": self.project.id,
                "dataset": self.dataset,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.op"] == "http.client"
        # status_code round-trips as a string, not an int.
        assert data[0]["span.status_code"] == "200"
        assert meta["dataset"] == self.dataset
    def test_other_category_span(self):
        """Spans with an unrecognized category fall under span.module:other."""
        self.store_spans(
            [
                self.create_span(
                    {
                        "sentry_tags": {
                            "action": "GET",
                            "category": "alternative",
                            "description": "GET https://*.resource.com",
                            "domain": "*.resource.com",
                            "op": "alternative",
                            "status_code": "200",
                            "transaction": "/api/0/data/",
                            "transaction.method": "GET",
                            "transaction.op": "http.server",
                        }
                    },
                    start_ts=self.ten_mins_ago,
                ),
            ],
            is_eap=self.is_eap,
        )
        response = self.do_request(
            {
                "field": ["span.op", "span.status_code"],
                "query": "span.module:other span.status_code:200",
                "project": self.project.id,
                "dataset": self.dataset,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.op"] == "alternative"
        assert data[0]["span.status_code"] == "200"
        assert meta["dataset"] == self.dataset
- def test_inp_span(self):
- replay_id = uuid.uuid4().hex
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "replay_id": replay_id,
- "browser.name": "Chrome",
- "transaction": "/pageloads/",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["replay.id", "browser.name", "origin.transaction", "count()"],
- "query": f"replay.id:{replay_id} AND browser.name:Chrome AND origin.transaction:/pageloads/",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["replay.id"] == replay_id
- assert data[0]["browser.name"] == "Chrome"
- assert data[0]["origin.transaction"] == "/pageloads/"
- assert meta["dataset"] == self.dataset
- def test_id_filtering(self):
- span = self.create_span({"description": "foo"}, start_ts=self.ten_mins_ago)
- self.store_span(span, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": f"id:{span['span_id']}",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["description"] == "foo"
- assert meta["dataset"] == self.dataset
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": f"transaction.id:{span['event_id']}",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["description"] == "foo"
- assert meta["dataset"] == self.dataset
- def test_span_op_casing(self):
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "replay_id": "abc123",
- "browser.name": "Chrome",
- "transaction": "/pageloads/",
- "op": "this is a transaction",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.op", "count()"],
- "query": 'span.op:"ThIs Is a TraNSActiON"',
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["span.op"] == "this is a transaction"
- assert meta["dataset"] == self.dataset
    def test_queue_span(self):
        """Queue spans expose their messaging tags and measurements as fields."""
        self.store_spans(
            [
                self.create_span(
                    {
                        "measurements": {
                            "messaging.message.body.size": {"value": 1024, "unit": "byte"},
                            "messaging.message.receive.latency": {
                                "value": 1000,
                                "unit": "millisecond",
                            },
                            "messaging.message.retry.count": {"value": 2, "unit": "none"},
                        },
                        "sentry_tags": {
                            "transaction": "queue-processor",
                            "messaging.destination.name": "events",
                            "messaging.message.id": "abc123",
                            "trace.status": "ok",
                        },
                    },
                    start_ts=self.ten_mins_ago,
                ),
            ],
            is_eap=self.is_eap,
        )
        response = self.do_request(
            {
                "field": [
                    "transaction",
                    "messaging.destination.name",
                    "messaging.message.id",
                    "measurements.messaging.message.receive.latency",
                    "measurements.messaging.message.body.size",
                    "measurements.messaging.message.retry.count",
                    "trace.status",
                    "count()",
                ],
                "query": 'messaging.destination.name:"events"',
                "orderby": "count()",
                "project": self.project.id,
                "dataset": self.dataset,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["transaction"] == "queue-processor"
        assert data[0]["messaging.destination.name"] == "events"
        assert data[0]["messaging.message.id"] == "abc123"
        assert data[0]["trace.status"] == "ok"
        # Measurements come back as plain numbers in their stored units.
        assert data[0]["measurements.messaging.message.receive.latency"] == 1000
        assert data[0]["measurements.messaging.message.body.size"] == 1024
        assert data[0]["measurements.messaging.message.retry.count"] == 2
        assert meta["dataset"] == self.dataset
- def test_tag_wildcards(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "tags": {"foo": "BaR"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "qux", "tags": {"foo": "QuX"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- for query in [
- "foo:b*",
- "foo:*r",
- "foo:*a*",
- "foo:b*r",
- ]:
- response = self.do_request(
- {
- "field": ["foo", "count()"],
- "query": query,
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [{"foo": "BaR", "count()": 1}]
- def test_query_for_missing_tag(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo"},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "qux", "tags": {"foo": "bar"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["foo", "count()"],
- "query": 'foo:""',
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [{"foo": "", "count()": 1}]
- def test_count_field_type(self):
- response = self.do_request(
- {
- "field": ["count()"],
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["meta"]["fields"] == {"count()": "integer"}
- assert response.data["meta"]["units"] == {"count()": None}
- assert response.data["data"] == [{"count()": 0}]
- def _test_simple_measurements(self, keys):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={k: {"value": (i + 1) / 10} for i, (k, _, _) in enumerate(keys)},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- for i, (k, type, unit) in enumerate(keys):
- key = f"measurements.{k}"
- response = self.do_request(
- {
- "field": [key],
- "query": "description:foo",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- expected = {
- "dataset": mock.ANY,
- "datasetReason": "unchanged",
- "fields": {
- key: type,
- "id": "string",
- "project.name": "string",
- },
- "isMetricsData": False,
- "isMetricsExtractedData": False,
- "tips": {},
- "units": {
- key: unit,
- "id": None,
- "project.name": None,
- },
- }
- if self.use_rpc:
- expected["accuracy"] = {
- "confidence": [{}],
- }
- assert response.data["meta"] == expected
- assert response.data["data"] == [
- {
- key: pytest.approx((i + 1) / 10),
- "id": mock.ANY,
- "project.name": self.project.slug,
- }
- ]
    def test_simple_measurements(self):
        """Exercise _test_simple_measurements over every known mobile/web-vital
        measurement with its expected meta type and unit."""
        keys = [
            ("app_start_cold", "duration", "millisecond"),
            ("app_start_warm", "duration", "millisecond"),
            ("frames_frozen", "number", None),  # should be integer but keeping it consistent
            ("frames_frozen_rate", "percentage", None),
            ("frames_slow", "number", None),  # should be integer but keeping it consistent
            ("frames_slow_rate", "percentage", None),
            ("frames_total", "number", None),  # should be integer but keeping it consistent
            ("time_to_initial_display", "duration", "millisecond"),
            ("time_to_full_display", "duration", "millisecond"),
            ("stall_count", "number", None),  # should be integer but keeping it consistent
            ("stall_percentage", "percentage", None),
            ("stall_stall_longest_time", "number", None),
            ("stall_stall_total_time", "number", None),
            ("cls", "number", None),
            ("fcp", "duration", "millisecond"),
            ("fid", "duration", "millisecond"),
            ("fp", "duration", "millisecond"),
            ("inp", "duration", "millisecond"),
            ("lcp", "duration", "millisecond"),
            ("ttfb", "duration", "millisecond"),
            ("ttfb.requesttime", "duration", "millisecond"),
            ("score.cls", "number", None),
            ("score.fcp", "number", None),
            ("score.fid", "number", None),
            ("score.inp", "number", None),
            ("score.lcp", "number", None),
            ("score.ttfb", "number", None),
            ("score.total", "number", None),
            ("score.weight.cls", "number", None),
            ("score.weight.fcp", "number", None),
            ("score.weight.fid", "number", None),
            ("score.weight.inp", "number", None),
            ("score.weight.lcp", "number", None),
            ("score.weight.ttfb", "number", None),
            ("messaging.message.receive.latency", "duration", "millisecond"),
            ("messaging.message.retry.count", "number", None),
            # size fields aren't property support pre-RPC
            ("cache.item_size", "number", None),
            ("messaging.message.body.size", "number", None),
        ]
        self._test_simple_measurements(keys)
- def test_environment(self):
- self.create_environment(self.project, name="prod")
- self.create_environment(self.project, name="test")
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"environment": "prod"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"environment": "test"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["environment", "count()"],
- "project": self.project.id,
- "environment": "prod",
- "orderby": "environment",
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {"environment": "prod", "count()": 1},
- ]
- response = self.do_request(
- {
- "field": ["environment", "count()"],
- "project": self.project.id,
- "environment": ["prod", "test"],
- "orderby": "environment",
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {"environment": "prod", "count()": 1},
- {"environment": "test", "count()": 1},
- ]
- def test_transaction(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"transaction": "bar"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": "transaction:bar",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_orderby_alias(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- duration=2000,
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.description", "sum(span.self_time)"],
- "query": "",
- "orderby": "sum_span_self_time",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.description": "foo",
- "sum(span.self_time)": 1000,
- },
- {
- "span.description": "bar",
- "sum(span.self_time)": 2000,
- },
- ]
- assert meta["dataset"] == self.dataset
    @pytest.mark.querybuilder
    def test_explore_sample_query(self):
        """Sample-mode query used by Explore: fetch individual span rows and
        verify id, duration, description, timestamp and project mappings."""
        spans = [
            self.create_span(
                {"description": "foo", "sentry_tags": {"status": "success"}},
                start_ts=self.ten_mins_ago,
            ),
            self.create_span(
                {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
                start_ts=self.nine_mins_ago,
            ),
        ]
        self.store_spans(
            spans,
            is_eap=self.is_eap,
        )
        response = self.do_request(
            {
                "field": [
                    "id",
                    "project",
                    "span.op",
                    "span.description",
                    "span.duration",
                    "timestamp",
                    "trace",
                    "transaction.span_id",
                ],
                # This is to skip INP spans
                "query": "!transaction.span_id:00",
                "orderby": "timestamp",
                "statsPeriod": "1h",
                "project": self.project.id,
                "dataset": self.dataset,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        # Ordered by timestamp, so rows line up with the insertion order above.
        for source, result in zip(spans, data):
            assert result["id"] == source["span_id"], "id"
            assert result["span.duration"] == 1000.0, "duration"
            assert result["span.op"] == "", "op"
            assert result["span.description"] == source["description"], "description"
            # Timestamps must be timezone-aware UTC, within 5s of the stored
            # precise end timestamp.
            ts = datetime.fromisoformat(result["timestamp"])
            assert ts.tzinfo == timezone.utc
            assert ts.timestamp() == pytest.approx(
                source["end_timestamp_precise"], abs=5
            ), "timestamp"
            assert result["transaction.span_id"] == source["segment_id"], "transaction.span_id"
            assert result["project"] == result["project.name"] == self.project.slug, "project"
        assert meta["dataset"] == self.dataset
- def test_span_status(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "internal_error"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": "span.status:internal_error",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_handle_nans_from_snuba(self):
- self.store_spans(
- [self.create_span({"description": "foo"}, start_ts=self.ten_mins_ago)],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": "span.status:internal_error",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- def test_in_filter(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"transaction": "bar"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"transaction": "baz"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"transaction": "bat"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["transaction", "count()"],
- "query": "transaction:[bar, baz]",
- "orderby": "transaction",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "transaction": "bar",
- "count()": 1,
- },
- {
- "transaction": "baz",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def _test_aggregate_filter(self, queries):
- self.store_spans(
- [
- self.create_span(
- {"sentry_tags": {"transaction": "foo"}},
- measurements={
- "lcp": {"value": 5000},
- "http.response_content_length": {"value": 5000},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"sentry_tags": {"transaction": "foo"}},
- measurements={
- "lcp": {"value": 5000},
- "http.response_content_length": {"value": 5000},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"sentry_tags": {"transaction": "bar"}},
- measurements={
- "lcp": {"value": 1000},
- "http.response_content_length": {"value": 1000},
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- for query in queries:
- response = self.do_request(
- {
- "field": ["transaction", "count()"],
- "query": query,
- "orderby": "transaction",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["transaction"] == "foo"
- assert data[0]["count()"] == 2
- assert meta["dataset"] == self.dataset
- def test_aggregate_filter(self):
- self._test_aggregate_filter(
- [
- "count():2",
- "count():>1",
- "avg(measurements.lcp):>3000",
- "avg(measurements.lcp):>3s",
- "count():>1 avg(measurements.lcp):>3000",
- "count():>1 AND avg(measurements.lcp):>3000",
- "count():>1 OR avg(measurements.lcp):>3000",
- ]
- )
- def test_pagination_samples(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "a"},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "b"},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- "per_page": 1,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "description": "a",
- },
- ]
- links = {}
- for url, attrs in parse_link_header(response["Link"]).items():
- links[attrs["rel"]] = attrs
- attrs["href"] = url
- assert links["previous"]["results"] == "false"
- assert links["next"]["results"] == "true"
- assert links["next"]["href"] is not None
- response = self.client.get(links["next"]["href"], format="json")
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "description": "b",
- },
- ]
- links = {}
- for url, attrs in parse_link_header(response["Link"]).items():
- links[attrs["rel"]] = attrs
- attrs["href"] = url
- assert links["previous"]["results"] == "true"
- assert links["next"]["results"] == "false"
- assert links["previous"]["href"] is not None
- response = self.client.get(links["previous"]["href"], format="json")
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "description": "a",
- },
- ]
- links = {}
- for url, attrs in parse_link_header(response["Link"]).items():
- links[attrs["rel"]] = attrs
- attrs["href"] = url
- assert links["previous"]["results"] == "false"
- assert links["next"]["results"] == "true"
- def test_precise_timestamps(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["precise.start_ts", "precise.finish_ts"],
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- start = self.ten_mins_ago.timestamp()
- finish = start + 1
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "precise.start_ts": start,
- "precise.finish_ts": finish,
- },
- ]
- def test_replay_id(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"replay_id": "123"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "tags": {"replayId": "321"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["replay"],
- "project": self.project.id,
- "dataset": self.dataset,
- "orderby": "replay",
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "replay": "123",
- },
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "replay": "321",
- },
- ]
- def test_user_display(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"user.email": "test@test.com"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"user.username": "test"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["user.display"],
- "project": self.project.id,
- "dataset": self.dataset,
- "orderby": "user.display",
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "user.display": "test",
- },
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "user.display": "test@test.com",
- },
- ]
- def test_query_with_asterisk(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "select * from database"},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.description"],
- "query": 'span.description:"select \\* from database"',
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- assert response.data["data"][0]["span.description"] == "select * from database"
class OrganizationEventsEAPSpanEndpointTest(OrganizationEventsSpanIndexedEndpointTest):
    # Re-runs the indexed-span endpoint suite against the EAP dataset.
    # use_rpc stays off here; the RPC subclass below flips it on.
    is_eap = True
    use_rpc = False
- def test_simple(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.status": "invalid_argument",
- "description": "bar",
- "count()": 1,
- },
- {
- "span.status": "success",
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
    @pytest.mark.xfail(reason="event_id isn't being written to the new table")
    def test_id_filtering(self):
        # Inherited test; expected to fail until event_id lands in the EAP table.
        super().test_id_filtering()
- def test_span_duration(self):
- spans = [
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ]
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["span.duration", "description"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.duration": 1000.0,
- "description": "bar",
- "project.name": self.project.slug,
- "id": spans[0]["span_id"],
- },
- {
- "span.duration": 1000.0,
- "description": "foo",
- "project.name": self.project.slug,
- "id": spans[1]["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
- @pytest.mark.xfail
- def test_aggregate_numeric_attr(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar1"},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar3"},
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "description",
- "count_unique(bar)",
- "count_unique(tags[bar])",
- "count_unique(tags[bar,string])",
- "count()",
- "count(span.duration)",
- "count(tags[foo, number])",
- "sum(tags[foo,number])",
- "avg(tags[foo,number])",
- "p50(tags[foo,number])",
- "p75(tags[foo,number])",
- "p95(tags[foo,number])",
- "p99(tags[foo,number])",
- "p100(tags[foo,number])",
- "min(tags[foo,number])",
- "max(tags[foo,number])",
- ],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0] == {
- "description": "foo",
- "count_unique(bar)": 3,
- "count_unique(tags[bar])": 3,
- "count_unique(tags[bar,string])": 3,
- "count()": 3,
- "count(span.duration)": 3,
- "count(tags[foo, number])": 1,
- "sum(tags[foo,number])": 5.0,
- "avg(tags[foo,number])": 5.0,
- "p50(tags[foo,number])": 5.0,
- "p75(tags[foo,number])": 5.0,
- "p95(tags[foo,number])": 5.0,
- "p99(tags[foo,number])": 5.0,
- "p100(tags[foo,number])": 5.0,
- "min(tags[foo,number])": 5.0,
- "max(tags[foo,number])": 5.0,
- }
- def test_numeric_attr_without_space(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]", "tags[foo,string]", "tags[foo]"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["tags[foo,string]"] == "five"
- assert data[0]["tags[foo]"] == "five"
- def test_numeric_attr_with_spaces(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo, number]", "tags[foo, string]", "tags[foo]"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo, number]"] == 5
- assert data[0]["tags[foo, string]"] == "five"
- assert data[0]["tags[foo]"] == "five"
- def test_numeric_attr_filtering(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "success", "foo": "five"}},
- measurements={"foo": {"value": 8}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]"],
- "query": "tags[foo,number]:5",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["description"] == "foo"
- def test_long_attr_name(self):
- response = self.do_request(
- {
- "field": ["description", "z" * 201],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 400, response.content
- assert "Is Too Long" in response.data["detail"].title()
- def test_numeric_attr_orderby(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "baz",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 71}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 8}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]"],
- "query": "",
- "orderby": ["tags[foo,number]"],
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 3
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["description"] == "foo"
- assert data[1]["tags[foo,number]"] == 8
- assert data[1]["description"] == "bar"
- assert data[2]["tags[foo,number]"] == 71
- assert data[2]["description"] == "baz"
- def test_margin_of_error(self):
- total_samples = 10
- in_group = 5
- spans = []
- for _ in range(in_group):
- spans.append(
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.00001}},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- for _ in range(total_samples - in_group):
- spans.append(
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.00001}},
- },
- )
- )
- self.store_spans(
- spans,
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "margin_of_error()",
- "lower_count_limit()",
- "upper_count_limit()",
- "count()",
- ],
- "query": "description:foo",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"][0]
- margin_of_error = data["margin_of_error()"]
- lower_limit = data["lower_count_limit()"]
- upper_limit = data["upper_count_limit()"]
- extrapolated = data["count()"]
- assert margin_of_error == pytest.approx(0.306, rel=1e-1)
- # How to read this; these results mean that the extrapolated count is
- # 500k, with a lower estimated bound of ~200k, and an upper bound of 800k
- assert lower_limit == pytest.approx(190_000, abs=5000)
- assert extrapolated == pytest.approx(500_000, abs=5000)
- assert upper_limit == pytest.approx(810_000, abs=5000)
- def test_skip_aggregate_conditions_option(self):
- span_1 = self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- )
- span_2 = self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- )
- self.store_spans(
- [span_1, span_2],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description"],
- "query": "description:foo count():>1",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- "allowAggregateConditions": "0",
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "description": "foo",
- "project.name": self.project.slug,
- "id": span_1["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_span_data_fields_http_resource(self):
- self.store_spans(
- [
- self.create_span(
- {
- "op": "resource.img",
- "description": "/image/",
- "data": {
- "http.decoded_response_content_length": 1,
- "http.response_content_length": 2,
- "http.response_transfer_size": 3,
- },
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "http.decoded_response_content_length",
- "http.response_content_length",
- "http.response_transfer_size",
- ],
- "query": "http.decoded_response_content_length:>0 http.response_content_length:>0 http.response_transfer_size:>0",
- "project": self.project.id,
- "dataset": self.dataset,
- "allowAggregateConditions": "0",
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "http.decoded_response_content_length": 1,
- "http.response_content_length": 2,
- "http.response_transfer_size": 3,
- "project.name": self.project.slug,
- "id": mock.ANY,
- },
- ]
- expected = {
- "dataset": mock.ANY,
- "datasetReason": "unchanged",
- "fields": {
- "http.decoded_response_content_length": "size",
- "http.response_content_length": "size",
- "http.response_transfer_size": "size",
- "id": "string",
- "project.name": "string",
- },
- "isMetricsData": False,
- "isMetricsExtractedData": False,
- "tips": {},
- "units": {
- "http.decoded_response_content_length": "byte",
- "http.response_content_length": "byte",
- "http.response_transfer_size": "byte",
- "id": None,
- "project.name": None,
- },
- }
- if self.use_rpc:
- expected["accuracy"] = {
- "confidence": [{}],
- }
- assert response.data["meta"] == expected
- def test_filtering_numeric_attr(self):
- span_1 = self.create_span(
- {"description": "foo"},
- measurements={"foo": {"value": 30}},
- start_ts=self.ten_mins_ago,
- )
- span_2 = self.create_span(
- {"description": "foo"},
- measurements={"foo": {"value": 10}},
- start_ts=self.ten_mins_ago,
- )
- self.store_spans([span_1, span_2], is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["tags[foo,number]"],
- "query": "span.duration:>=0 tags[foo,number]:>20",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [
- {
- "id": span_1["span_id"],
- "project.name": self.project.slug,
- "tags[foo,number]": 30,
- },
- ]
- def test_byte_fields(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "data": {
- "cache.item_size": 1,
- "messaging.message.body.size": 2,
- },
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "cache.item_size",
- "measurements.cache.item_size",
- "messaging.message.body.size",
- "measurements.messaging.message.body.size",
- ],
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.data["data"] == [
- {
- "id": mock.ANY,
- "project.name": self.project.slug,
- "cache.item_size": 1.0,
- "measurements.cache.item_size": 1.0,
- "measurements.messaging.message.body.size": 2.0,
- "messaging.message.body.size": 2.0,
- },
- ]
- def test_aggregate_filter(self):
- self._test_aggregate_filter(
- [
- "count():2",
- "count():>1",
- "avg(measurements.lcp):>3000",
- "avg(measurements.lcp):>3s",
- "count():>1 avg(measurements.lcp):>3000",
- "count():>1 AND avg(measurements.lcp):>3000",
- "count():>1 OR avg(measurements.lcp):>3000",
- "(count():>1 AND avg(http.response_content_length):>3000) OR (count():>1 AND avg(measurements.lcp):>3000)",
- ]
- )
class OrganizationEventsEAPRPCSpanEndpointTest(OrganizationEventsEAPSpanEndpointTest):
    # Same EAP suite, but routed through the RPC query path.
    is_eap = True
    use_rpc = True
- @mock.patch(
- "sentry.utils.snuba_rpc._snuba_pool.urlopen", side_effect=urllib3.exceptions.TimeoutError
- )
- def test_timeout(self, mock_rpc):
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 400, response.content
- assert "Query timeout" in response.data["detail"]
- def test_extrapolation(self):
- """Extrapolation only changes the number when there's a sample rate"""
- spans = []
- spans.append(
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.1}},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- spans.append(
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "orderby": "-count()",
- "query": "",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- confidence = meta["accuracy"]["confidence"]
- assert len(data) == 2
- assert len(confidence) == 2
- assert data[0]["count()"] == 10
- assert confidence[0]["count()"] == "low"
- assert data[1]["count()"] == 1
- # While logically the confidence for 1 event at 100% sample rate should be high, we're going with low until we
- # get customer feedback
- assert confidence[1]["count()"] == "low"
- def test_span_duration(self):
- spans = [
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ]
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["span.duration", "description"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.duration": 1000.0,
- "description": "bar",
- "project.name": self.project.slug,
- "id": spans[0]["span_id"],
- },
- {
- "span.duration": 1000.0,
- "description": "foo",
- "project.name": self.project.slug,
- "id": spans[1]["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_average_sampling_rate(self):
- spans = []
- spans.append(
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.1}},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- spans.append(
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.85}},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": [
- "avg_sample(sampling_rate)",
- "count()",
- "min(sampling_rate)",
- "count_sample()",
- ],
- "query": "",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- confidence = meta["accuracy"]["confidence"]
- assert len(data) == 1
- assert data[0]["avg_sample(sampling_rate)"] == pytest.approx(0.475)
- assert data[0]["min(sampling_rate)"] == pytest.approx(0.1)
- assert data[0]["count_sample()"] == 2
- assert data[0]["count()"] == 11
- assert confidence[0]["count()"] == "low"
- def test_aggregate_numeric_attr(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar1"},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "description",
- "count_unique(bar)",
- "count_unique(tags[bar])",
- "count_unique(tags[bar,string])",
- "count()",
- "count(span.duration)",
- "count(tags[foo, number])",
- "sum(tags[foo,number])",
- "avg(tags[foo,number])",
- "p50(tags[foo,number])",
- "p75(tags[foo,number])",
- "p95(tags[foo,number])",
- "p99(tags[foo,number])",
- "p100(tags[foo,number])",
- "min(tags[foo,number])",
- "max(tags[foo,number])",
- ],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0] == {
- "description": "foo",
- "count_unique(bar)": 2,
- "count_unique(tags[bar])": 2,
- "count_unique(tags[bar,string])": 2,
- "count()": 2,
- "count(span.duration)": 2,
- "count(tags[foo, number])": 1,
- "sum(tags[foo,number])": 5.0,
- "avg(tags[foo,number])": 5.0,
- "p50(tags[foo,number])": 5.0,
- "p75(tags[foo,number])": 5.0,
- "p95(tags[foo,number])": 5.0,
- "p99(tags[foo,number])": 5.0,
- "p100(tags[foo,number])": 5.0,
- "min(tags[foo,number])": 5.0,
- "max(tags[foo,number])": 5.0,
- }
    @pytest.mark.skip(reason="margin will not be moved to the RPC")
    def test_margin_of_error(self):
        # margin_of_error()/count-limit functions are SnQL-only per the skip reason.
        super().test_margin_of_error()
    @pytest.mark.skip(reason="module not migrated over")
    def test_module_alias(self):
        # Inherited test skipped until the module alias is supported on the RPC path.
        super().test_module_alias()
    @pytest.mark.xfail(
        reason="wip: depends on rpc having a way to set a different default in virtual contexts"
    )
    def test_span_module(self):
        # Inherited test; expected to fail until virtual-context defaults land in the RPC.
        super().test_span_module()
- def test_inp_span(self):
- replay_id = uuid.uuid4().hex
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "replay_id": replay_id,
- "browser.name": "Chrome",
- "transaction": "/pageloads/",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- # Not moving origin.transaction to RPC, its equivalent to transaction and just represents the
- # transaction that's related to the span
- "field": ["replay.id", "browser.name", "transaction", "count()"],
- "query": f"replay.id:{replay_id} AND browser.name:Chrome AND transaction:/pageloads/",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["replay.id"] == replay_id
- assert data[0]["browser.name"] == "Chrome"
- assert data[0]["transaction"] == "/pageloads/"
- assert meta["dataset"] == self.dataset
    @pytest.mark.xfail(
        reason="wip: depends on rpc having a way to set a different default in virtual contexts"
    )
    # https://github.com/getsentry/projects/issues/215?issue=getsentry%7Cprojects%7C488
    def test_other_category_span(self):
        # Inherited test; expected to fail until virtual-context defaults land in the RPC.
        super().test_other_category_span()
    @pytest.mark.xfail(
        reason="wip: not implemented yet, depends on rpc having a way to filter based on casing"
    )
    # https://github.com/getsentry/projects/issues/215?issue=getsentry%7Cprojects%7C489
    def test_span_op_casing(self):
        # Inherited test; expected to fail until case-insensitive filtering exists in the RPC.
        super().test_span_op_casing()
- def test_tag_wildcards(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "tags": {"foo": "bar"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "qux", "tags": {"foo": "qux"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- for query in [
- "foo:b*",
- "foo:*r",
- "foo:*a*",
- "foo:b*r",
- ]:
- response = self.do_request(
- {
- "field": ["foo", "count()"],
- "query": query,
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [{"foo": "bar", "count()": 1}]
    @pytest.mark.xfail(reason="wip: rate not implemented yet")
    def test_spm(self):
        # Inherited test; expected to fail until rate functions exist on the RPC path.
        super().test_spm()
- def test_is_transaction(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "is_segment": True,
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "is_segment": False,
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()", "is_transaction"],
- "query": "is_transaction:true",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "is_transaction": True,
- "span.status": "success",
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_is_not_transaction(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "is_segment": True,
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "is_segment": False,
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()", "is_transaction"],
- "query": "is_transaction:0",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "is_transaction": False,
- "span.status": "success",
- "description": "bar",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
def test_byte_fields(self):
    """Byte-sized span data fields resolve to the ``size``/``byte`` type and unit."""
    self.store_spans(
        [
            self.create_span(
                {
                    "description": "foo",
                    "data": {
                        "cache.item_size": 1,
                        "messaging.message.body.size": 2,
                    },
                },
                start_ts=self.ten_mins_ago,
            ),
        ],
        is_eap=self.is_eap,
    )

    # Query each field both bare and under the measurements.* prefix.
    requested_fields = [
        "cache.item_size",
        "measurements.cache.item_size",
        "messaging.message.body.size",
        "measurements.messaging.message.body.size",
    ]
    response = self.do_request(
        {
            "field": requested_fields,
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )

    assert response.data["data"] == [
        {
            "id": mock.ANY,
            "project.name": self.project.slug,
            "cache.item_size": 1.0,
            "measurements.cache.item_size": 1.0,
            "measurements.messaging.message.body.size": 2.0,
            "messaging.message.body.size": 2.0,
        },
    ]

    meta = response.data["meta"]
    assert meta["fields"] == {
        "id": "string",
        "project.name": "string",
        "cache.item_size": "size",
        "measurements.cache.item_size": "size",
        "measurements.messaging.message.body.size": "size",
        "messaging.message.body.size": "size",
    }
    assert meta["units"] == {
        "id": None,
        "project.name": None,
        "cache.item_size": "byte",
        "measurements.cache.item_size": "byte",
        "measurements.messaging.message.body.size": "byte",
        "messaging.message.body.size": "byte",
    }
def test_simple_measurements(self):
    """Exercise the shared measurements check over every well-known key.

    Each entry is (measurement name, expected meta type, expected unit).
    """
    # Reused (type, unit) pairs to keep the table readable.
    duration_ms = ("duration", "millisecond")
    number = ("number", None)
    percentage = ("percentage", None)
    byte_size = ("size", "byte")

    keys = [
        ("app_start_cold", *duration_ms),
        ("app_start_warm", *duration_ms),
        # The frames_* / stall_count values should be integer but are kept
        # consistent as plain numbers.
        ("frames_frozen", *number),
        ("frames_frozen_rate", *percentage),
        ("frames_slow", *number),
        ("frames_slow_rate", *percentage),
        ("frames_total", *number),
        ("time_to_initial_display", *duration_ms),
        ("time_to_full_display", *duration_ms),
        ("stall_count", *number),
        ("stall_percentage", *percentage),
        ("stall_stall_longest_time", *number),
        ("stall_stall_total_time", *number),
        ("cls", *number),
        ("fcp", *duration_ms),
        ("fid", *duration_ms),
        ("fp", *duration_ms),
        ("inp", *duration_ms),
        ("lcp", *duration_ms),
        ("ttfb", *duration_ms),
        ("ttfb.requesttime", *duration_ms),
        ("score.cls", *number),
        ("score.fcp", *number),
        ("score.fid", *number),
        ("score.inp", *number),
        ("score.lcp", *number),
        ("score.ttfb", *number),
        ("score.total", *number),
        ("score.weight.cls", *number),
        ("score.weight.fcp", *number),
        ("score.weight.fid", *number),
        ("score.weight.inp", *number),
        ("score.weight.lcp", *number),
        ("score.weight.ttfb", *number),
        ("cache.item_size", *byte_size),
        ("messaging.message.body.size", *byte_size),
        ("messaging.message.receive.latency", *duration_ms),
        ("messaging.message.retry.count", *number),
    ]
    self._test_simple_measurements(keys)
def test_explore_sample_query(self):
    """Mimic the default Explore samples query and validate every column."""
    spans = [
        self.create_span(
            {"description": "foo", "sentry_tags": {"status": "success"}},
            start_ts=self.ten_mins_ago,
        ),
        self.create_span(
            {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
            start_ts=self.nine_mins_ago,
        ),
    ]
    self.store_spans(spans, is_eap=self.is_eap)

    fields = [
        "id",
        "project",
        "span.op",
        "span.description",
        "span.duration",
        "timestamp",
        "trace",
        "transaction.span_id",
    ]
    response = self.do_request(
        {
            "field": fields,
            # This is to skip INP spans
            "query": "!transaction.span_id:00",
            "orderby": "timestamp",
            "statsPeriod": "1h",
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )
    assert response.status_code == 200, response.content

    data = response.data["data"]
    meta = response.data["meta"]
    assert len(data) == 2

    # Rows come back ordered by timestamp, matching the insertion order above.
    for expected, row in zip(spans, data):
        assert row["id"] == expected["span_id"], "id"
        assert row["span.duration"] == 1000.0, "duration"
        # TODO: once the snuba change to return Nones has merged remove the or
        assert row["span.op"] is None or row["span.op"] == "", "op"
        assert row["span.description"] == expected["description"], "description"
        ts = datetime.fromisoformat(row["timestamp"])
        assert ts.tzinfo == timezone.utc
        assert ts.timestamp() == pytest.approx(
            expected["end_timestamp_precise"], abs=5
        ), "timestamp"
        assert row["transaction.span_id"] == expected["segment_id"], "transaction.span_id"
        assert row["project"] == row["project.name"] == self.project.slug, "project"

    assert meta["dataset"] == self.dataset
def test_query_for_missing_tag(self):
    """``has:foo`` matches only the span that carries the ``foo`` tag."""
    # "foo" has no tags at all; "qux" carries foo=bar.
    untagged = self.create_span(
        {"description": "foo"},
        start_ts=self.ten_mins_ago,
    )
    tagged = self.create_span(
        {"description": "qux", "tags": {"foo": "bar"}},
        start_ts=self.ten_mins_ago,
    )
    self.store_spans([untagged, tagged], is_eap=self.is_eap)

    response = self.do_request(
        {
            "field": ["foo", "count()"],
            "query": "has:foo",
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )
    assert response.status_code == 200, response.content
    assert response.data["data"] == [{"foo": "bar", "count()": 1}]
def test_query_for_missing_tag_negated(self):
    """``!has:foo`` matches only the span without the ``foo`` tag."""
    # "foo" has no tags at all; "qux" carries foo=bar.
    untagged = self.create_span(
        {"description": "foo"},
        start_ts=self.ten_mins_ago,
    )
    tagged = self.create_span(
        {"description": "qux", "tags": {"foo": "bar"}},
        start_ts=self.ten_mins_ago,
    )
    self.store_spans([untagged, tagged], is_eap=self.is_eap)

    response = self.do_request(
        {
            "field": ["foo", "count()"],
            "query": "!has:foo",
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )
    assert response.status_code == 200, response.content

    data = response.data["data"]
    assert len(data) == 1
    # Missing tags may come back as "" or None depending on the backend.
    assert data[0]["foo"] == "" or data[0]["foo"] is None
    assert data[0]["count()"] == 1
def test_device_class_filter_unknown(self):
    """An empty ``device.class`` tag is queryable via the ``Unknown`` alias."""
    empty_device_class_span = self.create_span(
        {"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago
    )
    self.store_spans([empty_device_class_span], is_eap=self.is_eap)

    response = self.do_request(
        {
            "field": ["device.class", "count()"],
            "query": "device.class:Unknown",
            "orderby": "count()",
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )
    assert response.status_code == 200, response.content

    data = response.data["data"]
    meta = response.data["meta"]
    assert len(data) == 1
    # The empty tag value is rendered back as the "Unknown" alias.
    assert data[0]["device.class"] == "Unknown"
    assert meta["dataset"] == self.dataset
@pytest.mark.skip(reason="replay id alias not migrated over")
def test_replay_id(self):
    """Inherited case; skipped until the replay id alias is migrated."""
    super().test_replay_id()
@pytest.mark.skip(reason="user display alias not migrated over")
def test_user_display(self):
    """Inherited case; skipped until the user display alias is migrated."""
    super().test_user_display()
|