- import uuid
- import pytest
- from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase
- class OrganizationEventsSpanIndexedEndpointTest(OrganizationEventsEndpointTestBase):
- """Test the indexed spans dataset.
- To run this locally you may need to set the ENABLE_SPANS_CONSUMER flag to True in Snuba.
- One way to do this:
- 1. run: `sentry devservices down snuba`
- 2. clone snuba locally
- 3. run: `export ENABLE_SPANS_CONSUMER=True`
- 4. run snuba
- At this point the tests should work locally.
- Once span ingestion is on by default, this will no longer be necessary.
- """
- is_eap = False
- use_rpc = False
- @property
- def dataset(self):
- if self.is_eap:
- return "spans"
- else:
- return "spansIndexed"
- def do_request(self, query, features=None, **kwargs):
- query["useRpc"] = "1" if self.use_rpc else "0"
- return super().do_request(query, features, **kwargs)
- def setUp(self):
- super().setUp()
- self.features = {
- "organizations:starfish-view": True,
- }
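- # Basic grouped query; the indexed dataset returns the stored "success" status as "ok" here, while the EAP subclass overrides this test to expect "success".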
- @pytest.mark.querybuilder
- def test_simple(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.status": "invalid_argument",
- "description": "bar",
- "count()": 1,
- },
- {
- "span.status": "ok",
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- def test_id_fields(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["id", "span_id"],
- "query": "",
- "orderby": "id",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- for obj in data:
- assert obj["id"] == obj["span_id"]
- assert meta["dataset"] == self.dataset
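- # sentry_tags entries are queryable by their bare key (e.g. transaction.method); the next test covers the explicit sentry_tags[...] syntax.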
- def test_sentry_tags_vs_tags(self):
- self.store_spans(
- [
- self.create_span(
- {"sentry_tags": {"transaction.method": "foo"}}, start_ts=self.ten_mins_ago
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["transaction.method", "count()"],
- "query": "",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["transaction.method"] == "foo"
- assert meta["dataset"] == self.dataset
- def test_sentry_tags_syntax(self):
- self.store_spans(
- [
- self.create_span(
- {"sentry_tags": {"transaction.method": "foo"}}, start_ts=self.ten_mins_ago
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["sentry_tags[transaction.method]", "count()"],
- "query": "",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["sentry_tags[transaction.method]"] == "foo"
- assert meta["dataset"] == self.dataset
- def test_module_alias(self):
- # `span.module` is an alias for `sentry_tags[category]`; spans with the `"db.redis"` op are mapped to the `"cache"` module.
- self.store_spans(
- [
- self.create_span(
- {
- "op": "db.redis",
- "description": "EXEC *",
- "sentry_tags": {
- "description": "EXEC *",
- "category": "db",
- "op": "db.redis",
- "transaction": "/app/index",
- },
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.module", "span.description"],
- "query": "span.module:cache",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["span.module"] == "cache"
- assert data[0]["span.description"] == "EXEC *"
- assert meta["dataset"] == self.dataset
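- # An empty device.class tag is surfaced as "Unknown" and can be filtered by that value.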
- def test_device_class_filter_unknown(self):
- self.store_spans(
- [
- self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["device.class", "count()"],
- "query": "device.class:Unknown",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["device.class"] == "Unknown"
- assert meta["dataset"] == self.dataset
- def test_network_span(self):
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "action": "GET",
- "category": "http",
- "description": "GET https://*.resource.com",
- "domain": "*.resource.com",
- "op": "http.client",
- "status_code": "200",
- "transaction": "/api/0/data/",
- "transaction.method": "GET",
- "transaction.op": "http.server",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.op", "span.status_code"],
- "query": "span.module:http span.status_code:200",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["span.op"] == "http.client"
- assert data[0]["span.status_code"] == "200"
- assert meta["dataset"] == self.dataset
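- # Spans whose category doesn't correspond to a known module fall under span.module:other.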
- def test_other_category_span(self):
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "action": "GET",
- "category": "alternative",
- "description": "GET https://*.resource.com",
- "domain": "*.resource.com",
- "op": "alternative",
- "status_code": "200",
- "transaction": "/api/0/data/",
- "transaction.method": "GET",
- "transaction.op": "http.server",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.op", "span.status_code"],
- "query": "span.module:other span.status_code:200",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["span.op"] == "alternative"
- assert data[0]["span.status_code"] == "200"
- assert meta["dataset"] == self.dataset
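- # The replay.id, browser.name, and origin.transaction fields resolve to the corresponding sentry_tags (replay_id, browser.name, transaction).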
- def test_inp_span(self):
- replay_id = uuid.uuid4().hex
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "replay_id": replay_id,
- "browser.name": "Chrome",
- "transaction": "/pageloads/",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["replay.id", "browser.name", "origin.transaction", "count()"],
- "query": f"replay.id:{replay_id} AND browser.name:Chrome AND origin.transaction:/pageloads/",
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["replay.id"] == replay_id
- assert data[0]["browser.name"] == "Chrome"
- assert data[0]["origin.transaction"] == "/pageloads/"
- assert meta["dataset"] == self.dataset
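- # id: filters on the span_id, while transaction.id: filters on the span's event_id.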
- def test_id_filtering(self):
- span = self.create_span({"description": "foo"}, start_ts=self.ten_mins_ago)
- self.store_span(span, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": f"id:{span['span_id']}",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["description"] == "foo"
- assert meta["dataset"] == self.dataset
- response = self.do_request(
- {
- "field": ["description", "count()"],
- "query": f"transaction.id:{span['event_id']}",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["description"] == "foo"
- assert meta["dataset"] == self.dataset
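- # span.op filters match case-insensitively.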
- def test_span_op_casing(self):
- self.store_spans(
- [
- self.create_span(
- {
- "sentry_tags": {
- "replay_id": "abc123",
- "browser.name": "Chrome",
- "transaction": "/pageloads/",
- "op": "this is a transaction",
- }
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.op", "count()"],
- "query": 'span.op:"ThIs Is a TraNSActiON"',
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["span.op"] == "this is a transaction"
- assert meta["dataset"] == self.dataset
- def test_queue_span(self):
- self.store_spans(
- [
- self.create_span(
- {
- "measurements": {
- "messaging.message.body.size": {"value": 1024, "unit": "byte"},
- "messaging.message.receive.latency": {
- "value": 1000,
- "unit": "millisecond",
- },
- "messaging.message.retry.count": {"value": 2, "unit": "none"},
- },
- "sentry_tags": {
- "transaction": "queue-processor",
- "messaging.destination.name": "events",
- "messaging.message.id": "abc123",
- "trace.status": "ok",
- },
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "transaction",
- "messaging.destination.name",
- "messaging.message.id",
- "measurements.messaging.message.receive.latency",
- "measurements.messaging.message.body.size",
- "measurements.messaging.message.retry.count",
- "trace.status",
- "count()",
- ],
- "query": 'messaging.destination.name:"events"',
- "orderby": "count()",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data[0]["transaction"] == "queue-processor"
- assert data[0]["messaging.destination.name"] == "events"
- assert data[0]["messaging.message.id"] == "abc123"
- assert data[0]["trace.status"] == "ok"
- assert data[0]["measurements.messaging.message.receive.latency"] == 1000
- assert data[0]["measurements.messaging.message.body.size"] == 1024
- assert data[0]["measurements.messaging.message.retry.count"] == 2
- assert meta["dataset"] == self.dataset
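- # Wildcard tag filters also match case-insensitively: the stored value is "BaR" but the lowercase patterns below still match it.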
- def test_tag_wildcards(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "tags": {"foo": "BaR"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "qux", "tags": {"foo": "QuX"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- for query in [
- "foo:b*",
- "foo:*r",
- "foo:*a*",
- "foo:b*r",
- ]:
- response = self.do_request(
- {
- "field": ["foo", "count()"],
- "query": query,
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [{"foo": "BaR", "count()": 1}]
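- # foo:"" matches only the span that has no "foo" tag set.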
- def test_query_for_missing_tag(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo"},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "qux", "tags": {"foo": "bar"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["foo", "count()"],
- "query": 'foo:""',
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert response.data["data"] == [{"foo": "", "count()": 1}]
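- # Runs the same suite against the EAP dataset, overriding the tests whose expected output differs.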
- class OrganizationEventsEAPSpanEndpointTest(OrganizationEventsSpanIndexedEndpointTest):
- is_eap = True
- use_rpc = False
- def test_simple(self):
- self.store_spans(
- [
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["span.status", "description", "count()"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.status": "invalid_argument",
- "description": "bar",
- "count()": 1,
- },
- {
- "span.status": "success",
- "description": "foo",
- "count()": 1,
- },
- ]
- assert meta["dataset"] == self.dataset
- @pytest.mark.xfail(reason="event_id isn't being written to the new table")
- def test_id_filtering(self):
- super().test_id_filtering()
- def test_span_duration(self):
- spans = [
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ]
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["span.duration", "description"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.duration": 1000.0,
- "description": "bar",
- "project.name": self.project.slug,
- "id": spans[0]["span_id"],
- },
- {
- "span.duration": 1000.0,
- "description": "foo",
- "project.name": self.project.slug,
- "id": spans[1]["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
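- # The *_weighted aggregates appear to be the extrapolation-aware counterparts of the plain aggregate functions (the RPC subclass xfails this with "extrapolation not implemented yet").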
- def test_aggregate_numeric_attr_weighted(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar1"},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar3"},
- },
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "description",
- "count_unique_weighted(bar)",
- "count_unique_weighted(tags[bar])",
- "count_unique_weighted(tags[bar,string])",
- "count_weighted()",
- "count_weighted(span.duration)",
- "count_weighted(tags[foo, number])",
- "sum_weighted(tags[foo,number])",
- "avg_weighted(tags[foo,number])",
- "p50_weighted(tags[foo,number])",
- "p75_weighted(tags[foo,number])",
- "p95_weighted(tags[foo,number])",
- "p99_weighted(tags[foo,number])",
- "p100_weighted(tags[foo,number])",
- "min_weighted(tags[foo,number])",
- "max_weighted(tags[foo,number])",
- ],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0] == {
- "description": "foo",
- "count_unique_weighted(bar)": 3,
- "count_unique_weighted(tags[bar])": 3,
- "count_unique_weighted(tags[bar,string])": 3,
- "count_weighted()": 3,
- "count_weighted(span.duration)": 3,
- "count_weighted(tags[foo, number])": 1,
- "sum_weighted(tags[foo,number])": 5.0,
- "avg_weighted(tags[foo,number])": 5.0,
- "p50_weighted(tags[foo,number])": 5.0,
- "p75_weighted(tags[foo,number])": 5.0,
- "p95_weighted(tags[foo,number])": 5.0,
- "p99_weighted(tags[foo,number])": 5.0,
- "p100_weighted(tags[foo,number])": 5.0,
- "min_weighted(tags[foo,number])": 5.0,
- "max_weighted(tags[foo,number])": 5.0,
- }
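- # The same tag key can hold both a string tag and a numeric measurement: tags[foo,number] reads the measurement, while tags[foo,string] and tags[foo] read the string value.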
- def test_numeric_attr_without_space(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]", "tags[foo,string]", "tags[foo]"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["tags[foo,string]"] == "five"
- assert data[0]["tags[foo]"] == "five"
- def test_numeric_attr_with_spaces(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo, number]", "tags[foo, string]", "tags[foo]"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo, number]"] == 5
- assert data[0]["tags[foo, string]"] == "five"
- assert data[0]["tags[foo]"] == "five"
- def test_numeric_attr_filtering(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "success", "foo": "five"}},
- measurements={"foo": {"value": 8}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]"],
- "query": "tags[foo,number]:5",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["description"] == "foo"
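- # Attribute names longer than 200 characters are rejected with a 400.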
- def test_long_attr_name(self):
- response = self.do_request(
- {
- "field": ["description", "z" * 201],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 400, response.content
- assert "Is Too Long" in response.data["detail"].title()
- def test_numeric_attr_orderby(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "baz",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 71}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "tags": {"foo": "five"},
- },
- measurements={"foo": {"value": 8}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description", "tags[foo,number]"],
- "query": "",
- "orderby": ["tags[foo,number]"],
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 3
- data = response.data["data"]
- assert data[0]["tags[foo,number]"] == 5
- assert data[0]["description"] == "foo"
- assert data[1]["tags[foo,number]"] == 8
- assert data[1]["description"] == "bar"
- assert data[2]["tags[foo,number]"] == 71
- assert data[2]["description"] == "baz"
- def test_aggregate_numeric_attr(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar1"},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "description",
- "count_unique(bar)",
- "count_unique(tags[bar])",
- "count_unique(tags[bar,string])",
- "count()",
- "count(span.duration)",
- "count(tags[foo, number])",
- "sum(tags[foo,number])",
- "avg(tags[foo,number])",
- "p50(tags[foo,number])",
- "p75(tags[foo,number])",
- "p95(tags[foo,number])",
- "p99(tags[foo,number])",
- "p100(tags[foo,number])",
- "min(tags[foo,number])",
- "max(tags[foo,number])",
- ],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0] == {
- "description": "foo",
- "count_unique(bar)": 2,
- "count_unique(tags[bar])": 2,
- "count_unique(tags[bar,string])": 2,
- "count()": 2,
- "count(span.duration)": 2,
- "count(tags[foo, number])": 1,
- "sum(tags[foo,number])": 5.0,
- "avg(tags[foo,number])": 5.0,
- "p50(tags[foo,number])": 5.0,
- "p75(tags[foo,number])": 5.0,
- "p95(tags[foo,number])": 5.0,
- "p99(tags[foo,number])": 5.0,
- "p100(tags[foo,number])": 5.0,
- "min(tags[foo,number])": 5.0,
- "max(tags[foo,number])": 5.0,
- }
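- # 5 matching spans stored at a client sample rate of 0.00001 extrapolate to roughly 5 / 0.00001 = 500,000; the margin of error gives the lower/upper bounds asserted below.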
- def test_margin_of_error(self):
- total_samples = 10
- in_group = 5
- spans = []
- for _ in range(in_group):
- spans.append(
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.00001}},
- },
- start_ts=self.ten_mins_ago,
- )
- )
- for _ in range(total_samples - in_group):
- spans.append(
- self.create_span(
- {
- "description": "bar",
- "sentry_tags": {"status": "success"},
- "measurements": {"client_sample_rate": {"value": 0.00001}},
- },
- )
- )
- self.store_spans(
- spans,
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "margin_of_error()",
- "lower_count_limit()",
- "upper_count_limit()",
- "count_weighted()",
- ],
- "query": "description:foo",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"][0]
- margin_of_error = data["margin_of_error()"]
- lower_limit = data["lower_count_limit()"]
- upper_limit = data["upper_count_limit()"]
- extrapolated = data["count_weighted()"]
- assert margin_of_error == pytest.approx(0.306, rel=1e-1)
- # How to read this: these results mean that the extrapolated count is
- # ~500k, with an estimated lower bound of ~200k and an upper bound of ~800k.
- assert lower_limit == pytest.approx(190_000, abs=5000)
- assert extrapolated == pytest.approx(500_000, abs=5000)
- assert upper_limit == pytest.approx(810_000, abs=5000)
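- # With allowAggregateConditions=0 the aggregate condition (count():>1) should be ignored, leaving only the description:foo filter.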
- def test_skip_aggregate_conditions_option(self):
- span_1 = self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- )
- span_2 = self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- )
- self.store_spans(
- [span_1, span_2],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": ["description"],
- "query": "description:foo count():>1",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- "allowAggregateConditions": "0",
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 1
- assert data == [
- {
- "description": "foo",
- "project.name": self.project.slug,
- "id": span_1["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
- class OrganizationEventsEAPRPCSpanEndpointTest(OrganizationEventsEAPSpanEndpointTest):
- """These tests aren't fully passing yet, currently inheriting xfail from the eap tests"""
- is_eap = True
- use_rpc = True
- def test_span_duration(self):
- spans = [
- self.create_span(
- {"description": "bar", "sentry_tags": {"status": "invalid_argument"}},
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {"description": "foo", "sentry_tags": {"status": "success"}},
- start_ts=self.ten_mins_ago,
- ),
- ]
- self.store_spans(spans, is_eap=self.is_eap)
- response = self.do_request(
- {
- "field": ["span.duration", "description"],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- data = response.data["data"]
- meta = response.data["meta"]
- assert len(data) == 2
- assert data == [
- {
- "span.duration": 1000.0,
- "description": "bar",
- "project.name": self.project.slug,
- "id": spans[0]["span_id"],
- },
- {
- "span.duration": 1000.0,
- "description": "foo",
- "project.name": self.project.slug,
- "id": spans[1]["span_id"],
- },
- ]
- assert meta["dataset"] == self.dataset
- @pytest.mark.xfail(reason="extrapolation not implemented yet")
- def test_aggregate_numeric_attr_weighted(self):
- super().test_aggregate_numeric_attr_weighted()
- @pytest.mark.xfail(reason="RPC failing because of aliasing")
- def test_numeric_attr_without_space(self):
- super().test_numeric_attr_without_space()
- @pytest.mark.xfail(reason="RPC failing because of aliasing")
- def test_numeric_attr_with_spaces(self):
- super().test_numeric_attr_with_spaces()
- @pytest.mark.xfail(reason="RPC failing because of aliasing")
- def test_numeric_attr_filtering(self):
- super().test_numeric_attr_filtering()
- @pytest.mark.xfail(reason="RPC failing because of aliasing")
- def test_numeric_attr_orderby(self):
- super().test_numeric_attr_orderby()
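- # Overridden to drop p75 until it is supported by the RPC (see the TODOs below).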
- def test_aggregate_numeric_attr(self):
- self.store_spans(
- [
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar1"},
- },
- start_ts=self.ten_mins_ago,
- ),
- self.create_span(
- {
- "description": "foo",
- "sentry_tags": {"status": "success"},
- "tags": {"bar": "bar2"},
- },
- measurements={"foo": {"value": 5}},
- start_ts=self.ten_mins_ago,
- ),
- ],
- is_eap=self.is_eap,
- )
- response = self.do_request(
- {
- "field": [
- "description",
- "count_unique(bar)",
- "count_unique(tags[bar])",
- "count_unique(tags[bar,string])",
- "count()",
- "count(span.duration)",
- "count(tags[foo, number])",
- "sum(tags[foo,number])",
- "avg(tags[foo,number])",
- "p50(tags[foo,number])",
- # TODO: bring p75 back once it's added to the RPC
- # "p75(tags[foo,number])",
- "p95(tags[foo,number])",
- "p99(tags[foo,number])",
- "p100(tags[foo,number])",
- "min(tags[foo,number])",
- "max(tags[foo,number])",
- ],
- "query": "",
- "orderby": "description",
- "project": self.project.id,
- "dataset": self.dataset,
- }
- )
- assert response.status_code == 200, response.content
- assert len(response.data["data"]) == 1
- data = response.data["data"]
- assert data[0] == {
- "description": "foo",
- "count_unique(bar)": 2,
- "count_unique(tags[bar])": 2,
- "count_unique(tags[bar,string])": 2,
- "count()": 2,
- "count(span.duration)": 2,
- "count(tags[foo, number])": 1,
- "sum(tags[foo,number])": 5.0,
- "avg(tags[foo,number])": 5.0,
- "p50(tags[foo,number])": 5.0,
- # TODO: bring p75 back once it's added to the RPC
- # "p75(tags[foo,number])": 5.0,
- "p95(tags[foo,number])": 5.0,
- "p99(tags[foo,number])": 5.0,
- "p100(tags[foo,number])": 5.0,
- "min(tags[foo,number])": 5.0,
- "max(tags[foo,number])": 5.0,
- }
- @pytest.mark.xfail(reason="extrapolation not implemented yet")
- def test_margin_of_error(self):
- super().test_margin_of_error()
|