- import logging
- from collections.abc import MutableMapping
- from datetime import datetime, timezone
- from unittest.mock import patch
- import pytest
- import sentry_kafka_schemas
- from arroyo.backends.kafka import KafkaPayload
- from arroyo.types import BrokerValue, Message, Partition, Topic, Value
- from sentry.sentry_metrics.aggregation_option_registry import AggregationOption
- from sentry.sentry_metrics.configuration import (
- GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME,
- RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME,
- )
- from sentry.sentry_metrics.consumers.indexer.batch import IndexerBatch
- from sentry.sentry_metrics.consumers.indexer.common import BrokerMeta
- from sentry.sentry_metrics.consumers.indexer.processing import INGEST_CODEC
- from sentry.sentry_metrics.consumers.indexer.schema_validator import MetricsSchemaValidator
- from sentry.sentry_metrics.consumers.indexer.tags_validator import (
- GenericMetricsTagsValidator,
- ReleaseHealthTagsValidator,
- )
- from sentry.sentry_metrics.indexer.base import FetchType, FetchTypeExt, Metadata
- from sentry.sentry_metrics.use_case_id_registry import UseCaseID
- from sentry.snuba.metrics.naming_layer.mri import SessionMRI, TransactionMRI
- from sentry.testutils.helpers.options import override_options
- from sentry.utils import json
- MOCK_METRIC_ID_AGG_OPTION = {
- "d:transactions/measurements.fcp@millisecond": AggregationOption.HIST,
- "d:transactions/measurements.lcp@millisecond": AggregationOption.HIST,
- "d:transactions/alert@none": AggregationOption.TEN_SECOND,
- }
- MOCK_USE_CASE_AGG_OPTION = {UseCaseID.TRANSACTIONS: AggregationOption.TEN_SECOND}
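- # These two dicts are patched over METRIC_ID_AGG_OPTION and USE_CASE_AGG_OPTION in
- # the aggregation option registry by test_resolved_with_aggregation_options below,
- # so that specific transaction MRIs (and the transactions use case as a whole)
- # resolve to a fixed aggregation option.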
- pytestmark = pytest.mark.sentry_metrics
- BROKER_TIMESTAMP = datetime.now(tz=timezone.utc)
- ts = int(datetime.now(tz=timezone.utc).timestamp())
- counter_payload = {
- "name": SessionMRI.RAW_SESSION.value,
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- counter_headers = [("namespace", b"sessions")]
- distribution_payload = {
- "name": SessionMRI.RAW_DURATION.value,
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- distribution_headers = [("namespace", b"sessions")]
- set_payload = {
- "name": SessionMRI.RAW_ERROR.value,
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- set_headers = [("namespace", b"sessions")]
- extracted_string_output = {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- def _construct_messages(payloads):
- message_batch = []
- for i, (payload, headers) in enumerate(payloads):
- message_batch.append(
- Message(
- BrokerValue(
- KafkaPayload(None, json.dumps(payload).encode("utf-8"), headers or []),
- Partition(Topic("topic"), 0),
- i,
- BROKER_TIMESTAMP,
- )
- )
- )
- return message_batch
- def _construct_outer_message(payloads):
- message_batch = _construct_messages(payloads)
- # the outer message uses the last message's partition, offset, and timestamp
- last = message_batch[-1]
- outer_message = Message(Value(message_batch, last.committable))
- return outer_message
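- # A minimal illustrative sketch (not exercised by any test below; the helper name is
- # ours) of how the two constructors above fit together: each payload becomes one
- # inner Kafka message on the hardcoded "topic"/partition 0, and the outer message
- # commits at the last inner message's position.
- def _example_outer_message_shape():
-     outer = _construct_outer_message([(counter_payload, counter_headers)])
-     assert len(outer.payload) == 1
-     assert outer.payload[0].value.partition == Partition(Topic("topic"), 0)
-     return outer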
- def _deconstruct_messages(snuba_messages, kafka_logical_topic="snuba-metrics"):
- """
- Convert a list of messages returned by `reconstruct_messages` into python
- primitives, to run assertions on:
- assert _deconstruct_messages(batch.reconstruct_messages(...)) == [ ... ]
- This is slightly nicer to work with than:
- assert batch.reconstruct_messages(...) == _construct_messages([ ... ])
- ...because pytest's assertion diffs work better with python primitives.
- """
- rv = []
- codec = sentry_kafka_schemas.get_codec(kafka_logical_topic)
- for msg in snuba_messages:
- decoded = codec.decode(msg.payload.value, validate=True)
- rv.append((decoded, msg.payload.headers))
- return rv
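- # Note on the helper above: the release-health tests rely on the default
- # "snuba-metrics" logical topic, while the generic-metrics tests pass
- # kafka_logical_topic="snuba-generic-metrics" so payloads are validated against the
- # matching schema.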
- def _deconstruct_routing_messages(snuba_messages):
- """
- Similar to `_deconstruct_messages`, but for routing messages.
- """
- all_messages = []
- for msg in snuba_messages:
- headers: MutableMapping[str, str] = {}
- for key, value in msg.payload.routing_header.items():
- headers.update({key: value})
- payload = json.loads(msg.payload.routing_message.value.decode("utf-8"))
- all_messages.append((headers, payload, msg.payload.routing_message.headers))
- return all_messages
- def _get_string_indexer_log_records(caplog):
- """
- Get all log records and relevant extra arguments for easy snapshotting.
- """
- return [
- (
- rec.message,
- {
- k: v
- for k, v in rec.__dict__.items()
- if k
- in (
- "string_type",
- "is_global_quota",
-                     "num_global_quotas",
- "org_batch_size",
- )
- },
- )
- for rec in caplog.records
- ]
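- # Purely illustrative (this constant is ours and is not used by any test): the helper
- # above yields (message, extras) tuples shaped like the dropped-message records
- # asserted in the rate-limiting tests further down, e.g.:
- _EXAMPLE_DROPPED_MESSAGE_RECORD = (
-     "process_messages.dropped_message",
-     {"org_batch_size": 9, "is_global_quota": False, "string_type": "metric_id"},
- )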
- @pytest.mark.django_db
- @pytest.mark.parametrize(
- "should_index_tag_values, expected",
- [
- pytest.param(
- True,
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- },
- }
- },
- id="index tag values true",
- ),
- pytest.param(
- False,
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "s:sessions/error@none",
- "session.status",
- },
- }
- },
- id="index tag values false",
- ),
- ],
- )
- def test_extract_strings_with_rollout(should_index_tag_values, expected):
- """
- Test that the indexer batch extracts the correct strings from the messages
- based on whether tag values should be indexed or not.
- """
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- should_index_tag_values,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == expected
- assert not batch.invalid_msg_meta
- @pytest.mark.django_db
- def test_extract_strings_with_multiple_use_case_ids():
- """
-     Verify that the extract_strings method can handle payloads that have multiple
-     (generic) use cases.
- """
- counter_payload = {
- "name": "c:spans/session@none",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- distribution_payload = {
- "name": "d:escalating_issues/duration@second",
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- set_payload = {
- "name": "s:escalating_issues/error@none",
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- outer_message = _construct_outer_message(
- [
- (counter_payload, [("namespace", b"spans")]),
- (distribution_payload, [("namespace", b"escalating_issues")]),
- (set_payload, [("namespace", b"escalating_issues")]),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == {
- UseCaseID.SPANS: {
- 1: {
- "c:spans/session@none",
- "environment",
- "production",
- "session.status",
- "init",
- }
- },
- UseCaseID.ESCALATING_ISSUES: {
- 1: {
- "d:escalating_issues/duration@second",
- "environment",
- "production",
- "session.status",
- "healthy",
- "s:escalating_issues/error@none",
- "environment",
- "production",
- "session.status",
- "errored",
- }
- },
- }
- @pytest.mark.django_db
- @override_options({"sentry-metrics.indexer.disabled-namespaces": ["escalating_issues"]})
- def test_extract_strings_with_single_use_case_ids_blocked():
- """
-     Verify that the extract_strings method works normally when a single use case ID is blocked.
- """
- counter_payload = {
- "name": "c:spans/session@none",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- distribution_payload = {
- "name": "d:escalating_issues/duration@second",
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- set_payload = {
- "name": "s:escalating_issues/error@none",
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- outer_message = _construct_outer_message(
- [
- (counter_payload, [("namespace", b"spans")]),
- (distribution_payload, [("namespace", b"escalating_issues")]),
- (set_payload, [("namespace", b"escalating_issues")]),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == {
- UseCaseID.SPANS: {
- 1: {
- "c:spans/session@none",
- "environment",
- "production",
- "session.status",
- "init",
- }
- }
- }
- assert not batch.invalid_msg_meta
- @pytest.mark.django_db
- @override_options({"sentry-metrics.indexer.disabled-namespaces": ["spans", "escalating_issues"]})
- def test_extract_strings_with_multiple_use_case_ids_blocked():
- """
-     Verify that the extract_strings method works normally when multiple use case IDs are blocked.
- """
- custom_uc_counter_payload = {
- "name": "c:spans/session@none",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- perf_distribution_payload = {
- "name": TransactionMRI.MEASUREMENTS_FCP.value,
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- custom_uc_set_payload = {
- "name": "s:escalating_issues/error@none",
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 2,
- "retention_days": 90,
- "project_id": 3,
- }
- outer_message = _construct_outer_message(
- [
- (custom_uc_counter_payload, [("namespace", b"spans")]),
- (perf_distribution_payload, [("namespace", b"transactions")]),
- (custom_uc_set_payload, [("namespace", b"escalating_issues")]),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == {
- UseCaseID.TRANSACTIONS: {
- 1: {
- TransactionMRI.MEASUREMENTS_FCP.value,
- "environment",
- "production",
- "session.status",
- "healthy",
- }
- },
- }
- assert not batch.invalid_msg_meta
- @pytest.mark.django_db
- def test_extract_strings_with_invalid_mri():
- """
-     Verify that the extract_strings method drops any payload whose name field is not a valid MRI but continues processing the rest.
- """
- bad_counter_payload = {
- "name": "invalid_MRI",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 100,
- "retention_days": 90,
- "project_id": 3,
- }
- counter_payload = {
- "name": "c:spans/session@none",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- distribution_payload = {
- "name": "d:escalating_issues/duration@second",
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- set_payload = {
- "name": "s:escalating_issues/error@none",
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- outer_message = _construct_outer_message(
- [
- (bad_counter_payload, [("namespace", b"")]),
- (counter_payload, [("namespace", b"spans")]),
- (distribution_payload, [("namespace", b"escalating_issues")]),
- (set_payload, [("namespace", b"escalating_issues")]),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == {
- UseCaseID.SPANS: {
- 1: {
- "c:spans/session@none",
- "environment",
- "production",
- "session.status",
- "init",
- }
- },
- UseCaseID.ESCALATING_ISSUES: {
- 1: {
- "d:escalating_issues/duration@second",
- "environment",
- "production",
- "session.status",
- "healthy",
- "s:escalating_issues/error@none",
- "environment",
- "production",
- "session.status",
- "errored",
- }
- },
- }
- assert batch.invalid_msg_meta == {BrokerMeta(Partition(Topic("topic"), 0), 0)}
- @pytest.mark.django_db
- def test_extract_strings_with_multiple_use_case_ids_and_org_ids():
- """
-     Verify that the extract_strings method can handle payloads that have multiple
-     (generic) use cases and come from different orgs.
- """
- custom_uc_counter_payload = {
- "name": "c:spans/session@none",
- "tags": {
- "environment": "production",
- "session.status": "init",
- },
- "timestamp": ts,
- "type": "c",
- "value": 1,
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- perf_distribution_payload = {
- "name": TransactionMRI.MEASUREMENTS_FCP.value,
- "tags": {
- "environment": "production",
- "session.status": "healthy",
- },
- "timestamp": ts,
- "type": "d",
- "value": [4, 5, 6],
- "org_id": 1,
- "retention_days": 90,
- "project_id": 3,
- }
- custom_uc_set_payload = {
- "name": "s:spans/error@none",
- "tags": {
- "environment": "production",
- "session.status": "errored",
- },
- "timestamp": ts,
- "type": "s",
- "value": [3],
- "org_id": 2,
- "retention_days": 90,
- "project_id": 3,
- }
- outer_message = _construct_outer_message(
- [
- (custom_uc_counter_payload, [("namespace", b"spans")]),
- (perf_distribution_payload, [("namespace", b"transactions")]),
- (custom_uc_set_payload, [("namespace", b"spans")]),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == {
- UseCaseID.SPANS: {
- 1: {
- "c:spans/session@none",
- "environment",
- "production",
- "session.status",
- "init",
- },
- 2: {
- "s:spans/error@none",
- "environment",
- "production",
- "session.status",
- "errored",
- },
- },
- UseCaseID.TRANSACTIONS: {
- 1: {
- TransactionMRI.MEASUREMENTS_FCP.value,
- "environment",
- "production",
- "session.status",
- "healthy",
- }
- },
- }
- assert not batch.invalid_msg_meta
- @pytest.mark.django_db
- @patch(
- "sentry.sentry_metrics.aggregation_option_registry.METRIC_ID_AGG_OPTION",
- MOCK_METRIC_ID_AGG_OPTION,
- )
- @patch(
- "sentry.sentry_metrics.aggregation_option_registry.USE_CASE_AGG_OPTION",
- MOCK_USE_CASE_AGG_OPTION,
- )
- @override_options({"sentry-metrics.10s-granularity": True})
- def test_resolved_with_aggregation_options(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- counter_metric_id = "c:transactions/alert@none"
- dist_metric_id = "d:transactions/measurements.fcp@millisecond"
- set_metric_id = "s:transactions/on_demand@none"
- outer_message = _construct_outer_message(
- [
- (
- {
- **counter_payload,
- "name": counter_metric_id,
- },
- [],
- ),
- ({**distribution_payload, "name": dist_metric_id}, []),
- ({**set_payload, "name": set_metric_id}, []),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- False,
- False,
- tags_validator=GenericMetricsTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, GENERIC_METRICS_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.TRANSACTIONS: {
- 1: {
- counter_metric_id,
- dist_metric_id,
- "environment",
- set_metric_id,
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.TRANSACTIONS: {
- 1: {
- counter_metric_id: 1,
- dist_metric_id: 2,
- "environment": 3,
- set_metric_id: 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.TRANSACTIONS: {
- 1: {
- counter_metric_id: Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- dist_metric_id: Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- set_metric_id: Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- },
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == []
- assert _deconstruct_messages(snuba_payloads, kafka_logical_topic="snuba-generic-metrics") == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": counter_metric_id,
- "3": "environment",
- "9": "session.status",
- },
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "9": "init"},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "transactions",
- "value": 1.0,
- "aggregation_option": AggregationOption.TEN_SECOND.value,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- "version": 2,
- },
- [("mapping_sources", b"c"), ("metric_type", "c")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "2": dist_metric_id,
- "3": "environment",
- "9": "session.status",
- },
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "9": "healthy"},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "transactions",
- "value": [4, 5, 6],
- "aggregation_option": AggregationOption.HIST.value,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- "version": 2,
- },
- [("mapping_sources", b"c"), ("metric_type", "d")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "3": "environment",
- "8": set_metric_id,
- "9": "session.status",
- },
- },
- "metric_id": 8,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "9": "errored"},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "transactions",
- "value": [3],
- "aggregation_option": AggregationOption.TEN_SECOND.value,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- "version": 2,
- },
- [("mapping_sources", b"c"), ("metric_type", "s")],
- ),
- ]
- @pytest.mark.django_db
- def test_all_resolved(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": 3,
- "errored": 4,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(id=4, fetch_type=FetchType.DB_READ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- },
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == []
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": "c:sessions/session@none",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"6": "init"},
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 6},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "sessions",
- "value": 1.0,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*counter_headers, ("mapping_sources", b"ch"), ("metric_type", "c")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "2": "d:sessions/duration@second",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"5": "healthy"},
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 5},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*distribution_headers, ("mapping_sources", b"ch"), ("metric_type", "d")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "3": "environment",
- "7": "production",
- "8": "s:sessions/error@none",
- "9": "session.status",
- },
- "d": {"4": "errored"},
- },
- "metric_id": 8,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 4},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*set_headers, ("mapping_sources", b"cd"), ("metric_type", "s")],
- ),
- ]
- @pytest.mark.django_db
- def test_all_resolved_with_routing_information(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- True,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": 3,
- "errored": 4,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(id=4, fetch_type=FetchType.DB_READ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == []
- assert _deconstruct_routing_messages(snuba_payloads) == [
- (
- {"org_id": 1},
- {
- "mapping_meta": {
- "c": {
- "1": "c:sessions/session@none",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"6": "init"},
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 6},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "sessions",
- "value": 1.0,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*counter_headers, ("mapping_sources", b"ch"), ("metric_type", "c")],
- ),
- (
- {"org_id": 1},
- {
- "mapping_meta": {
- "c": {
- "2": "d:sessions/duration@second",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"5": "healthy"},
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 5},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *distribution_headers,
- ("mapping_sources", b"ch"),
- ("metric_type", "d"),
- ],
- ),
- (
- {"org_id": 1},
- {
- "mapping_meta": {
- "c": {
- "3": "environment",
- "7": "production",
- "8": "s:sessions/error@none",
- "9": "session.status",
- },
- "d": {"4": "errored"},
- },
- "metric_id": 8,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 4},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*set_headers, ("mapping_sources", b"cd"), ("metric_type", "s")],
- ),
- ]
- @pytest.mark.django_db
- def test_all_resolved_retention_days_honored(caplog, settings):
- """
- Tests that the indexer batch honors the incoming retention_days values
- from Relay or falls back to 90.
- """
- distribution_payload_modified = distribution_payload.copy()
- distribution_payload_modified["retention_days"] = 30
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload_modified, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": 3,
- "errored": 4,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(id=4, fetch_type=FetchType.DB_READ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == []
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": "c:sessions/session@none",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"6": "init"},
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 6},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "sessions",
- "value": 1.0,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*counter_headers, ("mapping_sources", b"ch"), ("metric_type", "c")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "2": "d:sessions/duration@second",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"5": "healthy"},
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 30,
- "tags": {"3": 7, "9": 5},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*distribution_headers, ("mapping_sources", b"ch"), ("metric_type", "d")],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "3": "environment",
- "7": "production",
- "8": "s:sessions/error@none",
- "9": "session.status",
- },
- "d": {"4": "errored"},
- },
- "metric_id": 8,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 4},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*set_headers, ("mapping_sources", b"cd"), ("metric_type", "s")],
- ),
- ]
- @pytest.mark.django_db
- def test_batch_resolve_with_values_not_indexed(caplog, settings):
- """
-     Tests that the indexer batch skips resolving tag values and sends the raw
-     tag values to Snuba.
-     The difference between this test and test_all_resolved is that here the tag
-     values are strings instead of integers. Because of that, the indexed tag keys are
- different and mapping_meta is smaller. The payload also contains the
- version field to specify that the tag values are not indexed.
- """
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- False,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": 3,
- "s:sessions/error@none": 4,
- "session.status": 5,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=4, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=5, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == []
- assert _deconstruct_messages(snuba_payloads, kafka_logical_topic="snuba-generic-metrics") == [
- (
- {
- "version": 2,
- "mapping_meta": {
- "c": {
- "1": "c:sessions/session@none",
- "3": "environment",
- "5": "session.status",
- },
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "5": "init"},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "sessions",
- "value": 1.0,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [*counter_headers, ("mapping_sources", b"c"), ("metric_type", "c")],
- ),
- (
- {
- "version": 2,
- "mapping_meta": {
- "c": {
- "2": "d:sessions/duration@second",
- "3": "environment",
- "5": "session.status",
- },
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "5": "healthy"},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *distribution_headers,
- ("mapping_sources", b"c"),
- ("metric_type", "d"),
- ],
- ),
- (
- {
- "version": 2,
- "mapping_meta": {
- "c": {
- "3": "environment",
- "4": "s:sessions/error@none",
- "5": "session.status",
- },
- },
- "metric_id": 4,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": "production", "5": "errored"},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *set_headers,
- ("mapping_sources", b"c"),
- ("metric_type", "s"),
- ],
- ),
- ]
- @pytest.mark.django_db
- def test_metric_id_rate_limited(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": None,
- "d:sessions/duration@second": None,
- "environment": 3,
- "errored": 4,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(
- id=None,
- fetch_type=FetchType.RATE_LIMITED,
- fetch_type_ext=FetchTypeExt(is_global=False),
- ),
- "d:sessions/duration@second": Metadata(
- id=None, fetch_type=FetchType.RATE_LIMITED, fetch_type_ext=None
- ),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(id=4, fetch_type=FetchType.DB_READ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=None, fetch_type=FetchType.DB_READ),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {"3": "environment", "7": "production", "9": "session.status"},
- "d": {"4": "errored", "None": "s:sessions/error@none"},
- },
- "metric_id": 8,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 4},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *set_headers,
- ("mapping_sources", b"cd"),
- ("metric_type", "s"),
- ],
- ),
- ]
- assert _get_string_indexer_log_records(caplog) == [
- (
- "process_messages.dropped_message",
- {"org_batch_size": 9, "is_global_quota": False, "string_type": "metric_id"},
- ),
- (
- "process_messages.dropped_message",
- {"org_batch_size": 9, "is_global_quota": False, "string_type": "metric_id"},
- ),
- ]
- @pytest.mark.django_db
- def test_tag_key_rate_limited(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": None,
- "errored": 4,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(
- id=None,
- fetch_type=FetchType.RATE_LIMITED,
- fetch_type_ext=FetchTypeExt(is_global=False),
- ),
- "errored": Metadata(id=4, fetch_type=FetchType.DB_READ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == [
- (
- "process_messages.dropped_message",
- {"num_global_quotas": 0, "org_batch_size": 9, "string_type": "tags"},
- ),
- (
- "process_messages.dropped_message",
- {"num_global_quotas": 0, "org_batch_size": 9, "string_type": "tags"},
- ),
- (
- "process_messages.dropped_message",
- {"num_global_quotas": 0, "org_batch_size": 9, "string_type": "tags"},
- ),
- ]
- assert _deconstruct_messages(snuba_payloads) == []
- @pytest.mark.django_db
- def test_tag_value_rate_limited(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "d:sessions/duration@second",
- "environment",
- "errored",
- "healthy",
- "init",
- "production",
- "s:sessions/error@none",
- "session.status",
- }
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "d:sessions/duration@second": 2,
- "environment": 3,
- "errored": None,
- "healthy": 5,
- "init": 6,
- "production": 7,
- "s:sessions/error@none": 8,
- "session.status": 9,
- }
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "d:sessions/duration@second": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(
- id=None,
- fetch_type=FetchType.RATE_LIMITED,
- fetch_type_ext=FetchTypeExt(is_global=False),
- ),
- "healthy": Metadata(id=5, fetch_type=FetchType.HARDCODED),
- "init": Metadata(id=6, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=7, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=8, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=9, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == [
- (
- "process_messages.dropped_message",
- {"num_global_quotas": 0, "org_batch_size": 9, "string_type": "tags"},
- ),
- ]
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": "c:sessions/session@none",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"6": "init"},
- },
- "metric_id": 1,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 6},
- "timestamp": ts,
- "type": "c",
- "use_case_id": "sessions",
- "value": 1.0,
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *counter_headers,
- ("mapping_sources", b"ch"),
- ("metric_type", "c"),
- ],
- ),
- (
- {
- "mapping_meta": {
- "c": {
- "2": "d:sessions/duration@second",
- "3": "environment",
- "7": "production",
- "9": "session.status",
- },
- "h": {"5": "healthy"},
- },
- "metric_id": 2,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"3": 7, "9": 5},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *distribution_headers,
- ("mapping_sources", b"ch"),
- ("metric_type", "d"),
- ],
- ),
- ]
- @pytest.mark.django_db
- def test_one_org_limited(caplog, settings):
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- ({**distribution_payload, "org_id": 2}, distribution_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- assert batch.extract_strings() == (
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none",
- "environment",
- "init",
- "production",
- "session.status",
- },
- 2: {
- "d:sessions/duration@second",
- "environment",
- "healthy",
- "production",
- "session.status",
- },
- }
- }
- )
- assert not batch.invalid_msg_meta
- caplog.set_level(logging.ERROR)
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": 1,
- "environment": None,
- "init": 3,
- "production": 4,
- "session.status": 5,
- },
- 2: {
- "d:sessions/duration@second": 1,
- "environment": 2,
- "healthy": 3,
- "production": 4,
- "session.status": 5,
- },
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "c:sessions/session@none": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(
- id=None,
- fetch_type=FetchType.RATE_LIMITED,
- fetch_type_ext=FetchTypeExt(is_global=False),
- ),
- "init": Metadata(id=3, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=4, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=5, fetch_type=FetchType.CACHE_HIT),
- },
- 2: {
- "d:sessions/duration@second": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "environment": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "healthy": Metadata(id=3, fetch_type=FetchType.HARDCODED),
- "production": Metadata(id=4, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=5, fetch_type=FetchType.CACHE_HIT),
- },
- }
- },
- ).data
- assert _get_string_indexer_log_records(caplog) == [
- (
- "process_messages.dropped_message",
- {"num_global_quotas": 0, "org_batch_size": 5, "string_type": "tags"},
- ),
- ]
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": "d:sessions/duration@second",
- "2": "environment",
- "4": "production",
- "5": "session.status",
- },
- "h": {"3": "healthy"},
- },
- "metric_id": 1,
- "org_id": 2,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"2": 4, "5": 3},
- "timestamp": ts,
- "type": "d",
- "use_case_id": "sessions",
- "value": [4, 5, 6],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *distribution_headers,
- ("mapping_sources", b"ch"),
- ("metric_type", "d"),
- ],
- ),
- ]
- @pytest.mark.django_db
- def test_cardinality_limiter(caplog, settings):
- """
- Test functionality of the indexer batch related to cardinality-limiting. More concretely, assert that `IndexerBatch.filter_messages`:
- 1. removes the messages from the outgoing batch
- 2. prevents strings from filtered messages from being extracted & indexed
- 3. does not crash when strings from filtered messages are not passed into reconstruct_messages
-     4. still extracts strings that exist both in filtered and unfiltered messages (e.g. "environment")
- """
- settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
- outer_message = _construct_outer_message(
- [
- (counter_payload, counter_headers),
- (distribution_payload, distribution_headers),
- (set_payload, set_headers),
- ]
- )
- batch = IndexerBatch(
- outer_message,
- True,
- False,
- tags_validator=ReleaseHealthTagsValidator().is_allowed,
- schema_validator=MetricsSchemaValidator(
- INGEST_CODEC, RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME
- ).validate,
- )
- keys_to_remove = list(batch.parsed_payloads_by_meta)[:2]
- # the messages come in a certain order, and Python dictionaries preserve
- # their insertion order. So we can hardcode offsets here.
- assert keys_to_remove == [
- BrokerMeta(partition=Partition(Topic("topic"), 0), offset=0),
- BrokerMeta(partition=Partition(Topic("topic"), 0), offset=1),
- ]
- batch.filter_messages(keys_to_remove)
- assert batch.extract_strings() == {
- UseCaseID.SESSIONS: {
- 1: {
- "environment",
- "errored",
- "production",
-                 # Note: we only extracted the MRI of the one metric that we
-                 # didn't drop
- "s:sessions/error@none",
- "session.status",
- },
- }
- }
- assert not batch.invalid_msg_meta
- snuba_payloads = batch.reconstruct_messages(
- {
- UseCaseID.SESSIONS: {
- 1: {
- "environment": 1,
- "errored": 2,
- "production": 3,
- "s:sessions/error@none": 4,
- "session.status": 5,
- },
- }
- },
- {
- UseCaseID.SESSIONS: {
- 1: {
- "environment": Metadata(id=1, fetch_type=FetchType.CACHE_HIT),
- "errored": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
- "production": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
- "s:sessions/error@none": Metadata(id=4, fetch_type=FetchType.CACHE_HIT),
- "session.status": Metadata(id=5, fetch_type=FetchType.CACHE_HIT),
- }
- }
- },
- ).data
- assert _deconstruct_messages(snuba_payloads) == [
- (
- {
- "mapping_meta": {
- "c": {
- "1": "environment",
- "2": "errored",
- "3": "production",
- "4": "s:sessions/error@none",
- "5": "session.status",
- },
- },
- "metric_id": 4,
- "org_id": 1,
- "project_id": 3,
- "retention_days": 90,
- "tags": {"1": 3, "5": 2},
- "timestamp": ts,
- "type": "s",
- "use_case_id": "sessions",
- "value": [3],
- "sentry_received_timestamp": BROKER_TIMESTAMP.timestamp(),
- },
- [
- *set_headers,
- ("mapping_sources", b"c"),
- ("metric_type", "s"),
- ],
- )
- ]