
test: Always test kafka-related things (#17133)

* test: Always test kafka-related things

* fix: Fix test

* fix: ZOOKEEPER_HOSTS for everyone!

* fix: Fix travis

* fix: Fix pytest-sentry condition

* ref: Rename docker container to be consistent with devservices

* ref: Remove obsolete tsdb test
Markus Unterwaditzer, 5 years ago
commit 062058362e

.travis.yml (+13 -11)

@@ -36,11 +36,15 @@ env:
     - NODE_DIR="${HOME}/.nvm/versions/node/v$(< .nvmrc)"
     - NODE_OPTIONS=--max-old-space-size=4096
     - PYTEST_SENTRY_DSN=https://6fd5cfea2d4d46b182ad214ac7810508@sentry.io/2423079
+    - SENTRY_KAFKA_HOSTS=localhost:9092
+    - SENTRY_ZOOKEEPER_HOSTS=localhost:2181
     - PYTEST_ADDOPTS="--reruns 5"
 
-before_install:
-  - &pip_install pip install --no-cache-dir "pip>=20.0.2"
-  - '[ "$TRAVIS_BRANCH" != "master" ] || export PYTEST_SENTRY_ALWAYS_REPORT=1'
+base_install: &before_install_default |-
+  pip install --no-cache-dir "pip>=20.0.2"
+  docker run -d --network host --name sentry_zookeeper -e ZOOKEEPER_CLIENT_PORT=2181 confluentinc/cp-zookeeper:4.1.0
+  docker run -d --network host --name sentry_kafka -e KAFKA_ZOOKEEPER_CONNECT=localhost:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 confluentinc/cp-kafka:4.1.0
+  [ "$TRAVIS_PULL_REQUEST" != "false" ] || export PYTEST_SENTRY_ALWAYS_REPORT=1
 
 script:
   # certain commands require sentry init to be run, but this is only true for
@@ -76,7 +80,7 @@ base_postgres: &postgres_default
     - redis-server
     - postgresql
   before_install:
-    - *pip_install
+    - *before_install_default
     - docker run -d --network host --name clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:19.11
     - docker run -d --network host --name snuba --env SNUBA_SETTINGS=test --env CLICKHOUSE_SERVER=localhost:9000 getsentry/snuba
     - docker ps -a
@@ -94,7 +98,7 @@ base_acceptance: &acceptance_default
     - redis-server
     - postgresql
   before_install:
-    - *pip_install
+    - *before_install_default
     - find "$NODE_DIR" -type d -empty -delete
     - nvm install
     - docker run -d --network host --name clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:19.11
@@ -157,7 +161,7 @@ matrix:
       name: 'Frontend'
       env: TEST_SUITE=js
       before_install:
-        - *pip_install
+        - *before_install_default
         - find "$NODE_DIR" -type d -empty -delete
         - nvm install
       install:
@@ -179,6 +183,7 @@ matrix:
       name: 'Symbolicator Integration'
       env: TEST_SUITE=symbolicator
       before_install:
+        - *before_install_default
         - docker run -d --network host --name clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:19.11
         - docker run -d --network host --name snuba --env SNUBA_SETTINGS=test --env CLICKHOUSE_SERVER=localhost:9000 getsentry/snuba
         - docker run -d --network host --name symbolicator us.gcr.io/sentryio/symbolicator:latest run
@@ -186,23 +191,20 @@ matrix:
 
     - python: 2.7
       name: 'Snuba Integration with migrations'
-      env: TEST_SUITE=snuba USE_SNUBA=1 SENTRY_ZOOKEEPER_HOSTS=localhost:2181 SENTRY_KAFKA_HOSTS=localhost:9092 MIGRATIONS_TEST_MIGRATE=1
+      env: TEST_SUITE=snuba USE_SNUBA=1 MIGRATIONS_TEST_MIGRATE=1
       services:
         - docker
         - memcached
         - redis-server
         - postgresql
       before_install:
-        - *pip_install
-        - docker run -d --network host --name zookeeper -e ZOOKEEPER_CLIENT_PORT=2181 confluentinc/cp-zookeeper:4.1.0
-        - docker run -d --network host --name kafka -e KAFKA_ZOOKEEPER_CONNECT=localhost:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 confluentinc/cp-kafka:4.1.0
+        - *before_install_default
         - docker run -d --network host --name clickhouse-server --ulimit nofile=262144:262144 yandex/clickhouse-server:19.11
         - docker run -d --network host --name snuba --env SNUBA_SETTINGS=test --env CLICKHOUSE_SERVER=localhost:9000 getsentry/snuba
         - docker ps -a
       install:
         - python setup.py install_egg_info
         - pip install -U -e ".[dev]"
-        - pip install confluent-kafka
       before_script:
         - psql -c 'create database sentry;' -U postgres
 

tests/sentry/eventstream/kafka/test_consumer.py (+1 -1)

@@ -19,7 +19,7 @@ except ImportError:
 
 @contextmanager
 def create_topic(partitions=1, replication_factor=1):
-    command = ["docker", "exec", "kafka", "kafka-topics"] + [
+    command = ["docker", "exec", "sentry_kafka", "kafka-topics"] + [
         "--zookeeper",
         os.environ["SENTRY_ZOOKEEPER_HOSTS"],
     ]
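
The hunk above only shows the docker exec prefix being retargeted at the renamed sentry_kafka container. As a hedged sketch of how such a prefix is typically completed into a create/teardown helper (the remainder of the real function is not shown in this diff, and the topic name below is hypothetical):

    import os
    import subprocess
    from contextlib import contextmanager

    @contextmanager
    def create_topic(partitions=1, replication_factor=1):
        # Run kafka-topics inside the renamed container (consistent with devservices naming).
        command = ["docker", "exec", "sentry_kafka", "kafka-topics"] + [
            "--zookeeper",
            os.environ["SENTRY_ZOOKEEPER_HOSTS"],
        ]
        topic = "test-topic"  # hypothetical name, for illustration only
        subprocess.check_call(
            command + [
                "--create",
                "--topic", topic,
                "--partitions", str(partitions),
                "--replication-factor", str(replication_factor),
            ]
        )
        try:
            yield topic
        finally:
            # Tear the topic back down so tests stay isolated.
            subprocess.check_call(command + ["--delete", "--topic", topic])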

tests/sentry/ingest/outcome_consumer/test_outcomes_kafka.py (+1 -56)

@@ -7,12 +7,9 @@ import six.moves
 from sentry.ingest.outcomes_consumer import get_outcomes_consumer, mark_signal_sent, is_signal_sent
 from sentry.signals import event_filtered, event_dropped
 from sentry.testutils.factories import Factories
-from sentry.utils.outcomes import Outcome, _get_tsdb_cache_key, mark_tsdb_incremented_many
-from django.core.cache import cache
+from sentry.utils.outcomes import Outcome
 from django.conf import settings
-from django.utils import timezone
 from sentry.utils import json
-from sentry import tsdb
 
 logger = logging.getLogger(__name__)
 
@@ -337,55 +334,3 @@ def test_outcome_consumer_handles_rate_limited_outcomes(
     assert set(event_dropped_sink) == set(
         [("127.33.44.1", "reason_1"), ("127.33.44.2", "reason_2")]
     )
-
-
-@pytest.mark.django_db
-def test_tsdb(kafka_producer, task_runner, kafka_admin, requires_kafka, monkeypatch):
-    producer, project_id, topic_name = _setup_outcome_test(kafka_producer, kafka_admin)
-
-    timestamps = []
-
-    for i in range(2):
-        timestamp = timezone.now()
-        timestamps.append(timestamp)
-        producer.produce(
-            topic_name,
-            _get_outcome(
-                event_id=i,
-                project_id=project_id,
-                outcome=Outcome.RATE_LIMITED,
-                reason="go_away",
-                remote_addr="127.0.0.1",
-                timestamp=timestamp,
-            ),
-        )
-
-    # Mark first item as already processed
-    mark_tsdb_incremented_many([(project_id, _get_event_id(0))])
-    assert cache.get(_get_tsdb_cache_key(project_id, _get_event_id(0))) is not None
-    assert cache.get(_get_tsdb_cache_key(project_id, _get_event_id(1))) is None
-
-    tsdb_increments = []
-    monkeypatch.setattr("sentry.tsdb.incr_multi", tsdb_increments.append)
-
-    group_id = "test-outcome-consumer-6"
-
-    consumer = get_outcomes_consumer(
-        max_batch_size=1, max_batch_time=100, group_id=group_id, auto_offset_reset="earliest"
-    )
-
-    i = 0
-
-    while not tsdb_increments and i < MAX_POLL_ITERATIONS:
-        consumer._run_once()
-        i += 1
-
-    assert tsdb_increments == [
-        [
-            (tsdb.models.project_total_received, project_id, {"timestamp": timestamps[1]}),
-            (tsdb.models.project_total_rejected, project_id, {"timestamp": timestamps[1]}),
-        ]
-    ]
-
-    assert cache.get(_get_tsdb_cache_key(project_id, _get_event_id(0))) is not None
-    assert cache.get(_get_tsdb_cache_key(project_id, _get_event_id(1))) is not None