#!/usr/bin/env python
from __future__ import absolute_import

# configure() must run before any Django- or Sentry-backed imports below so
# that settings are initialized first.
from sentry.runner import configure

configure()

import itertools
import random
from datetime import datetime, timedelta
from hashlib import sha1
from random import randint
from uuid import uuid4

import six
from django.conf import settings
from django.db import IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from pytz import utc

from sentry import buffer, roles, tsdb
from sentry.event_manager import HashDiscarded
from sentry.incidents.logic import (
    AlertRuleNameAlreadyUsedError,
    create_alert_rule,
    create_alert_rule_trigger,
    create_incident,
)
from sentry.incidents.models import IncidentType, AlertRuleThresholdType
from sentry.models import (
    Activity,
    Broadcast,
    Commit,
    CommitAuthor,
    CommitFileChange,
    Deploy,
    EventAttachment,
    Environment,
    File,
    Group,
    GroupRelease,
    GroupTombstone,
    Organization,
    OrganizationAccessRequest,
    OrganizationMember,
    Project,
    Release,
    ReleaseCommit,
    ReleaseEnvironment,
    ReleaseProjectEnvironment,
    ReleaseFile,
    Repository,
    Team,
    TOMBSTONE_FIELDS_FROM_GROUP,
    User,
    UserReport,
    Monitor,
    MonitorStatus,
    MonitorType,
    MonitorCheckIn,
    CheckInStatus,
)
from sentry.signals import mocks_loaded
from sentry.similarity import features
from sentry.snuba.models import QueryAggregations
from sentry.utils import loremipsum
from sentry.utils.hashlib import md5_text
from sentry.utils.samples import create_sample_event as _create_sample_event
from sentry.utils.samples import generate_user

PLATFORMS = itertools.cycle(["ruby", "php", "python", "java", "javascript"])

LEVELS = itertools.cycle(["error", "error", "error", "fatal", "warning"])

ENVIRONMENTS = itertools.cycle(["production", "production", "staging", "alpha", "beta", ""])

MONITOR_NAMES = itertools.cycle(settings.CELERYBEAT_SCHEDULE.keys())

MONITOR_SCHEDULES = itertools.cycle(["* * * * *", "0 * * * *", "0 0 * * *"])

LONG_MESSAGE = """Code: 0. DB::Exception: String is too long for DateTime: 2018-10-26T19:14:18+00:00. Stack trace:

0. clickhouse-server(StackTrace::StackTrace()+0x16) [0x99e9626]
1. clickhouse-server(DB::Exception::Exception(std::__cxx11::basic_string, std::allocator > const&, int)+0x22) [0x3087172]
2. clickhouse-server(DB::FunctionComparison::executeDateOrDateTimeOrEnumOrUUIDWithConstString(DB::Block&, unsigned long, DB::IColumn const*, DB::IColumn const*, std::shared_ptr const&, std::shared_ptr const&, bool, unsigned long)+0x13c8) [0x3b233d8]
3. clickhouse-server(DB::FunctionComparison::executeImpl(DB::Block&, std::vector > const&, unsigned long, unsigned long)+0x576) [0x3bafc86]
4. clickhouse-server(DB::PreparedFunctionImpl::defaultImplementationForNulls(DB::Block&, std::vector > const&, unsigned long, unsigned long)+0x174) [0x7953cd4]
5. clickhouse-server(DB::PreparedFunctionImpl::executeWithoutLowCardinalityColumns(DB::Block&, std::vector > const&, unsigned long, unsigned long)+0x54) [0x7953b04]
6. clickhouse-server(DB::PreparedFunctionImpl::execute(DB::Block&, std::vector > const&, unsigned long, unsigned long)+0x3e2) [0x7954222]
7. clickhouse-server(DB::ExpressionAction::execute(DB::Block&, std::unordered_map, std::allocator >, unsigned long, std::hash, std::allocator > >, std::equal_to, std::allocator > >, std::allocator, std::allocator > const, unsigned long> > >&) const+0x69b) [0x7b021fb]
8. clickhouse-server(DB::ExpressionActions::execute(DB::Block&) const+0xe6) [0x7b03676]
9. clickhouse-server(DB::FilterBlockInputStream::FilterBlockInputStream(std::shared_ptr const&, std::shared_ptr const&, std::__cxx11::basic_string, std::allocator > const&, bool)+0x711) [0x79b7e31]
10. clickhouse-server() [0x75e9443]
11. clickhouse-server(DB::InterpreterSelectQuery::executeImpl(DB::InterpreterSelectQuery::Pipeline&, std::shared_ptr const&, bool)+0x118f) [0x75f212f]
12. clickhouse-server(DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr const&, DB::Context const&, std::shared_ptr const&, std::shared_ptr const&, std::vector, std::allocator >, std::allocator, std::allocator > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x5e6) [0x75f2d46]
13. clickhouse-server(DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr const&, DB::Context const&, std::vector, std::allocator >, std::allocator, std::allocator > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x56) [0x75f3aa6]
14. clickhouse-server(DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(std::shared_ptr const&, DB::Context const&, std::vector, std::allocator >, std::allocator, std::allocator > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x7e7) [0x75ffab7]
15. clickhouse-server(DB::InterpreterFactory::get(std::shared_ptr&, DB::Context&, DB::QueryProcessingStage::Enum)+0x3a8) [0x75dc138]
16. clickhouse-server() [0x768fad9]
17. clickhouse-server(DB::executeQuery(std::__cxx11::basic..."""
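
# The itertools.cycle iterators above are shared, infinite iterators: each
# six.next(...) / next(...) call below advances them one element and wraps
# around at the end, e.g. six.next(PLATFORMS) -> "ruby", "php", "python",
# "java", "javascript", "ruby", ... so generated data rotates evenly through
# the available platforms, levels, environments, and monitor schedules.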


def make_sentence(words=None):
    if words is None:
        words = int(random.weibullvariate(8, 3))
    return " ".join(random.choice(loremipsum.words) for _ in range(words))


def create_sample_event(*args, **kwargs):
    try:
        event = _create_sample_event(*args, **kwargs)
    except HashDiscarded as e:
        print("> Skipping Event: {}".format(e))  # NOQA
    else:
        if event is not None:
            features.record([event])
        return event


def generate_commits(user):
    commits = []
    for i in range(random.randint(1, 20)):
        if i == 1:
            filename = "raven/base.py"
        else:
            filename = random.choice(loremipsum.words) + ".js"

        if random.randint(0, 5) == 1:
            author = (user.name, user.email)
        else:
            author = (
                "{} {}".format(random.choice(loremipsum.words), random.choice(loremipsum.words)),
                "{}@example.com".format(random.choice(loremipsum.words)),
            )

        commits.append(
            {
                # sha1() requires bytes, so encode the hex string first
                "key": sha1(uuid4().hex.encode("utf-8")).hexdigest(),
                "message": "feat: Do something to {}\n{}".format(filename, make_sentence()),
                "author": author,
                "files": [(filename, "M")],
            }
        )
    return commits


def generate_tombstones(project, user):
    # attempt to create a high enough previous_group_id
    # that it won't conflict with any group ids
    prev_group_id = 100000
    try:
        prev_group_id = (
            max(
                GroupTombstone.objects.order_by("-previous_group_id")[0].previous_group_id,
                prev_group_id,
            )
            + 1
        )
    except IndexError:
        pass

    for group in Group.objects.filter(project=project)[:5]:
        GroupTombstone.objects.create(
            previous_group_id=prev_group_id,
            actor_id=user.id,
            **{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP}
        )
        prev_group_id += 1


def create_system_time_series():
    now = datetime.utcnow().replace(tzinfo=utc)

    for _ in range(60):
        count = randint(1, 10)
        tsdb.incr_multi(
            (
                (tsdb.models.internal, "client-api.all-versions.responses.2xx"),
                (tsdb.models.internal, "client-api.all-versions.requests"),
            ),
            now,
            int(count * 0.9),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.4xx"),),
            now,
            int(count * 0.05),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.5xx"),),
            now,
            int(count * 0.1),
        )
        now = now - timedelta(seconds=1)

    for _ in range(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi(
            (
                (tsdb.models.internal, "client-api.all-versions.responses.2xx"),
                (tsdb.models.internal, "client-api.all-versions.requests"),
            ),
            now,
            int(count * 4.9),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.4xx"),),
            now,
            int(count * 0.05),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.5xx"),),
            now,
            int(count * 0.1),
        )
        now = now - timedelta(hours=1)
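
# create_system_time_series backfills the internal client-api counters over
# two windows: 60 one-second buckets, then 24 * 30 one-hour buckets (roughly
# thirty days), stepping `now` backwards so the freshest bucket is the
# current time. The 2xx/4xx/5xx splits are rough proportions of the random
# request count rather than exact percentages.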


def create_sample_time_series(event, release=None):
    if event is None:
        return

    group = event.group
    project = group.project
    key = project.key_set.all()[0]

    now = datetime.utcnow().replace(tzinfo=utc)

    environment = Environment.get_or_create(
        project=project, name=Environment.get_name_or_default(event.get_tag("environment"))
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project, release=release, environment=environment, datetime=now
        )

        grouprelease = GroupRelease.get_or_create(
            group=group, release=release, environment=environment, datetime=now
        )

    for _ in range(60):
        count = randint(1, 10)
        tsdb.incr_multi(
            ((tsdb.models.project, project.id), (tsdb.models.group, group.id)),
            now,
            count,
            environment_id=environment.id,
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_received, project.organization_id),
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.key_total_received, key.id),
            ),
            now,
            int(count * 1.1),
        )
        tsdb.incr(
            tsdb.models.project_total_forwarded,
            project.id,
            now,
            int(count * 1.1),
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_rejected, project.organization_id),
                (tsdb.models.project_total_rejected, project.id),
                (tsdb.models.key_total_rejected, key.id),
            ),
            now,
            int(count * 0.1),
        )

        frequencies = [
            (tsdb.models.frequent_issues_by_project, {project.id: {group.id: count}}),
            (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: count}}),
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: count}})
            )
        tsdb.record_frequency_multi(frequencies, now)

        now = now - timedelta(seconds=1)

    for _ in range(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi(
            ((tsdb.models.project, group.project.id), (tsdb.models.group, group.id)),
            now,
            count,
            environment_id=environment.id,
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_received, project.organization_id),
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.key_total_received, key.id),
            ),
            now,
            int(count * 1.1),
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_rejected, project.organization_id),
                (tsdb.models.project_total_rejected, project.id),
                (tsdb.models.key_total_rejected, key.id),
            ),
            now,
            int(count * 0.1),
        )

        frequencies = [
            (tsdb.models.frequent_issues_by_project, {project.id: {group.id: count}}),
            (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: count}}),
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: count}})
            )
        tsdb.record_frequency_multi(frequencies, now)

        now = now - timedelta(hours=1)
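
# create_sample_time_series mirrors the system-wide series above, but scoped
# to one event's project and group: the same one-second and hourly windows
# feed received/rejected/forwarded totals plus frequency tables. The
# release-scoped records (ReleaseEnvironment, GroupRelease, and the
# frequent_releases_by_group frequencies) are only written when a release is
# passed in.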


def main(num_events=1, extra_events=False):
    user = User.objects.filter(is_superuser=True)[0]

    dummy_user, _ = User.objects.get_or_create(
        username="dummy@example.com", defaults={"email": "dummy@example.com"}
    )
    dummy_user.set_password("dummy")
    dummy_user.save()

    mocks = (
        ("Massive Dynamic", ("Ludic Science",)),
        ("Captain Planet", ("Earth", "Fire", "Wind", "Water", "Heart")),
    )

    Broadcast.objects.create(
        title="Learn about Source Maps",
        message="Source maps are JSON files that contain information on how to map your transpiled source code back to its original source.",
        link="https://docs.sentry.io/platforms/javascript/#source-maps",
    )

    if settings.SENTRY_SINGLE_ORGANIZATION:
        org = Organization.get_default()
        print("Mocking org {}".format(org.name))  # NOQA
    else:
        print("Mocking org {}".format("Default"))  # NOQA
        org, _ = Organization.objects.get_or_create(slug="default")

    OrganizationMember.objects.get_or_create(
        user=user, organization=org, role=roles.get_top_dog().id
    )

    dummy_member, _ = OrganizationMember.objects.get_or_create(
        user=dummy_user, organization=org, defaults={"role": roles.get_default().id}
    )

    for team_name, project_names in mocks:
        print("> Mocking team {}".format(team_name))  # NOQA
        team, _ = Team.objects.get_or_create(name=team_name, defaults={"organization": org})

        for project_name in project_names:
            print("  > Mocking project {}".format(project_name))  # NOQA
            project, _ = Project.objects.get_or_create(
                name=project_name,
                defaults={
                    "organization": org,
                    "first_event": timezone.now(),
                    "flags": Project.flags.has_releases,
                },
            )
            project.add_team(team)
            if not project.first_event:
                project.update(first_event=project.date_added)
            if not project.flags.has_releases:
                project.update(flags=F("flags").bitor(Project.flags.has_releases))

            monitor, created = Monitor.objects.get_or_create(
                name=next(MONITOR_NAMES),
                project_id=project.id,
                organization_id=org.id,
                type=MonitorType.CRON_JOB,
                defaults={
                    "config": {"schedule": next(MONITOR_SCHEDULES)},
                    "next_checkin": timezone.now() + timedelta(minutes=60),
                    "last_checkin": timezone.now(),
                },
            )
            if not created:
                if not (monitor.config or {}).get("schedule"):
                    monitor.config = {"schedule": next(MONITOR_SCHEDULES)}
                monitor.update(
                    config=monitor.config,
                    status=MonitorStatus.OK if randint(0, 10) < 7 else MonitorStatus.ERROR,
                    last_checkin=timezone.now(),
                    next_checkin=monitor.get_next_scheduled_checkin(timezone.now()),
                )

            MonitorCheckIn.objects.create(
                project_id=monitor.project_id,
                monitor=monitor,
                status=CheckInStatus.OK
                if monitor.status == MonitorStatus.OK
                else CheckInStatus.ERROR,
            )

            with transaction.atomic():
                # each filter below uses a freshly generated random version,
                # so a new release is created on every run
                has_release = Release.objects.filter(
                    version=sha1(uuid4().bytes).hexdigest(),
                    organization_id=project.organization_id,
                    projects=project,
                ).exists()
                if not has_release:
                    release = Release.objects.filter(
                        version=sha1(uuid4().bytes).hexdigest(),
                        organization_id=project.organization_id,
                    ).first()
                    if not release:
                        release = Release.objects.create(
                            version=sha1(uuid4().bytes).hexdigest(),
                            organization_id=project.organization_id,
                        )
                    release.add_project(project)

            generate_tombstones(project, user)

            raw_commits = generate_commits(user)

            try:
                with transaction.atomic():
                    repo, _ = Repository.objects.get_or_create(
                        organization_id=org.id,
                        provider="integrations:github",
                        external_id="example/example",
                        defaults={
                            "name": "Example Repo",
                            "url": "https://github.com/example/example",
                        },
                    )
            except IntegrityError:
                # for users with legacy github plugin
                # upgrade to the new integration
                repo = Repository.objects.get(
                    organization_id=org.id,
                    provider="github",
                    external_id="example/example",
                    name="Example Repo",
                )
                repo.provider = "integrations:github"
                repo.save()
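
            # The IntegrityError fallback above covers installs that created
            # this repository through the legacy "github" plugin: rather than
            # inserting a duplicate row, the existing repository is upgraded
            # in place to the "integrations:github" provider.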

            authors = set()

            for commit_index, raw_commit in enumerate(raw_commits):
                author = CommitAuthor.objects.get_or_create(
                    organization_id=org.id,
                    email=raw_commit["author"][1],
                    defaults={"name": raw_commit["author"][0]},
                )[0]
                commit = Commit.objects.get_or_create(
                    organization_id=org.id,
                    repository_id=repo.id,
                    key=raw_commit["key"],
                    defaults={"author": author, "message": raw_commit["message"]},
                )[0]
                authors.add(author)

                for file in raw_commit["files"]:
                    ReleaseFile.objects.get_or_create(
                        organization_id=project.organization_id,
                        release=release,
                        name=file[0],
                        file=File.objects.get_or_create(
                            name=file[0], type="release.file", checksum="abcde" * 8, size=13043
                        )[0],
                        defaults={"organization_id": project.organization_id},
                    )

                    CommitFileChange.objects.get_or_create(
                        organization_id=org.id, commit=commit, filename=file[0], type=file[1]
                    )

                ReleaseCommit.objects.get_or_create(
                    organization_id=org.id, release=release, commit=commit, order=commit_index
                )

            # create an unreleased commit
            Commit.objects.get_or_create(
                organization_id=org.id,
                repository_id=repo.id,
                key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
                defaults={
                    "author": CommitAuthor.objects.get_or_create(
                        organization_id=org.id, email=user.email, defaults={"name": user.name}
                    )[0],
                    "message": "feat: Do something to {}\n{}".format(
                        random.choice(loremipsum.words) + ".js", make_sentence()
                    ),
                },
            )[0]

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release.version,
                user=user,
                data={"version": release.version},
            )

            environment = Environment.get_or_create(project=project, name=six.next(ENVIRONMENTS))

            deploy = Deploy.objects.create(
                organization_id=project.organization_id,
                release=release,
                environment_id=environment.id,
            )

            release.update(
                commit_count=len(raw_commits),
                last_commit_id=commit.id,
                total_deploys=Deploy.objects.filter(release=release).count(),
                last_deploy_id=deploy.id,
                authors=[six.text_type(a.id) for a in authors],
            )

            ReleaseProjectEnvironment.objects.create_or_update(
                project=project,
                environment=environment,
                release=release,
                defaults={"last_deploy_id": deploy.id},
            )

            Activity.objects.create(
                type=Activity.DEPLOY,
                project=project,
                ident=release.version,
                data={
                    "version": release.version,
                    "deploy_id": deploy.id,
                    "environment": environment.name,
                },
                datetime=deploy.date_finished,
            )

            # Add a bunch of additional dummy events to support pagination
            if extra_events:
                for _ in range(45):
                    platform = six.next(PLATFORMS)

                    create_sample_event(
                        project=project,
                        platform=platform,
                        release=release.version,
                        level=six.next(LEVELS),
                        environment=six.next(ENVIRONMENTS),
                        message="This is a mostly useless example %s exception" % platform,
                        checksum=md5_text(platform + six.text_type(_)).hexdigest(),
                        user=generate_user(),
                    )

            for _ in range(num_events):
                event1 = create_sample_event(
                    project=project,
                    platform="python",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    user=generate_user(),
                )

                EventAttachment.objects.create(
                    project_id=project.id,
                    event_id=event1.event_id,
                    name="example-logfile.txt",
                    file=File.objects.get_or_create(
                        name="example-logfile.txt",
                        type="text/plain",
                        checksum="abcde" * 8,
                        size=13043,
                    )[0],
                )

                event2 = create_sample_event(
                    project=project,
                    platform="javascript",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    sdk={"name": "raven-js", "version": "2.1.0"},
                    user=generate_user(),
                )

                event3 = create_sample_event(project, "java")

                event4 = create_sample_event(
                    project=project,
                    platform="ruby",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    user=generate_user(),
                )

                event5 = create_sample_event(
                    project=project,
                    platform="cocoa",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="php",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    message=LONG_MESSAGE,
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="cocoa",
                    sample_name="react-native",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="pii",
                    release=release.version,
                    environment=six.next(ENVIRONMENTS),
                    user=generate_user(),
                )
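
            # event1 through event5 intentionally leak out of the loop above:
            # everything below reuses the values from the final iteration,
            # and each may be None if create_sample_event skipped the event
            # because its hash was discarded.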
            if event5:
                Commit.objects.get_or_create(
                    organization_id=org.id,
                    repository_id=repo.id,
                    key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
                    defaults={
                        "author": CommitAuthor.objects.get_or_create(
                            organization_id=org.id, email=user.email, defaults={"name": user.name}
                        )[0],
                        "message": "Ooops!\nFixes {}".format(event5.group.qualified_short_id),
                    },
                )[0]

            create_sample_event(project=project, environment=six.next(ENVIRONMENTS), platform="csp")

            if event3:
                UserReport.objects.create(
                    project=project,
                    event_id=event3.event_id,
                    group=event3.group,
                    name="Jane Bloggs",
                    email="jane@example.com",
                    comments=make_sentence(),
                )

            try:
                # Metric alerts
                alert_rule = create_alert_rule(
                    org,
                    [project],
                    "My Alert Rule",
                    "level:error",
                    "count()",
                    10,
                    AlertRuleThresholdType.ABOVE,
                    1,
                )

                create_alert_rule_trigger(alert_rule, "critical", 10)

                create_incident(
                    org,
                    type_=IncidentType.DETECTED,
                    title="My Incident",
                    date_started=datetime.utcnow().replace(tzinfo=utc),
                    alert_rule=alert_rule,
                    projects=[project],
                )
            except AlertRuleNameAlreadyUsedError:
                pass

            print("    > Loading time series data")  # NOQA
            create_sample_time_series(event1, release=release)
            create_sample_time_series(event2, release=release)
            create_sample_time_series(event3)
            create_sample_time_series(event4, release=release)
            create_sample_time_series(event5, release=release)

            if hasattr(buffer, "process_pending"):
                print("    > Processing pending buffers")  # NOQA
                buffer.process_pending()

            mocks_loaded.send(project=project, sender=__name__)

    OrganizationAccessRequest.objects.create_or_update(member=dummy_member, team=team)

    Activity.objects.create(
        type=Activity.RELEASE,
        project=project,
        ident="4f38b65c62c4565aa94bba391ff8946922a8eed4",
        user=user,
        data={"version": "4f38b65c62c4565aa94bba391ff8946922a8eed4"},
    )

    create_system_time_series()


if __name__ == "__main__":
    settings.CELERY_ALWAYS_EAGER = True

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--events", dest="num_events", default=1, type=int)
    parser.add_option("--extra-events", dest="extra_events", default=False, action="store_true")
    (options, args) = parser.parse_args()

    try:
        main(num_events=options.num_events, extra_events=options.extra_events)
    except Exception:
        # Avoid reporting any issues recursively back into Sentry
        import traceback
        import sys

        traceback.print_exc()
        sys.exit(1)
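
# Example invocation (the script path is an assumption; adjust it to wherever
# this file lives in your checkout):
#   python bin/mock --events 5 --extra-events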