#!/usr/bin/env python
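"""Populate a local Sentry instance with mock data for development.

Creates demo teams and projects in a default organization and fills them
with releases, commits, monitors, sample events, alerts, incidents, time
series, and mock transactions.

Note: configure() must run before any Sentry model import, which is why
the imports below are split into two groups.
"""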

import time

from sentry.runner import configure
from sentry.types.activity import ActivityType

configure()

import itertools
import random
from datetime import datetime, timedelta
from hashlib import sha1
from random import randint
from uuid import uuid4

from django.conf import settings
from django.db import IntegrityError, transaction
from django.db.models import F
from django.utils import timezone
from pytz import utc

from sentry import buffer, roles, tsdb
from sentry.event_manager import HashDiscarded
from sentry.incidents.logic import create_alert_rule, create_alert_rule_trigger, create_incident
from sentry.incidents.models import AlertRuleThresholdType, IncidentType
from sentry.models import (
    TOMBSTONE_FIELDS_FROM_GROUP,
    Activity,
    Broadcast,
    CheckInStatus,
    Commit,
    CommitAuthor,
    CommitFileChange,
    Deploy,
    Environment,
    EventAttachment,
    File,
    Group,
    GroupRelease,
    GroupTombstone,
    Monitor,
    MonitorCheckIn,
    MonitorStatus,
    MonitorType,
    Organization,
    OrganizationAccessRequest,
    OrganizationMember,
    Project,
    Release,
    ReleaseCommit,
    ReleaseEnvironment,
    ReleaseFile,
    ReleaseProjectEnvironment,
    Repository,
    Team,
    User,
    UserReport,
)
from sentry.signals import mocks_loaded
from sentry.similarity import features
from sentry.utils import loremipsum
from sentry.utils.hashlib import md5_text
from sentry.utils.samples import create_sample_event as _create_sample_event
from sentry.utils.samples import create_trace, generate_user, random_normal
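
# Rotating pools of sample attributes; calling next() on each cycle yields
# values round-robin.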

PLATFORMS = itertools.cycle(["ruby", "php", "python", "java", "javascript"])

LEVELS = itertools.cycle(["error", "error", "error", "fatal", "warning"])

ENVIRONMENTS = itertools.cycle(["production", "production", "staging", "alpha", "beta", ""])

MONITOR_NAMES = itertools.cycle(settings.CELERYBEAT_SCHEDULE.keys())

MONITOR_SCHEDULES = itertools.cycle(["* * * * *", "0 * * * *", "0 0 * * *"])

LONG_MESSAGE = """Code: 0.
DB::Exception: String is too long for DateTime: 2018-10-26T19:14:18+00:00. Stack trace:

0. clickhouse-server(StackTrace::StackTrace()+0x16) [0x99e9626]
1. clickhouse-server(DB::Exception::Exception(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, int)+0x22) [0x3087172]
2. clickhouse-server(DB::FunctionComparison<DB::EqualsOp, DB::NameEquals>::executeDateOrDateTimeOrEnumOrUUIDWithConstString(DB::Block&, unsigned long, DB::IColumn const*, DB::IColumn const*, std::shared_ptr<DB::IDataType const> const&, std::shared_ptr<DB::IDataType const> const&, bool, unsigned long)+0x13c8) [0x3b233d8]
3. clickhouse-server(DB::FunctionComparison<DB::EqualsOp, DB::NameEquals>::executeImpl(DB::Block&, std::vector<unsigned long, std::allocator<unsigned long> > const&, unsigned long, unsigned long)+0x576) [0x3bafc86]
4. clickhouse-server(DB::PreparedFunctionImpl::defaultImplementationForNulls(DB::Block&, std::vector<unsigned long, std::allocator<unsigned long> > const&, unsigned long, unsigned long)+0x174) [0x7953cd4]
5. clickhouse-server(DB::PreparedFunctionImpl::executeWithoutLowCardinalityColumns(DB::Block&, std::vector<unsigned long, std::allocator<unsigned long> > const&, unsigned long, unsigned long)+0x54) [0x7953b04]
6. clickhouse-server(DB::PreparedFunctionImpl::execute(DB::Block&, std::vector<unsigned long, std::allocator<unsigned long> > const&, unsigned long, unsigned long)+0x3e2) [0x7954222]
7. clickhouse-server(DB::ExpressionAction::execute(DB::Block&, std::unordered_map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, unsigned long, std::hash<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::equal_to<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const, unsigned long> > >&) const+0x69b) [0x7b021fb]
8. clickhouse-server(DB::ExpressionActions::execute(DB::Block&) const+0xe6) [0x7b03676]
9. clickhouse-server(DB::FilterBlockInputStream::FilterBlockInputStream(std::shared_ptr<DB::IBlockInputStream> const&, std::shared_ptr<DB::ExpressionActions> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, bool)+0x711) [0x79b7e31]
10. clickhouse-server() [0x75e9443]
11. clickhouse-server(DB::InterpreterSelectQuery::executeImpl(DB::InterpreterSelectQuery::Pipeline&, std::shared_ptr<DB::IBlockInputStream> const&, bool)+0x118f) [0x75f212f]
12. clickhouse-server(DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr<DB::IAST> const&, DB::Context const&, std::shared_ptr<DB::IBlockInputStream> const&, std::shared_ptr<DB::IStorage> const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x5e6) [0x75f2d46]
13. clickhouse-server(DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr<DB::IAST> const&, DB::Context const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x56) [0x75f3aa6]
14. clickhouse-server(DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(std::shared_ptr<DB::IAST> const&, DB::Context const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&, DB::QueryProcessingStage::Enum, unsigned long, bool)+0x7e7) [0x75ffab7]
15. clickhouse-server(DB::InterpreterFactory::get(std::shared_ptr<DB::IAST>&, DB::Context&, DB::QueryProcessingStage::Enum)+0x3a8) [0x75dc138]
16. clickhouse-server() [0x768fad9]
17. clickhouse-server(DB::executeQuery(std::__cxx11::basic..."""


def make_sentence(words=None):
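    """Return a sentence of random lorem-ipsum words.

    If no word count is given, one is drawn from a Weibull distribution.
    """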
    if words is None:
        words = int(random.weibullvariate(8, 3))
    return " ".join(random.choice(loremipsum.words) for _ in range(words))


def create_sample_event(*args, **kwargs):
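    """Wrapper around sentry.utils.samples.create_sample_event.

    Skips events rejected by a tombstone (HashDiscarded) and records
    similarity features for each event that is created.
    """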
    try:
        event = _create_sample_event(*args, **kwargs)
    except HashDiscarded as e:
        print(f"> Skipping Event: {e.message}")  # NOQA
    else:
        if event is not None:
            features.record([event])
            return event


def generate_commits(user):
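    """Build a list of 1-20 fake commits (key, message, author, files).

    Roughly one in six commits is attributed to the given user; the rest
    get random lorem-ipsum authors.
    """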
    commits = []
    for i in range(random.randint(1, 20)):
        if i == 0:
            # Guarantee at least one commit touches a known filename.
            filename = "raven/base.py"
        else:
            filename = random.choice(loremipsum.words) + ".js"
        if random.randint(0, 5) == 1:
            author = (user.name, user.email)
        else:
            author = (
                f"{random.choice(loremipsum.words)} {random.choice(loremipsum.words)}",
                f"{random.choice(loremipsum.words)}@example.com",
            )

        commits.append(
            {
                "key": sha1(uuid4().bytes).hexdigest(),
                "message": f"feat: Do something to {filename}\n{make_sentence()}",
                "author": author,
                "files": [(filename, "M")],
            }
        )
    return commits


def generate_tombstones(project, user):
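    """Create GroupTombstone records for up to five of the project's groups."""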
    # attempt to create a high enough previous_group_id
    # that it won't conflict with any group ids
    prev_group_id = 100000
    try:
        prev_group_id = (
            max(
                GroupTombstone.objects.order_by("-previous_group_id")[0].previous_group_id,
                prev_group_id,
            )
            + 1
        )
    except IndexError:
        pass

    for group in Group.objects.filter(project=project)[:5]:
        GroupTombstone.objects.create(
            previous_group_id=prev_group_id,
            actor_id=user.id,
            **{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP},
        )
        prev_group_id += 1


def create_system_time_series():
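    """Backfill TSDB counters for the internal client-api metrics.

    Writes one minute of per-second data followed by 30 days of hourly
    data for request and 2xx/4xx/5xx response counts.
    """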
    now = datetime.utcnow().replace(tzinfo=utc)

    for _ in range(60):
        count = randint(1, 10)
        tsdb.incr_multi(
            (
                (tsdb.models.internal, "client-api.all-versions.responses.2xx"),
                (tsdb.models.internal, "client-api.all-versions.requests"),
            ),
            now,
            int(count * 0.9),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.4xx"),),
            now,
            int(count * 0.05),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.5xx"),),
            now,
            int(count * 0.1),
        )
        now = now - timedelta(seconds=1)

    for _ in range(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi(
            (
                (tsdb.models.internal, "client-api.all-versions.responses.2xx"),
                (tsdb.models.internal, "client-api.all-versions.requests"),
            ),
            now,
            int(count * 4.9),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.4xx"),),
            now,
            int(count * 0.05),
        )
        tsdb.incr_multi(
            ((tsdb.models.internal, "client-api.all-versions.responses.5xx"),),
            now,
            int(count * 0.1),
        )
        now = now - timedelta(hours=1)


def create_sample_time_series(event, release=None):
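    """Backfill TSDB counts and frequency tables for an event's group.

    Writes one minute of per-second data followed by 30 days of hourly
    data; release-specific series are included when a release is passed.
    """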
    if event is None:
        return

    group = event.group

    project = group.project

    key = project.key_set.all()[0]

    now = datetime.utcnow().replace(tzinfo=utc)

    environment = Environment.get_or_create(
        project=project, name=Environment.get_name_or_default(event.get_tag("environment"))
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project, release=release, environment=environment, datetime=now
        )

        grouprelease = GroupRelease.get_or_create(
            group=group, release=release, environment=environment, datetime=now
        )

    for _ in range(60):
        count = randint(1, 10)
        tsdb.incr_multi(
            ((tsdb.models.project, project.id), (tsdb.models.group, group.id)),
            now,
            count,
            environment_id=environment.id,
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_received, project.organization_id),
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.key_total_received, key.id),
            ),
            now,
            int(count * 1.1),
        )
        tsdb.incr(
            tsdb.models.project_total_forwarded,
            project.id,
            now,
            int(count * 1.1),
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_rejected, project.organization_id),
                (tsdb.models.project_total_rejected, project.id),
                (tsdb.models.key_total_rejected, key.id),
            ),
            now,
            int(count * 0.1),
        )

        frequencies = [
            (tsdb.models.frequent_issues_by_project, {project.id: {group.id: count}}),
            (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: count}}),
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: count}})
            )

        tsdb.record_frequency_multi(frequencies, now)

        now = now - timedelta(seconds=1)

    for _ in range(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi(
            ((tsdb.models.project, group.project.id), (tsdb.models.group, group.id)),
            now,
            count,
            environment_id=environment.id,
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_received, project.organization_id),
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.key_total_received, key.id),
            ),
            now,
            int(count * 1.1),
        )
        tsdb.incr_multi(
            (
                (tsdb.models.organization_total_rejected, project.organization_id),
                (tsdb.models.project_total_rejected, project.id),
                (tsdb.models.key_total_rejected, key.id),
            ),
            now,
            int(count * 0.1),
        )

        frequencies = [
            (tsdb.models.frequent_issues_by_project, {project.id: {group.id: count}}),
            (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: count}}),
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: count}})
            )

        tsdb.record_frequency_multi(frequencies, now)

        now = now - timedelta(hours=1)


def main(num_events=1, extra_events=False, load_trends=False, slow=False):
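    """Populate the local Sentry instance with mock data.

    Creates demo teams and projects in the default organization, then fills
    them with releases, commits, monitors, sample events, user reports,
    alert rules, incidents, and time series data.
    """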
    try:
        user = User.objects.filter(is_superuser=True)[0]
    except IndexError:
        raise Exception("No superuser exists (run `make bootstrap`)")

    dummy_user, _ = User.objects.get_or_create(
        username="dummy@example.com", defaults={"email": "dummy@example.com"}
    )
    dummy_user.set_password("dummy")
    dummy_user.save()

    mocks = (
        ("Massive Dynamic", ("Ludic Science",)),
        ("Captain Planet", ("Earth", "Fire", "Wind", "Water", "Heart")),
    )
    project_map = {}

    Broadcast.objects.create(
        title="Learn about Source Maps",
        message="Source maps are JSON files that contain information on how to map your transpiled source code back to their original source.",
        link="https://docs.sentry.io/platforms/javascript/#source-maps",
    )

    if settings.SENTRY_SINGLE_ORGANIZATION:
        org = Organization.get_default()
        print(f"Mocking org {org.name}")  # NOQA
    else:
        print("Mocking org {}".format("Default"))  # NOQA
        org, _ = Organization.objects.get_or_create(slug="default")

    OrganizationMember.objects.get_or_create(
        user=user, organization=org, defaults={"role": roles.get_top_dog().id}
    )

    dummy_member, _ = OrganizationMember.objects.get_or_create(
        user=dummy_user, organization=org, defaults={"role": roles.get_default().id}
    )
    # Allow for zero events, in case you only want transactions
    event1 = event2 = event3 = event4 = event5 = None

    for team_name, project_names in mocks:
        print(f"> Mocking team {team_name}")  # NOQA
        team, _ = Team.objects.get_or_create(name=team_name, defaults={"organization": org})

        for project_name in project_names:
            print(f"  > Mocking project {project_name}")  # NOQA
            project, _ = Project.objects.get_or_create(
                name=project_name,
                defaults={
                    "organization": org,
                    "first_event": timezone.now(),
                    "flags": Project.flags.has_releases,
                },
            )
            project_map[project_name] = project
            project.add_team(team)
            if not project.first_event:
                project.update(first_event=project.date_added)
            if not project.flags.has_releases:
                project.update(flags=F("flags").bitor(Project.flags.has_releases))

            monitor, created = Monitor.objects.get_or_create(
                name=next(MONITOR_NAMES),
                project_id=project.id,
                organization_id=org.id,
                type=MonitorType.CRON_JOB,
                defaults={
                    "config": {"schedule": next(MONITOR_SCHEDULES)},
                    "next_checkin": timezone.now() + timedelta(minutes=60),
                    "last_checkin": timezone.now(),
                },
            )
            if not created:
                if not (monitor.config or {}).get("schedule"):
                    monitor.config = {"schedule": next(MONITOR_SCHEDULES)}
                monitor.update(
                    config=monitor.config,
                    status=MonitorStatus.OK if randint(0, 10) < 7 else MonitorStatus.ERROR,
                    last_checkin=timezone.now(),
                    next_checkin=monitor.get_next_scheduled_checkin(timezone.now()),
                )

            MonitorCheckIn.objects.create(
                project_id=monitor.project_id,
                monitor=monitor,
                status=CheckInStatus.OK
                if monitor.status == MonitorStatus.OK
                else CheckInStatus.ERROR,
            )

            with transaction.atomic():
                # Every candidate version below is a fresh random SHA-1, so
                # an existing release will never match; just create one and
                # attach it to the project.
                release = Release.objects.create(
                    version=sha1(uuid4().bytes).hexdigest(),
                    organization_id=project.organization_id,
                )
                release.add_project(project)

            generate_tombstones(project, user)

            raw_commits = generate_commits(user)

            try:
                with transaction.atomic():
                    repo, _ = Repository.objects.get_or_create(
                        organization_id=org.id,
                        provider="integrations:github",
                        external_id="example/example",
                        defaults={
                            "name": "Example Repo",
                            "url": "https://github.com/example/example",
                        },
                    )
            except IntegrityError:
                # A repository may already exist from the legacy GitHub
                # plugin; upgrade it to the new integration provider.
                repo = Repository.objects.get(
                    organization_id=org.id,
                    provider="github",
                    external_id="example/example",
                    name="Example Repo",
                )
                repo.provider = "integrations:github"
                repo.save()

            authors = set()

            for commit_index, raw_commit in enumerate(raw_commits):
                author = CommitAuthor.objects.get_or_create(
                    organization_id=org.id,
                    email=raw_commit["author"][1],
                    defaults={"name": raw_commit["author"][0]},
                )[0]
                commit = Commit.objects.get_or_create(
                    organization_id=org.id,
                    repository_id=repo.id,
                    key=raw_commit["key"],
                    defaults={"author": author, "message": raw_commit["message"]},
                )[0]
                authors.add(author)

                for file in raw_commit["files"]:
                    ReleaseFile.objects.get_or_create(
                        organization_id=project.organization_id,
                        release_id=release.id,
                        name=file[0],
                        file=File.objects.get_or_create(
                            name=file[0], type="release.file", checksum="abcde" * 8, size=13043
                        )[0],
                    )

                    CommitFileChange.objects.get_or_create(
                        organization_id=org.id, commit=commit, filename=file[0], type=file[1]
                    )

                ReleaseCommit.objects.get_or_create(
                    organization_id=org.id, release=release, commit=commit, order=commit_index
                )

            # create an unreleased commit
            Commit.objects.get_or_create(
                organization_id=org.id,
                repository_id=repo.id,
                key=sha1(uuid4().bytes).hexdigest(),
                defaults={
                    "author": CommitAuthor.objects.get_or_create(
                        organization_id=org.id, email=user.email, defaults={"name": user.name}
                    )[0],
                    "message": "feat: Do something to {}\n{}".format(
                        random.choice(loremipsum.words) + ".js", make_sentence()
                    ),
                },
            )[0]

            Activity.objects.create(
                type=ActivityType.RELEASE.value,
                project=project,
                ident=release.version,
                user=user,
                data={"version": release.version},
            )

            environment = Environment.get_or_create(project=project, name=next(ENVIRONMENTS))

            deploy = Deploy.objects.create(
                organization_id=project.organization_id,
                release=release,
                environment_id=environment.id,
            )

            release.update(
                commit_count=len(raw_commits),
                last_commit_id=commit.id,
                total_deploys=Deploy.objects.filter(release=release).count(),
                last_deploy_id=deploy.id,
                authors=[str(a.id) for a in authors],
            )

            ReleaseProjectEnvironment.objects.create_or_update(
                project=project,
                environment=environment,
                release=release,
                defaults={"last_deploy_id": deploy.id},
            )

            Activity.objects.create(
                type=ActivityType.DEPLOY.value,
                project=project,
                ident=release.version,
                data={
                    "version": release.version,
                    "deploy_id": deploy.id,
                    "environment": environment.name,
                },
                datetime=deploy.date_finished,
            )

            # Add a bunch of additional dummy events to support pagination
            if extra_events:
                for _ in range(45):
                    platform = next(PLATFORMS)

                    create_sample_event(
                        project=project,
                        platform=platform,
                        release=release.version,
                        level=next(LEVELS),
                        environment=next(ENVIRONMENTS),
                        message="This is a mostly useless example %s exception" % platform,
                        checksum=md5_text(platform + str(_)).hexdigest(),
                        user=generate_user(),
                    )

            for _ in range(num_events):
                event1 = create_sample_event(
                    project=project,
                    platform="python",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    user=generate_user(),
                )

                # create_sample_event returns None if the event hash was
                # discarded, so guard before attaching a log file.
                if event1:
                    EventAttachment.objects.create(
                        project_id=project.id,
                        event_id=event1.event_id,
                        name="example-logfile.txt",
                        file_id=File.objects.get_or_create(
                            name="example-logfile.txt",
                            type="text/plain",
                            checksum="abcde" * 8,
                            size=13043,
                        )[0].id,
                    )

                event2 = create_sample_event(
                    project=project,
                    platform="javascript",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    sdk={"name": "raven-js", "version": "2.1.0"},
                    user=generate_user(),
                )

                event3 = create_sample_event(project=project, platform="java")

                event4 = create_sample_event(
                    project=project,
                    platform="ruby",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    user=generate_user(),
                )

                event5 = create_sample_event(
                    project=project,
                    platform="cocoa",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="php",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    message=LONG_MESSAGE,
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="cocoa",
                    sample_name="react-native",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    user=generate_user(),
                )

                create_sample_event(
                    project=project,
                    platform="pii",
                    release=release.version,
                    environment=next(ENVIRONMENTS),
                    user=generate_user(),
                )
            if event5:
                Commit.objects.get_or_create(
                    organization_id=org.id,
                    repository_id=repo.id,
                    key=sha1(uuid4().bytes).hexdigest(),
                    defaults={
                        "author": CommitAuthor.objects.get_or_create(
                            organization_id=org.id, email=user.email, defaults={"name": user.name}
                        )[0],
                        "message": f"Ooops!\nFixes {event5.group.qualified_short_id}",
                    },
                )[0]

            create_sample_event(project=project, environment=next(ENVIRONMENTS), platform="csp")

            if event3:
                UserReport.objects.create(
                    project_id=project.id,
                    event_id=event3.event_id,
                    group_id=event3.group.id,
                    name="Jane Bloggs",
                    email="jane@example.com",
                    comments=make_sentence(),
                )

            # Metric alerts
            alert_rule = create_alert_rule(
                org,
                [project],
                "My Alert Rule",
                "level:error",
                "count()",
                10,
                AlertRuleThresholdType.ABOVE,
                1,
            )
            create_alert_rule_trigger(alert_rule, "critical", 10)
            create_incident(
                org,
                type_=IncidentType.DETECTED,
                title="My Incident",
                date_started=datetime.utcnow().replace(tzinfo=utc),
                alert_rule=alert_rule,
                projects=[project],
            )

            print(f"    > Loading time series data")  # NOQA
            if event1:
                create_sample_time_series(event1, release=release)
            if event2:
                create_sample_time_series(event2, release=release)
            if event3:
                create_sample_time_series(event3)
            if event4:
                create_sample_time_series(event4, release=release)
            if event5:
                create_sample_time_series(event5, release=release)

            if hasattr(buffer, "process_pending"):
                print("    > Processing pending buffers")  # NOQA
                buffer.process_pending()

            mocks_loaded.send(project=project, sender=__name__)

        OrganizationAccessRequest.objects.create_or_update(member=dummy_member, team=team)

    create_mock_transactions(project_map, load_trends, slow)

    Activity.objects.create(
        type=ActivityType.RELEASE.value,
        project=project,
        ident="4f38b65c62c4565aa94bba391ff8946922a8eed4",
        user=user,
        data={"version": "4f38b65c62c4565aa94bba391ff8946922a8eed4"},
    )

    create_system_time_series()


def create_mock_transactions(project_map, load_trends=False, slow=False):
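    """Create mock transactions across the Captain Planet projects.

    Always generates one multi-project trace; with load_trends, also
    generates two weeks of hourly transactions whose durations shift over
    time so that trends are visible.
    """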
    backend_project = project_map["Earth"]
    frontend_project = project_map["Fire"]
    service_projects = [
        project_map["Wind"],
        project_map["Water"],
        project_map["Heart"],
    ]
    for project in project_map.values():
        if not project.flags.has_transactions:
            project.update(flags=F("flags").bitor(Project.flags.has_transactions))

    timestamp = timezone.now()
    print(f"    > Loading a trace")  # NOQA
    create_trace(
        slow,
        timestamp - timedelta(milliseconds=random_normal(4000, 250, 1000)),
        timestamp,
        generate_user(),
        uuid4().hex,
        None,
        {
            "project": frontend_project,
            "transaction": "/plants/:plantId/",
            "frontend": True,
            "errors": 1,
            "children": [
                {
                    "project": backend_project,
                    "transaction": "/api/plants/",
                    "children": [
                        {
                            "project": service_projects[0],
                            "transaction": "/products/all/",
                            "children": [],
                        },
                        {
                            "project": service_projects[1],
                            "transaction": "/analytics/",
                            "children": [],
                        },
                        {
                            "project": service_projects[2],
                            "transaction": "tasks.create_invoice",
                            "children": [
                                {
                                    "project": service_projects[2],
                                    "transaction": "tasks.process_invoice",
                                    "children": [
                                        {
                                            "project": service_projects[2],
                                            "transaction": "tasks.process_invoice",
                                            "children": [
                                                {
                                                    "project": service_projects[2],
                                                    "transaction": "tasks.process_invoice",
                                                    "children": [
                                                        {
                                                            "project": service_projects[2],
                                                            "transaction": "tasks.process_invoice",
                                                            "children": [],
                                                        },
                                                    ],
                                                },
                                            ],
                                        },
                                    ],
                                },
                            ],
                        },
                    ],
                },
            ],
        },
    )

    if load_trends:
        print(f"    > Loading trends data")  # NOQA
        for day in range(14):
            for hour in range(24):
                timestamp = timezone.now() - timedelta(days=day, hours=hour)
                transaction_user = generate_user()
                trace_id = uuid4().hex

                frontend_span_id = uuid4().hex[:16]
                frontend_root_span_id = uuid4().hex[:16]
                frontend_duration = random_normal(2000 - 50 * day, 250, 1000)

                create_sample_event(
                    project=frontend_project,
                    platform="javascript-transaction",
                    transaction="/trends/:frontend/",
                    event_id=uuid4().hex,
                    user=transaction_user,
                    timestamp=timestamp,
                    # start_timestamp decreases based on day so that there's a trend
                    start_timestamp=timestamp - timedelta(milliseconds=frontend_duration),
                    measurements={
                        "fp": {"value": random_normal(1250 - 50 * day, 200, 500)},
                        "fcp": {"value": random_normal(1250 - 50 * day, 200, 500)},
                        "lcp": {"value": random_normal(2800 - 50 * day, 400, 2000)},
                        "fid": {"value": random_normal(5 - 0.125 * day, 2, 1)},
                    },
                    # Root
                    parent_span_id=None,
                    span_id=frontend_root_span_id,
                    trace=trace_id,
                    spans=[
                        {
                            "same_process_as_parent": True,
                            "op": "http",
                            "description": "GET /api/plants/?all_plants=1",
                            "data": {
                                "duration": random_normal(
                                    1 - 0.05 * day, 0.25, 0.01, frontend_duration / 1000
                                ),
                                "offset": 0.02,
                            },
                            "span_id": frontend_span_id,
                            "trace_id": trace_id,
                        }
                    ],
                )
                # try to give clickhouse some breathing room
                if slow:
                    time.sleep(0.05)

                backend_duration = random_normal(1500 + 50 * day, 250, 500)

                create_sample_event(
                    project=backend_project,
                    platform="transaction",
                    transaction="/trends/backend/",
                    event_id=uuid4().hex,
                    user=transaction_user,
                    timestamp=timestamp,
                    start_timestamp=timestamp - timedelta(milliseconds=backend_duration),
                    # match the trace from the javascript transaction
                    trace=trace_id,
                    parent_span_id=frontend_root_span_id,
                    spans=[],
                )

                # try to give clickhouse some breathing room
                if slow:
                    time.sleep(0.05)


if __name__ == "__main__":
    settings.CELERY_ALWAYS_EAGER = True

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--events", default=1, type=int, help="number of events to generate")
    parser.add_option(
        "--extra-events",
        default=False,
        action="store_true",
        help="add multiple events for each error group",
    )
    parser.add_option(
        "--load-trends",
        default=False,
        action="store_true",
        help="load multiple transactions for each id to show trends",
    )
    parser.add_option(
        "--slow",
        default=False,
        action="store_true",
        help="sleep between each transaction to let clickhouse rest",
    )

    (options, args) = parser.parse_args()

    try:
        main(
            num_events=options.events,
            extra_events=options.extra_events,
            load_trends=options.load_trends,
            slow=options.slow,
        )
    except Exception:
        # Avoid reporting any issues recursively back into Sentry
        import sys
        import traceback

        traceback.print_exc()
        sys.exit(1)