
build: Start uploading test results to Codecov (#74768)

When test failures occur, this changes the Codecov coverage comment to show
information about the failing tests instead of coverage results. It also enables
Codecov to start running flaky test detection in the background on Sentry's test
results, so we can see how it performs in terms of latency and accuracy.

---------

Co-authored-by: Buck Evan <buck.evan@sentry.io>
Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
joseph-sentry committed 7 months ago
commit 71a149f491

.github/actions/artifacts/action.yml (+18, -9)

@@ -6,6 +6,10 @@ inputs:
     description: 'Path to coverage file(s) - comma separated for multiple files'
     default: '.artifacts/*.coverage.xml'
     required: true
+  test_result_files:
+    description: 'Path to test result file(s) - comma separated for multiple files'
+    default: '.artifacts/*.junit.xml'
+    required: true
   type:
     description: 'The type of change (frontend, backend)'
     default: 'backend'
@@ -20,13 +24,18 @@ inputs:
 runs:
   using: 'composite'
   steps:
-    - name: Upload to codecov
-      uses: codecov/codecov-action@e0b68c6749509c5f83f984dd99a76a1c1a231044 # v4.0.1
-      with:
-        token: ${{ inputs.token }}
-        flags: ${{ inputs.type }}
-        files: ${{ inputs.files }}
-        override_commit: ${{ inputs.commit_sha}}
-        plugin: noop
-        verbose: true
+    - name: Download and Verify Codecov CLI
+      shell: bash
+      run: |
+        ./.github/actions/artifacts/download_codecov_cli.py
+    - name: Upload Coverage and Test Results
       continue-on-error: true
+      shell: bash
+      env:
+        INPUT_TOKEN: ${{ inputs.token }}
+        INPUT_COMMIT_SHA: ${{ inputs.commit_sha }}
+        INPUT_TYPE: ${{ inputs.type }}
+        INPUT_FILES: ${{ inputs.files }}
+        INPUT_TEST_RESULT_FILES: ${{ inputs.test_result_files }}
+      run: |
+        ./.github/actions/artifacts/do_upload.py
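For context: the action no longer passes these values as flags to codecov-action; it forwards each `inputs.*` value to the upload script as an `INPUT_*` environment variable. A minimal sketch of that contract, with placeholder values (the token and SHA below are not real):

```python
# Sketch of the env-var contract between action.yml and do_upload.py.
# Placeholder values only; a real run gets these from the workflow inputs.
import os
import subprocess

env = {
    **os.environ,
    "INPUT_TOKEN": "<codecov-token>",
    "INPUT_COMMIT_SHA": "0123abcd",
    "INPUT_TYPE": "backend",
    "INPUT_FILES": ".artifacts/*.coverage.xml",
    "INPUT_TEST_RESULT_FILES": ".artifacts/*.junit.xml",
}
subprocess.run(["./.github/actions/artifacts/do_upload.py"], env=env, check=True)
```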

.github/actions/artifacts/do_upload.py (+93, -0)

@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+
+import glob
+import itertools
+import os
+from subprocess import Popen
+
+
+def run_command(command: list[str], log_file: str):
+    with open(log_file, "wb") as f:
+        return Popen(command, stdout=f, stderr=f)
+
+
+def main():
+    """
+    First we get the arguments passed to the upload artifacts action via the env vars,
+    then we build the command for uploading coverage, run it in a thread, and build the
+    command for uploading test results and run it in a thread. We wait for both commands
+    to finish running then print their logs sequentially.
+
+    When we run the commands we're piping their stdout and stderr to a file so we can print the
+    contents of the files sequentially. We don't want the output of each command to be interleaved.
+
+    --plugin noop is passed to both commands, we don't need the CLI plugins because we're already
+    generating the coverage files ourselves.
+    """
+    input_token = os.getenv("INPUT_TOKEN")
+    input_commit_sha = os.getenv("INPUT_COMMIT_SHA")
+    input_type = os.getenv("INPUT_TYPE")
+    input_files = os.getenv("INPUT_FILES", "").split(",")
+    input_test_result_files = os.getenv("INPUT_TEST_RESULT_FILES", "").split(",")
+
+    glob_expanded_coverage_files = [glob.glob(file, recursive=True) for file in input_files]
+    coverage_files = list(itertools.chain.from_iterable(glob_expanded_coverage_files))
+
+    codecov_base_cmd = ["./codecov", "--verbose"]
+
+    upload_flags = [
+        "-t",
+        input_token,
+        "--commit-sha",
+        input_commit_sha,
+        "--plugin",
+        "noop",
+        "--flag",
+        input_type,
+    ]
+
+    upload_coverage_cmd = [*codecov_base_cmd, "upload-process", *upload_flags]
+    for file in coverage_files:
+        upload_coverage_cmd += ["--file", file]
+
+    upload_coverage_log_file = "coverage-upload.log"
+
+    glob_expanded_test_result_files = [
+        glob.glob(file, recursive=True) for file in input_test_result_files
+    ]
+    test_result_files = list(itertools.chain.from_iterable(glob_expanded_test_result_files))
+
+    upload_test_results_cmd = [
+        *codecov_base_cmd,
+        "do-upload",
+        "--report-type",
+        "test_results",
+        *upload_flags,
+    ]
+    for file in test_result_files:
+        upload_test_results_cmd += ["--file", file]
+
+    upload_test_results_log_file = "upload-test-results.log"
+
+    # start both uploads; stdout/stderr go to separate files so the logs are not interleaved
+    jobs = [
+        run_command(upload_test_results_cmd, upload_test_results_log_file),
+        run_command(upload_coverage_cmd, upload_coverage_log_file),
+    ]
+    tail_args = ("tail", "-f", "--sleep-interval", "3")
+    for job in jobs:
+        tail_args += ("--pid", str(job.pid))
+    tail_args += (
+        upload_coverage_log_file,
+        upload_test_results_log_file,
+    )
+
+    # wait, while showing un-interleaved logs
+    jobs.append(Popen(tail_args))
+
+    for job in jobs:
+        job.wait()
+
+
+if __name__ == "__main__":
+    main()
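To make the assembled invocations concrete, here is a small sketch that builds the same two command lists with sample values and prints them; the token, SHA, flag, and file paths are placeholders:

```python
# Illustrative only: the shape of the two codecov CLI invocations built above.
base = ["./codecov", "--verbose"]
flags = ["-t", "<token>", "--commit-sha", "0123abcd", "--plugin", "noop", "--flag", "backend"]

coverage_cmd = [*base, "upload-process", *flags, "--file", ".artifacts/be.coverage.xml"]
test_results_cmd = [
    *base, "do-upload", "--report-type", "test_results", *flags,
    "--file", ".artifacts/pytest.junit.xml",
]
print(" ".join(coverage_cmd))
print(" ".join(test_results_cmd))
```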

.github/actions/artifacts/download_codecov_cli.py (+44, -0)

@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+
+import os
+import subprocess
+import urllib.request
+
+
+def download_file(url, filename):
+    chunk_size = 1024 * 1024  # 1 MB chunks
+    with urllib.request.urlopen(url) as response:
+        with open(filename, "wb") as out_file:
+            while True:
+                chunk = response.read(chunk_size)
+                if not chunk:
+                    break
+                out_file.write(chunk)
+
+
+def run_command(command):
+    # check=True so that if the command fails, the entire script fails
+    subprocess.run(command, check=True)
+
+
+def main():
+    key_url = "https://keybase.io/codecovsecurity/pgp_keys.asc"
+    cli_url = "https://cli.codecov.io/latest/linux/codecov"
+    sha256sum_url = "https://cli.codecov.io/latest/linux/codecov.SHA256SUM"
+    sha256sig_url = "https://cli.codecov.io/latest/linux/codecov.SHA256SUM.sig"
+
+    download_file(key_url, "pgp_keys.asc")
+    run_command(
+        ["gpg", "--no-default-keyring", "--keyring", "trustedkeys.gpg", "--import", "pgp_keys.asc"]
+    )
+
+    download_file(cli_url, "codecov")
+    download_file(sha256sum_url, "codecov.SHA256SUM")
+    download_file(sha256sig_url, "codecov.SHA256SUM.sig")
+    run_command(["gpgv", "codecov.SHA256SUM.sig", "codecov.SHA256SUM"])
+    run_command(["shasum", "-a", "256", "-c", "codecov.SHA256SUM"])
+    os.chmod("codecov", 0o755)
+
+
+if __name__ == "__main__":
+    main()
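The checksum step shells out to `shasum`; the same digest comparison could be done in pure Python with `hashlib`. A sketch, assuming the SHA256SUM file uses the usual `<hex digest>  <filename>` layout:

```python
# Hedged alternative to the shasum call: verify the download with hashlib.
import hashlib

def verify_sha256(path: str, sums_file: str) -> None:
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    with open(sums_file) as f:
        for line in f:
            expected, _, name = line.strip().partition("  ")
            if name == path and expected == digest:
                return  # checksum matches
    raise SystemExit(f"checksum mismatch for {path}")

verify_sha256("codecov", "codecov.SHA256SUM")
```

Note this only replaces the digest comparison; the gpgv step above is still what establishes that the SHA256SUM file itself can be trusted.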

Makefile (+8, -4)

@@ -93,7 +93,7 @@ build-chartcuterie-config:
 
 run-acceptance:
 	@echo "--> Running acceptance tests"
-	python3 -b -m pytest tests/acceptance --cov . --cov-report="xml:.artifacts/acceptance.coverage.xml" --json-report --json-report-file=".artifacts/pytest.acceptance.json" --json-report-omit=log
+	python3 -b -m pytest tests/acceptance --cov . --cov-report="xml:.artifacts/acceptance.coverage.xml" --json-report --json-report-file=".artifacts/pytest.acceptance.json" --json-report-omit=log --junit-xml=".artifacts/acceptance.junit.xml" -o junit_suite_name=acceptance
 	@echo ""
 
 test-cli: create-db
@@ -141,7 +141,9 @@ test-python-ci:
 		--cov . $(COV_ARGS) \
 		--json-report \
 		--json-report-file=".artifacts/pytest.json" \
-		--json-report-omit=log
+		--json-report-omit=log \
+		--junit-xml=.artifacts/pytest.junit.xml \
+		-o junit_suite_name=pytest
 	@echo ""
 
 # it's not possible to change settings.DATABASE after django startup, so
@@ -164,20 +166,22 @@ test-monolith-dbs:
 	  --json-report \
 	  --json-report-file=".artifacts/pytest.monolith-dbs.json" \
 	  --json-report-omit=log \
+	  --junit-xml=.artifacts/monolith-dbs.junit.xml \
+	  -o junit_suite_name=monolith-dbs \
 	;
 	@echo ""
 
 test-tools:
 	@echo "--> Running tools tests"
 	@# bogus configuration to force vanilla pytest
-	python3 -b -m pytest -c setup.cfg --confcutdir tests/tools tests/tools -vv --cov=tools --cov=tests/tools --cov-report="xml:.artifacts/tools.coverage.xml"
+	python3 -b -m pytest -c setup.cfg --confcutdir tests/tools tests/tools -vv --cov=tools --cov=tests/tools --cov-report="xml:.artifacts/tools.coverage.xml" --junit-xml=.artifacts/tools.junit.xml -o junit_suite_name=tools
 	@echo ""
 
 # JavaScript relay tests are meant to be run within Symbolicator test suite, as they are parametrized to verify both processing pipelines during migration process.
 # Running Locally: Run `sentry devservices up kafka` before starting these tests
 test-symbolicator:
 	@echo "--> Running symbolicator tests"
-	python3 -b -m pytest tests/symbolicator -vv --cov . --cov-report="xml:.artifacts/symbolicator.coverage.xml"
+	python3 -b -m pytest tests/symbolicator -vv --cov . --cov-report="xml:.artifacts/symbolicator.coverage.xml" --junit-xml=.artifacts/symbolicator.junit.xml -o junit_suite_name=symbolicator
 	python3 -b -m pytest tests/relay_integration/lang/javascript/ -vv -m symbolicator
 	python3 -b -m pytest tests/relay_integration/lang/java/ -vv -m symbolicator
 	@echo ""