
Mute/Unmute tool (#11124)

Kirill Rysin 3 months ago
parent
commit
decc7f1b7c

+ 57 - 0
.github/config/mute_rules.md

@@ -89,3 +89,60 @@ Open the [Flaky](https://datalens.yandex/4un3zdm0zcnyr) dashboard.
 - If the `summary:` column shows `mute <= 3` and `success rate >= 98%` - **it's time to enable the test**.
 - Perform steps from [How to Unmute](#how-to-unmute)
 - You are awesome!
+
+### Unmute stable and mute flaky tests automatically
+
+
+**Setup**
+1) `pip install PyGithub`
+2) Request a GitHub personal access token (classic)
+```
+# The GitHub API personal access token (classic) should have permissions to:
+# repo
+# - repo:status
+# - repo_deployment
+# - public_repo
+# admin:org
+# project
+```
+3) Save it to the environment: `export GITHUB_TOKEN=<token>`
+4) Save the IAM cloud key file path to the environment: `export CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<iam_cloud_file>`
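+
+A minimal sketch of the environment checks the scripts perform (they mirror `mute_worker` in `create_new_muted_ya.py` and `run_query` in `update_mute_issues.py`; the helper name is illustrative):
+
+```
+import os
+
+def check_env():
+    # GITHUB_TOKEN is used for the GitHub GraphQL API calls that create issues
+    if "GITHUB_TOKEN" not in os.environ:
+        raise SystemExit("GITHUB_TOKEN is missing")
+    # The CI_ variable is required for the YDB analytics queries; the scripts copy it into
+    # the name the YDB SDK expects so local ydb test runs are not affected
+    if "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS" not in os.environ:
+        raise SystemExit("CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS is missing")
+    os.environ["YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"] = os.environ[
+        "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"
+    ]
+```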
+
+**How to use**
+
+0) *Update your branch* - you should have the latest version of muted_ya.txt locally
+1) Trigger a run of https://github.com/ydb-platform/ydb/actions/workflows/collect_analytics.yml
+2) Wait until the step `Collect all test monitor (how long tests in state)` finishes (about 7 min)
+3) Run `create_new_muted_ya.py update_muted_ya` - it creates a set of files in `%repo_path%/mute_update/` (see the table and sketch below)
+     
+| File Name                          | Description                                                                                   |
+|------------------------------------|-----------------------------------------------------------------------------------------------|
+| deleted.txt                        | Tests that look deleted (no runs for 28 days in a row)                                        |
+| deleted_debug.txt                  | The same, with detailed info                                                                  |
+| flaky.txt                          | Tests that are flaky today AND have total runs > 3 AND fail_count > 2                         |
+| flaky_debug.txt                    | The same, with detailed info                                                                  |
+| muted_stable.txt                   | Muted tests that have been stable for the last 14 days                                        |
+| muted_stable_debug.txt             | The same, with detailed info                                                                  |
+| new_muted_ya.txt                   | muted_ya.txt version with **muted_stable** and **deleted** tests excluded                     |
+| new_muted_ya_debug.txt             | The same, with detailed info                                                                  |
+| new_muted_ya_with_flaky.txt        | muted_ya.txt version with **muted_stable** and **deleted** excluded and **flaky** included    |
+| new_muted_ya_with_flaky_debug.txt  | The same, with detailed info                                                                  |
+| muted_ya_sorted.txt                | Original muted_ya.txt with wildcards resolved to real tests (not chunks)                      |
+| muted_ya_sorted_debug.txt          | The same, with detailed info                                                                  |
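+
+How the result files relate to each other - a minimal sketch (simplified from `apply_and_add_mutes` in `create_new_muted_ya.py`; the function names here are illustrative):
+
+```
+import re
+
+def chunk_to_wildcard(test_string):
+    # Chunk suffixes like "[3/10]" are collapsed to "[*/*]" so one mute line covers all chunks
+    return re.sub(r'\d+/(\d+)\]', r'*/*]', test_string)
+
+def build_new_muted_ya(muted_ya_sorted, muted_stable, deleted, flaky):
+    # new_muted_ya.txt: current mutes minus tests that became stable or look deleted
+    new_muted_ya = [t for t in muted_ya_sorted if t not in muted_stable and t not in deleted]
+    # new_muted_ya_with_flaky.txt: the same list plus today's flaky tests
+    with_flaky = sorted(set(new_muted_ya) | set(flaky))
+    return sorted(new_muted_ya), with_flaky
+```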
+
+
+**1. Unmute Stable**
+1) Replace the content of [muted_ya.txt](https://github.com/ydb-platform/ydb/blob/main/.github/config/muted_ya.txt) with the content of **new_muted_ya.txt**
+2) Create a new PR and paste into the PR description:
+- the `<Unmuted tests : stable 9 and deleted 0>` line from the console output
+- the content of **muted_stable_debug.txt** and **deleted_debug.txt**
+3) Merge
+
+Example: https://github.com/ydb-platform/ydb/pull/11099
+
+**2. Mute Flaky** (ONLY AFTER UNMUTING STABLE)
+1) Replace the content of [muted_ya.txt](https://github.com/ydb-platform/ydb/blob/main/.github/config/muted_ya.txt) with the content of **new_muted_ya_with_flaky.txt**
+2) Create a new PR
+3) Run `create_new_muted_ya.py create_issues` - it creates an issue for each flaky test in **flaky.txt** (see the sketch after this list)
+4) Copy the `Created issue ...` lines from the console output and paste them into the PR
+5) Merge
+
+Example: https://github.com/ydb-platform/ydb/pull/11101
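+
+For reference, `create_issues` skips tests that already have a mute issue and groups the remaining flaky tests by suite and owner, one issue per group - a simplified sketch of the grouping (based on `create_mute_issues` in `create_new_muted_ya.py`):
+
+```
+def group_tests_for_issues(flaky_tests, muted_tests_in_issues):
+    # flaky_tests: rows from the test monitor; muted_tests_in_issues: {full_name: [issue info, ...]}
+    prepared_tests_by_suite = {}
+    for test in flaky_tests:
+        if test['full_name'] in muted_tests_in_issues:
+            continue  # an issue for this test already exists
+        key = f"{test['suite_folder']}:{test['owner']}"  # one issue per suite/owner pair
+        prepared_tests_by_suite.setdefault(key, []).append(test)
+    return prepared_tests_by_suite
+```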

+ 0 - 226
.github/scripts/analytics/get_mute_issues.py

@@ -1,226 +0,0 @@
-import os
-import re
-import requests
-
-ORG_NAME = 'ydb-platform'
-PROJECT_ID = '45'
-query_template = """
-{
-  organization(login: "%s") {
-    projectV2(number: %s) {
-      id
-      title
-      items(first: 100, after: %s) {
-        nodes {
-          content {
-            ... on Issue {
-              id
-              title
-              url
-              state
-              body
-              createdAt
-            }
-          }
-          fieldValues(first: 20) {
-            nodes {
-              ... on ProjectV2ItemFieldSingleSelectValue {
-                field {
-                  ... on ProjectV2SingleSelectField {
-                    name
-                  }
-                }
-                name
-                id
-                updatedAt
-              }
-              ... on ProjectV2ItemFieldLabelValue {
-                labels(first: 20) {
-                  nodes {
-                    id
-                    name
-                  }
-                }
-              }
-              ... on ProjectV2ItemFieldTextValue {
-                text
-                id
-                updatedAt
-                creator {
-                  url
-                }
-              }
-              ... on ProjectV2ItemFieldMilestoneValue {
-                milestone {
-                  id
-                }
-              }
-              ... on ProjectV2ItemFieldRepositoryValue {
-                repository {
-                  id
-                  url
-                }
-              }
-            }
-          }
-        }
-        pageInfo {
-          hasNextPage
-          endCursor
-        }
-      }
-    }
-  }
-}
-"""
-
-
-def run_query(query, headers):
-    request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
-    if request.status_code == 200:
-        return request.json()
-    else:
-        raise Exception(f"Query failed to run by returning code of {request.status_code}. {query}")
-
-
-def fetch_all_issues(org_name, project_id):
-    issues = []
-    has_next_page = True
-    end_cursor = "null"
-
-    while has_next_page:
-        query = query_template % (org_name, project_id, end_cursor)
-        GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
-        headers = {"Authorization": f"Bearer {GITHUB_TOKEN}"}
-        result = run_query(query, headers)
-
-        if result:
-            project_items = result['data']['organization']['projectV2']['items']
-            issues.extend(project_items['nodes'])
-
-            page_info = project_items['pageInfo']
-            has_next_page = page_info['hasNextPage']
-            end_cursor = f"\"{page_info['endCursor']}\"" if page_info['endCursor'] else "null"
-        else:
-            has_next_page = False
-
-    return issues
-
-
-def parse_body(body):
-    tests = []
-    branches = []
-    prepared_body = ''
-    start_mute_list = "<!--mute_list_start-->"
-    end_mute_list = "<!--mute_list_end-->"
-    start_branch_list = "<!--branch_list_start-->"
-    end_branch_list = "<!--branch_list_end-->"
-
-    # tests
-    if all(x in body for x in [start_mute_list, end_mute_list]):
-        idx1 = body.find(start_mute_list)
-        idx2 = body.find(end_mute_list)
-        lines = body[idx1 + len(start_mute_list) + 1 : idx2].split('\n')
-    else:
-        if body.startswith('Mute:'):
-            prepared_body = body.split('Mute:', 1)[1].strip()
-        elif body.startswith('Mute'):
-            prepared_body = body.split('Mute', 1)[1].strip()
-        elif body.startswith('ydb'):
-            prepared_body = body
-        lines = prepared_body.split('**Add line to')[0].split('\n')
-    tests = [line.strip() for line in lines if line.strip().startswith('ydb/')]
-
-    # branch
-    if all(x in body for x in [start_branch_list, end_branch_list]):
-        idx1 = body.find(start_branch_list)
-        idx2 = body.find(end_branch_list)
-        branches = body[idx1 + len(start_branch_list) + 1 : idx2].split('\n')
-    else:
-        branches = ['main']
-
-    return tests, branches
-
-
-def get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID):
-    issues = fetch_all_issues(ORG_NAME, PROJECT_ID)
-    issues_prepared = {}
-    for issue in issues:
-        content = issue['content']
-        if content:
-            body = content['body']
-
-            # for debug
-            if content['id'] == 'I_kwDOGzZjoM6V3BoE':
-                print(1)
-            #
-
-            tests, branches = parse_body(body)
-
-            field_values = issue.get('fieldValues', {}).get('nodes', [])
-            for field_value in field_values:
-                field_name = field_value.get('field', {}).get('name', '').lower()
-
-                if field_name == "status" and 'name' in field_value:
-                    status = field_value.get('name', 'N/A')
-                    status_updated = field_value.get('updatedAt', '1970-01-0901T00:00:01Z')
-                elif field_name == "owner" and 'name' in field_value:
-                    owner = field_value.get('name', 'N/A')
-
-            print(f"Issue ID: {content['id']}")
-            print(f"Title: {content['title']}")
-            print(f"URL: {content['url']}")
-            print(f"State: {content['state']}")
-            print(f"CreatedAt: {content['createdAt']}")
-            print(f"Status: {status}")
-            print(f"Status updated: {status_updated}")
-            print(f"Owner: {owner}")
-            print("Tests:")
-
-            issues_prepared[content['id']] = {}
-            issues_prepared[content['id']]['title'] = content['title']
-            issues_prepared[content['id']]['url'] = content['url']
-            issues_prepared[content['id']]['state'] = content['state']
-            issues_prepared[content['id']]['createdAt'] = content['createdAt']
-            issues_prepared[content['id']]['status_updated'] = status_updated
-            issues_prepared[content['id']]['status'] = status
-            issues_prepared[content['id']]['owner'] = owner
-            issues_prepared[content['id']]['tests'] = []
-            issues_prepared[content['id']]['branches'] = branches
-
-            for test in tests:
-                issues_prepared[content['id']]['tests'].append(test)
-                print(f"- {test}")
-            print('\n')
-
-    return issues_prepared
-
-
-def get_muted_tests():
-    issues = get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID)
-    muted_tests = {}
-    for issue in issues:
-        if issues[issue]["status"] == "Muted":
-            for test in issues[issue]['tests']:
-                if test not in muted_tests:
-                    muted_tests[test] = []
-                    muted_tests[test].append(
-                        {
-                            'url': issues[issue]['url'],
-                            'createdAt': issues[issue]['createdAt'],
-                            'status_updated': issues[issue]['status_updated'],
-                        }
-                    )
-
-    return muted_tests
-
-
-def main():
-    if "GITHUB_TOKEN" not in os.environ:
-        print("Error: Env variable GITHUB_TOKEN is missing, skipping")
-        return 1
-    get_muted_tests()
-
-
-if __name__ == "__main__":
-    main()

+ 401 - 0
.github/scripts/tests/create_new_muted_ya.py

@@ -0,0 +1,401 @@
+#!/usr/bin/env python3
+import argparse
+import configparser
+import datetime
+import os
+import posixpath
+import re
+import ydb
+import logging
+
+from get_diff_lines_of_file import get_diff_lines_of_file
+from mute_utils import pattern_to_re
+from transform_ya_junit import YaMuteCheck
+from update_mute_issues import (
+    create_and_add_issue_to_project,
+    generate_github_issue_title_and_body,
+    get_muted_tests_from_issues,
+)
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+dir = os.path.dirname(__file__)
+config = configparser.ConfigParser()
+config_file_path = f"{dir}/../../config/ydb_qa_db.ini"
+repo_path = f"{dir}/../../../"
+muted_ya_path = '.github/config/muted_ya.txt'
+config.read(config_file_path)
+
+DATABASE_ENDPOINT = config["QA_DB"]["DATABASE_ENDPOINT"]
+DATABASE_PATH = config["QA_DB"]["DATABASE_PATH"]
+
+
+def execute_query(driver):
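+    # Pull today's rows for branch 'main' from tests_monitor_test_with_filtered_states and flag,
+    # per test, whether it is new-flaky, flaky, muted-stable (>= 14 days), or has had no runs
+    # for >= 14 days (treated as deleted).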
+    query_string = '''
+    SELECT * from (
+        SELECT data.*,
+        CASE WHEN new_flaky.full_name IS NOT NULL THEN True ELSE False END AS new_flaky_today,
+        CASE WHEN flaky.full_name IS NOT NULL THEN True ELSE False END AS flaky_today,
+        CASE WHEN muted_stable.full_name IS NOT NULL THEN True ELSE False END AS muted_stable_today,
+        CASE WHEN muted_stable_n_days.full_name IS NOT NULL THEN True ELSE False END AS muted_stable_n_days_today,
+        CASE WHEN deleted.full_name IS NOT NULL THEN True ELSE False END AS deleted_today
+
+        FROM
+        (SELECT test_name, suite_folder, full_name, date_window, build_type, branch, days_ago_window, history, history_class, pass_count, mute_count, fail_count, skip_count, success_rate, summary, owner, is_muted, is_test_chunk, state, previous_state, state_change_date, days_in_state, previous_state_filtered, state_change_date_filtered, days_in_state_filtered, state_filtered
+        FROM `test_results/analytics/tests_monitor_test_with_filtered_states`) as data
+        left JOIN 
+        (SELECT full_name, build_type, branch
+            FROM `test_results/analytics/tests_monitor_test_with_filtered_states`
+            WHERE state = 'Flaky'
+            AND days_in_state = 1
+            AND date_window = CurrentUtcDate()
+            )as new_flaky
+        ON 
+            data.full_name = new_flaky.full_name
+            and data.build_type = new_flaky.build_type
+            and data.branch = new_flaky.branch
+        LEFT JOIN 
+        (SELECT full_name, build_type, branch
+            FROM `test_results/analytics/tests_monitor_test_with_filtered_states`
+            WHERE state = 'Flaky'
+            AND date_window = CurrentUtcDate()
+            )as flaky
+        ON 
+            data.full_name = flaky.full_name
+            and data.build_type = flaky.build_type
+            and data.branch = flaky.branch
+        LEFT JOIN 
+        (SELECT full_name, build_type, branch
+            FROM `test_results/analytics/tests_monitor_test_with_filtered_states`
+            WHERE state = 'Muted Stable'
+            AND date_window = CurrentUtcDate()
+            )as muted_stable
+
+        ON 
+            data.full_name = muted_stable.full_name
+            and data.build_type = muted_stable.build_type
+            and data.branch = muted_stable.branch
+        LEFT JOIN 
+        (SELECT full_name, build_type, branch
+            FROM `test_results/analytics/tests_monitor_test_with_filtered_states`
+            WHERE state= 'Muted Stable'
+            AND days_in_state >= 14
+            AND date_window = CurrentUtcDate()
+            and is_test_chunk = 0
+            )as muted_stable_n_days
+
+        ON 
+            data.full_name = muted_stable_n_days.full_name
+            and data.build_type = muted_stable_n_days.build_type
+            and data.branch = muted_stable_n_days.branch
+       
+        LEFT JOIN 
+        (SELECT full_name, build_type, branch
+            FROM `test_results/analytics/tests_monitor_test_with_filtered_states`
+            WHERE state = 'no_runs'
+            AND days_in_state >= 14
+            AND date_window = CurrentUtcDate()
+            and is_test_chunk = 0
+            )as deleted
+
+        ON 
+            data.full_name = deleted.full_name
+            and data.build_type = deleted.build_type
+            and data.branch = deleted.branch
+        ) 
+        where date_window = CurrentUtcDate() and branch = 'main'
+    
+    '''
+
+    query = ydb.ScanQuery(query_string, {})
+    table_client = ydb.TableClient(driver, ydb.TableClientSettings())
+    it = table_client.scan_query(query)
+    results = []
+    while True:
+        try:
+            result = next(it)
+            results = results + result.result_set.rows
+        except StopIteration:
+            break
+
+    return results
+
+
+def add_lines_to_file(file_path, lines_to_add):
+    try:
+        os.makedirs(os.path.dirname(file_path), exist_ok=True)
+        with open(file_path, 'w') as f:
+            f.writelines(lines_to_add)
+        logging.info(f"Lines added to {file_path}")
+    except Exception as e:
+        logging.error(f"Error adding lines to {file_path}: {e}")
+
+
+def apply_and_add_mutes(all_tests, output_path, mute_check):
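+    # Write the mute_update/*.txt files described in .github/config/mute_rules.md: deleted,
+    # muted_stable, flaky, muted_ya_sorted, new_muted_ya and new_muted_ya_with_flaky
+    # (each with a *_debug variant), plus unmuted_debug.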
+
+    output_path = os.path.join(output_path, 'mute_update')
+
+    all_tests = sorted(all_tests, key=lambda d: d['full_name'])
+
+    try:
+
+        deleted_tests = set(
+            f"{test.get('suite_folder')} {test.get('test_name')}\n" for test in all_tests if test.get('deleted_today')
+        )
+
+        deleted_tests = sorted(deleted_tests)
+        add_lines_to_file(os.path.join(output_path, 'deleted.txt'), deleted_tests)
+
+        deleted_tests_debug = set(
+            f"{test.get('suite_folder')} {test.get('test_name')} # owner {test.get('owner')} success_rate {test.get('success_rate')}%, state {test.get('state')} days in state {test.get('days_in_state')}\n"
+            for test in all_tests
+            if test.get('deleted_today')
+        )
+
+        deleted_tests_debug = sorted(deleted_tests_debug)
+        add_lines_to_file(os.path.join(output_path, 'deleted_debug.txt'), deleted_tests_debug)
+
+        muted_stable_tests = set(
+            f"{test.get('suite_folder')} {test.get('test_name')}\n"
+            for test in all_tests
+            if test.get('muted_stable_n_days_today')
+        )
+
+        muted_stable_tests = sorted(muted_stable_tests)
+        add_lines_to_file(os.path.join(output_path, 'muted_stable.txt'), muted_stable_tests)
+
+        muted_stable_tests_debug = set(
+            f"{test.get('suite_folder')} {test.get('test_name')} "
+            + f"# owner {test.get('owner')} success_rate {test.get('success_rate')}%, state {test.get('state')} days in state {test.get('days_in_state')}\n"
+            for test in all_tests
+            if test.get('muted_stable_n_days_today')
+        )
+
+        muted_stable_tests_debug = sorted(muted_stable_tests_debug)
+        add_lines_to_file(os.path.join(output_path, 'muted_stable_debug.txt'), muted_stable_tests_debug)
+
+        # Add all flaky tests
+        flaky_tests = set(
+            re.sub(r'\d+/(\d+)\]', r'*/*]', f"{test.get('suite_folder')} {test.get('test_name')}\n")
+            for test in all_tests
+            if test.get('days_in_state') >= 1
+            and test.get('flaky_today')
+            and (test.get('pass_count') + test.get('fail_count')) > 3
+            and test.get('fail_count') > 2
+        )
+        flaky_tests = sorted(flaky_tests)
+        add_lines_to_file(os.path.join(output_path, 'flaky.txt'), flaky_tests)
+
+        flaky_tests_debug = set(
+            re.sub(r'\d+/(\d+)\]', r'*/*]', f"{test.get('suite_folder')} {test.get('test_name')}")
+            + f" # owner {test.get('owner')} success_rate {test.get('success_rate')}%, state {test.get('state')}, days in state {test.get('days_in_state')}, pass_count {test.get('pass_count')}, fail count {test.get('fail_count')}\n"
+            for test in all_tests
+            if test.get('days_in_state') >= 1
+            and test.get('flaky_today')
+            and (test.get('pass_count') + test.get('fail_count')) > 3
+            and test.get('fail_count') > 2
+        )
+        ## TODO: a test may run only once a day; if the threshold is reached over the last 7 days, mute it
+        ## are today's failures more significant?
+        ## should we look at a 7-day window?
+        #----
+        ## Mute flaky rarely-run tests
+        ##   Figure out why 90% of flaky tests have only 1 failure and stay in the Flaky state for only 2 days
+        flaky_tests_debug = sorted(flaky_tests_debug)
+        add_lines_to_file(os.path.join(output_path, 'flaky_debug.txt'), flaky_tests_debug)
+
+        new_muted_ya_tests_debug = []
+        new_muted_ya_tests = []
+        new_muted_ya_tests_with_flaky = []
+        new_muted_ya_tests_with_flaky_debug = []
+        unmuted_tests_debug = []
+        muted_ya_tests_sorted = []
+        muted_ya_tests_sorted_debug = []
+        muted_before_count = 0
+        unmuted_stable = 0
+        unmuted_deleted = 0
+        # Apply mute check and filter out already muted tests
+        for test in all_tests:
+            testsuite = test.get('suite_folder')
+            testcase = test.get('test_name')
+            success_rate = test.get('success_rate')
+            days_in_state = test.get('days_in_state')
+            owner = test.get('owner')
+            state = test.get('state')
+            test_string = f"{testsuite} {testcase}\n"
+            test_string_debug = f"{testsuite} {testcase} # owner {owner} success_rate {success_rate}%, state {state} days in state {days_in_state}\n"
+            test_string = re.sub(r'\d+/(\d+)\]', r'*/*]', test_string)
+            if (
+                (testsuite and testcase and mute_check(testsuite, testcase)) or test_string in flaky_tests
+            ) and test_string not in new_muted_ya_tests_with_flaky:
+                if test_string not in muted_stable_tests and test_string not in deleted_tests:
+                    new_muted_ya_tests_with_flaky.append(test_string)
+                    new_muted_ya_tests_with_flaky_debug.append(test_string_debug)
+
+            if testsuite and testcase and mute_check(testsuite, testcase):
+               
+                if test_string not in muted_ya_tests_sorted:
+                    muted_ya_tests_sorted.append(test_string)
+                    muted_ya_tests_sorted_debug.append(test_string_debug)
+                    muted_before_count += 1
+                if test_string not in new_muted_ya_tests:
+                    if test_string not in muted_stable_tests and test_string not in deleted_tests:
+                        new_muted_ya_tests.append(test_string)
+                        new_muted_ya_tests_debug.append(test_string_debug)
+                    if test_string in muted_stable_tests:
+                        unmuted_stable += 1
+                    if test_string in deleted_tests:
+                        unmuted_deleted += 1
+                    unmuted_tests_debug.append(test_string_debug)
+
+        muted_ya_tests_sorted = sorted(muted_ya_tests_sorted)
+        add_lines_to_file(os.path.join(output_path, 'muted_ya_sorted.txt'), muted_ya_tests_sorted)
+        muted_ya_tests_sorted_debug = sorted(muted_ya_tests_sorted_debug)
+        add_lines_to_file(os.path.join(output_path, 'muted_ya_sorted_debug.txt'), muted_ya_tests_sorted_debug)
+        new_muted_ya_tests = sorted(new_muted_ya_tests)
+        add_lines_to_file(os.path.join(output_path, 'new_muted_ya.txt'), new_muted_ya_tests)
+        new_muted_ya_tests_debug = sorted(new_muted_ya_tests_debug)
+        add_lines_to_file(os.path.join(output_path, 'new_muted_ya_debug.txt'), new_muted_ya_tests_debug)
+        new_muted_ya_tests_with_flaky = sorted(new_muted_ya_tests_with_flaky)
+        add_lines_to_file(os.path.join(output_path, 'new_muted_ya_with_flaky.txt'), new_muted_ya_tests_with_flaky)
+        new_muted_ya_tests_with_flaky_debug = sorted(new_muted_ya_tests_with_flaky_debug)
+        add_lines_to_file(
+            os.path.join(output_path, 'new_muted_ya_with_flaky_debug.txt'), new_muted_ya_tests_with_flaky_debug
+        )
+        unmuted_tests_debug = sorted(unmuted_tests_debug)
+        add_lines_to_file(os.path.join(output_path, 'unmuted_debug.txt'), unmuted_tests_debug)
+
+        logging.info(f"Muted before script: {muted_before_count} tests")
+        logging.info(f"Muted stable : {len(muted_stable_tests)}")
+        logging.info(f"Flaky tests : {len(flaky_tests)}")
+        logging.info(f"Result: Muted without deleted and stable : {len(new_muted_ya_tests)}")
+        logging.info(f"Result: Muted without deleted and stable, with flaky : {len(new_muted_ya_tests_with_flaky)}")
+        logging.info(f"Result: Unmuted tests : stable {unmuted_stable} and deleted {unmuted_deleted}")
+    except (KeyError, TypeError) as e:
+        logging.error(f"Error processing test data: {e}. Check your query results for valid keys.")
+        return []
+
+    return len(new_muted_ya_tests)
+
+
+def read_tests_from_file(file_path):
+    result = []
+    with open(file_path, "r") as fp:
+        for line in fp:
+            line = line.strip()
+            try:
+                testsuite, testcase = line.split(" ", maxsplit=1)
+                result.append({'testsuite': testsuite, 'testcase': testcase, 'full_name': f"{testsuite}/{testcase}"})
+            except ValueError:
+                logging.warning(f"can't parse line: {line!r}")
+                continue
+    return result
+
+
+def create_mute_issues(all_tests, file_path):
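+    # For every test listed in file_path (normally mute_update/flaky.txt) that does not already
+    # have a mute issue, group the tests by "suite:owner" and create one GitHub issue per group.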
+    base_date = datetime.datetime(1970, 1, 1)
+    tests_from_file = read_tests_from_file(file_path)
+    muted_tests_in_issues = get_muted_tests_from_issues()
+    prepared_tests_by_suite = {}
+    for test in all_tests:
+        for test_from_file in tests_from_file:
+            if test['full_name'] == test_from_file['full_name']:
+                if test['full_name'] in muted_tests_in_issues:
+                    logging.info(
+                        f"test {test['full_name']} already have issue, {muted_tests_in_issues[test['full_name']][0]['url']}"
+                    )
+                else:
+                    key = f"{test_from_file['testsuite']}:{test['owner']}"
+                    if not prepared_tests_by_suite.get(key):
+                        prepared_tests_by_suite[key] = []
+                    prepared_tests_by_suite[key].append(
+                        {
+                            'mute_string': f"{ test.get('suite_folder')} {test.get('test_name')}",
+                            'test_name': test.get('test_name'),
+                            'suite_folder': test.get('suite_folder'),
+                            'full_name': test.get('full_name'),
+                            'success_rate': test.get('success_rate'),
+                            'days_in_state': test.get('days_in_state'),
+                            'date_window': (base_date + datetime.timedelta(days=test.get('date_window'))).date() ,
+                            'owner': test.get('owner'),
+                            'state': test.get('state'),
+                            'summary': test.get('summary'),
+                            'fail_count': test.get('fail_count'),
+                            'pass_count': test.get('pass_count'),
+                            'branch': test.get('branch'),
+                        }
+                    )
+    results = []
+    for item in prepared_tests_by_suite:
+
+        title, body = generate_github_issue_title_and_body(prepared_tests_by_suite[item])
+        result = create_and_add_issue_to_project(
+            title, body, state='Muted', owner=prepared_tests_by_suite[item][0]['owner'].split('/', 1)[1]
+        )
+        if not result:
+            break
+        else:
+            results.append(
+                f"Created issue '{title}' for {prepared_tests_by_suite[item][0]['owner']}, url {result['issue_url']}"
+            )
+
+    print("\n\n")
+    print("\n".join(results))
+
+
+def mute_worker(args):
+
+    # Simplified Connection
+    if "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS" not in os.environ:
+        print("Error: Env variable CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS is missing, skipping")
+        return 1
+    else:
+        # Do not set the 'real' variable in GitHub workflows because it interferes with ydb tests,
+        # so set it up locally instead
+        os.environ["YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"] = os.environ[
+            "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"
+        ]
+
+    mute_check = YaMuteCheck()
+    mute_check.load(muted_ya_path)
+
+    with ydb.Driver(
+        endpoint=DATABASE_ENDPOINT,
+        database=DATABASE_PATH,
+        credentials=ydb.credentials_from_env_variables(),
+    ) as driver:
+        driver.wait(timeout=10, fail_fast=True)
+        session = ydb.retry_operation_sync(lambda: driver.table_client.session().create())
+        tc_settings = ydb.TableClientSettings().with_native_date_in_result_sets(enabled=True)
+
+        all_tests = execute_query(driver)
+    if args.mode == 'update_muted_ya':
+        output_path = args.output_folder
+        os.makedirs(output_path, exist_ok=True)
+        apply_and_add_mutes(all_tests, output_path, mute_check)
+
+    elif args.mode == 'create_issues':
+        file_path = args.file_path
+        create_mute_issues(all_tests, file_path)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Add tests to mutes files based on flaky_today condition")
+
+    subparsers = parser.add_subparsers(dest='mode', help="Mode to perform")
+
+    update_muted_ya_parser = subparsers.add_parser('update_muted_ya', help='create new muted_ya')
+    update_muted_ya_parser.add_argument('--output_folder', default=repo_path, required=False, help='Output folder.')
+
+    create_issues_parser = subparsers.add_parser(
+        'create_issues',
+        help='create issues by muted_ya like files',
+    )
+    create_issues_parser.add_argument(
+        '--file_path', default=f'{repo_path}/mute_update/flaky.txt', required=False, help='file path'
+    )
+
+    args = parser.parse_args()
+
+    mute_worker(args)

+ 487 - 0
.github/scripts/tests/update_mute_issues.py

@@ -0,0 +1,487 @@
+import os
+import re
+import requests
+from github import Github
+from urllib.parse import quote, urlencode
+
+
+ORG_NAME = 'ydb-platform'
+REPO_NAME = 'ydb'
+PROJECT_ID = '45'
+TEST_HISTORY_DASHBOARD = "https://datalens.yandex/4un3zdm0zcnyr"
+CURRENT_TEST_HISTORY_DASHBOARD = "https://datalens.yandex/34xnbsom67hcq?"
+
+# The GitHub API personal access token (classic) should have permissions to:
+# repo
+# - repo:status
+# - repo_deployment
+# - public_repo
+# admin:org
+# project
+
+def run_query(query, variables=None):
+    GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
+    HEADERS = {"Authorization": f"Bearer {GITHUB_TOKEN}", "Content-Type": "application/json"}
+    request = requests.post(
+        'https://api.github.com/graphql', json={'query': query, 'variables': variables}, headers=HEADERS
+    )
+    if request.status_code == 200:
+        return request.json()
+    else:
+        raise Exception(f"Query failed to run by returning code of {request.status_code}. {query}")
+
+
+def get_repository(org_name=ORG_NAME, repo_name=REPO_NAME):
+    query = """
+    {
+      organization(login: "%s") {
+        repository(name: "%s") {
+          id
+        }
+      }
+    }
+    """ % (
+        org_name,
+        repo_name,
+    )
+    result = run_query(query)
+    return result['data']['organization']['repository']
+
+
+def get_project_v2_fields(org_name=ORG_NAME, project_id=PROJECT_ID):
+    query_template = """
+   {
+      organization(login: "%s") {
+        projectV2(number: %s) {
+          id
+          fields(first: 100) {
+            nodes {
+              ... on ProjectV2Field {
+                id
+                name
+              }
+              ... on ProjectV2SingleSelectField {
+                id
+                name
+                options {
+                  id
+                  name
+                }
+              
+              }
+              
+            }
+          }
+        }
+      }
+    }
+    """
+    query = query_template % (org_name, project_id)
+
+    result = run_query(query)
+    return (
+        result['data']['organization']['projectV2']['id'],
+        result['data']['organization']['projectV2']['fields']['nodes'],
+    )
+
+
+def create_and_add_issue_to_project(title, body, project_id=PROJECT_ID, org_name=ORG_NAME, state=None, owner=None):
+    """Добавляет issue в проект.
+
+    Args:
+        title (str): Название issue.
+        body (str): Содержимое issue.
+        project_id (int): ID проекта.
+        org_name (str): Имя организации.
+
+    Returns:
+        None
+    """
+
+    result = None
+    # Get the IDs of the "Status" and "Owner" project fields
+    inner_project_id, project_fields = get_project_v2_fields(org_name, project_id)
+    state_field_id = None
+    owner_field_id = None
+    for field in project_fields:
+        if field.get('name'):
+            if field['name'].lower() == "status":
+                state_field_id = field['id']
+                state_option_id = None
+                if state:
+                    for option in field['options']:
+                        if option['name'].lower() == state.lower():
+                            state_option_id = option['id']
+                            break
+            if field['name'].lower() == "owner":
+                owner_field_id = field['id']
+                owner_option_id = None
+                if owner:
+                    for option in field['options']:
+                        if option['name'].lower() == owner.lower():
+                            owner_option_id = option['id']
+                            break
+
+    if not state_field_id or not owner_field_id:
+        raise Exception(f"Не найдены поля 'State' или 'Owner' в проекте {project_id}")
+    # get repo
+    repo = get_repository()
+    # create issue
+    query = """
+    mutation ($repositoryId: ID!, $title: String!, $body: String!) {
+      createIssue(input: {repositoryId: $repositoryId, title: $title, body: $body}) {
+        issue {
+          id,
+          url
+        }
+      }
+    }
+    """
+    variables = {"repositoryId": repo['id'], "title": title, "body": body}
+    issue = run_query(query, variables)
+    if not issue.get('errors'):
+        print(f"Issue {title} created ")
+    else:
+        print(f"Error: Issue {title} not created ")
+        return result
+    issue_id = issue['data']['createIssue']['issue']['id']
+    issue_url = issue['data']['createIssue']['issue']['url']
+
+    query_add_to_project = """
+     mutation ($projectId: ID!, $issueId: ID!) {
+    addProjectV2ItemById(input: {projectId: $projectId, contentId: $issueId}) {
+      item {
+          id
+        }
+      }
+    }
+    """
+    variables = {
+        "projectId": inner_project_id,
+        "issueId": issue_id,
+    }
+    result_add_to_project = run_query(query_add_to_project, variables)
+    item_id = result_add_to_project['data']['addProjectV2ItemById']['item']['id']
+    if not result_add_to_project.get('errors'):
+        print(f"Issue {issue_url} added to project.")
+    else:
+        print(f"Error: Issue {title}: {issue_url} not added to project.")
+        return result
+
+    for field_name, field_value, field_id, value_id in [
+        ['state', state, state_field_id, state_option_id],
+        ['owner', owner, owner_field_id, owner_option_id],
+    ]:
+        query_modify_fields = """
+      mutation ($projectId: ID!, $itemId: ID!, $FieldId: ID!, $OptionId: String) {
+        updateProjectV2ItemFieldValue(input: {
+          projectId: $projectId,
+          itemId: $itemId,
+          fieldId: $FieldId,
+          value: {
+            singleSelectOptionId: $OptionId
+          }
+        }) {
+          projectV2Item {
+            id
+          }
+        }
+
+      }
+      """
+        variables = {
+            "projectId": inner_project_id,
+            "itemId": item_id,
+            "FieldId": field_id,
+            "OptionId": value_id,
+        }
+        result_modify_field = run_query(query_modify_fields, variables)
+        if not result_modify_field.get('errors'):
+            print(f"Issue {title}: {issue_url} modified :{field_name} = {filed_value}")
+        else:
+            print(f"Error: Issue {title}: {issue_url}  not modified")
+            return result
+    result = {'issue_url': issue_url, 'owner': owner, 'title': title}
+    return result
+
+
+def fetch_all_issues(org_name=ORG_NAME, project_id=PROJECT_ID):
+    issues = []
+    has_next_page = True
+    end_cursor = "null"
+
+    project_issues_query = """
+    {
+      organization(login: "%s") {
+        projectV2(number: %s) {
+          id
+          title
+          items(first: 100, after: %s) {
+            nodes {
+              content {
+                ... on Issue {
+                  id
+                  title
+                  url
+                  state
+                  body
+                  createdAt
+                }
+              }
+              fieldValues(first: 20) {
+                nodes {
+                  ... on ProjectV2ItemFieldSingleSelectValue {
+                    field {
+                      ... on ProjectV2SingleSelectField {
+                        name
+                      }
+                    }
+                    name
+                    id
+                    updatedAt
+                  }
+                  ... on ProjectV2ItemFieldLabelValue {
+                    labels(first: 20) {
+                      nodes {
+                        id
+                        name
+                      }
+                    }
+                  }
+                  ... on ProjectV2ItemFieldTextValue {
+                    text
+                    id
+                    updatedAt
+                    creator {
+                      url
+                    }
+                  }
+                  ... on ProjectV2ItemFieldMilestoneValue {
+                    milestone {
+                      id
+                    }
+                  }
+                  ... on ProjectV2ItemFieldRepositoryValue {
+                    repository {
+                      id
+                      url
+                    }
+                  }
+                }
+              }
+            }
+            pageInfo {
+              hasNextPage
+              endCursor
+            }
+          }
+        }
+      }
+    }
+    """
+    while has_next_page:
+        query = project_issues_query % (org_name, project_id, end_cursor)
+
+        result = run_query(query)
+
+        if result:
+            project_items = result['data']['organization']['projectV2']['items']
+            issues.extend(project_items['nodes'])
+
+            page_info = project_items['pageInfo']
+            has_next_page = page_info['hasNextPage']
+            end_cursor = f"\"{page_info['endCursor']}\"" if page_info['endCursor'] else "null"
+        else:
+            has_next_page = False
+
+    return issues
+
+
+def generate_github_issue_title_and_body(test_data):
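+    # Build the issue title and a body with machine-readable mute_list/branch_list markers
+    # that parse_body() relies on when reading the issues back.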
+    owner = test_data[0]['owner']
+    branch = test_data[0]['branch']
+    test_full_names = [f"{d['full_name']}" for d in test_data]
+    test_mute_strings = [f"{d['mute_string']}" for d in test_data]
+    summary = [
+        f"{d['test_name']}: {d['state']} last {d['days_in_state']} days, at {d['date_window']}: success_rate {d['success_rate']}%, {d['summary']}"
+        for d in test_data
+    ]
+
+    # Title
+    if len(test_full_names) > 1:
+        title = f'Mute {test_data[0]["suite_folder"]} {len(test_full_names)} tests'
+    else:
+        title = f'Mute {test_data[0]["full_name"]}'
+
+    # Join the list of test names into a single string
+    test_string = "\n".join(test_full_names)
+
+    test_mute_strings_string = "\n".join(test_mute_strings)
+
+    summary_string = "\n".join(summary)
+
+    # Build a link to the test run history, URL-encoding the parameters
+
+    test_run_history_params = "&".join(
+        urlencode({"full_name": f"__in_{test}"})
+        for test in test_full_names
+    )
+    test_run_history_link = f"{CURRENT_TEST_HISTORY_DASHBOARD}{test_run_history_params}"
+
+    # owner
+    owner_link = f"[{owner}](https://github.com/orgs/ydb-platform/teams/{owner.split('/',1)[1]})"
+    # Issue body
+    body_template = (
+        f"Mute:<!--mute_list_start-->\n"
+        f"{test_string}\n"
+        f"<!--mute_list_end-->\n\n"
+        f"Branch:<!--branch_list_start-->\n"
+        f"{branch}\n"
+        f"<!--branch_list_end-->\n\n"
+        f"**Add line to [muted_ya.txt](https://github.com/ydb-platform/ydb/blob/main/.github/config/muted_ya.txt):**\n"
+        "```\n"
+        f"{test_mute_strings_string}\n"
+        "```\n\n"
+        f"Owner: {owner}\n\n"
+        "**Read more in [mute_rules.md](https://github.com/ydb-platform/ydb/blob/main/.github/config/mute_rules.md)**\n\n"
+        f"**Summary history:** \n {summary_string}\n"
+        "\n\n"
+        f"**Test run history:** [link]({test_run_history_link})\n\n"
+        f"More info in [dashboard]({TEST_HISTORY_DASHBOARD})"
+    )
+
+    return (
+        title,
+        body_template,
+    )
+
+
+def parse_body(body):
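+    # Extract muted test names and branches from an issue body, using the
+    # <!--mute_list_start/end--> and <!--branch_list_start/end--> markers when present,
+    # otherwise falling back to the older "Mute:" body format.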
+    tests = []
+    branches = []
+    prepared_body = ''
+    start_mute_list = "<!--mute_list_start-->"
+    end_mute_list = "<!--mute_list_end-->"
+    start_branch_list = "<!--branch_list_start-->"
+    end_branch_list = "<!--branch_list_end-->"
+
+    # tests
+    if all(x in body for x in [start_mute_list, end_mute_list]):
+        idx1 = body.find(start_mute_list)
+        idx2 = body.find(end_mute_list)
+        lines = body[idx1 + len(start_mute_list) + 1 : idx2].split('\n')
+    else:
+        if body.startswith('Mute:'):
+            prepared_body = body.split('Mute:', 1)[1].strip()
+        elif body.startswith('Mute'):
+            prepared_body = body.split('Mute', 1)[1].strip()
+        elif body.startswith('ydb'):
+            prepared_body = body
+        lines = prepared_body.split('**Add line to')[0].split('\n')
+    tests = [line.strip() for line in lines if line.strip().startswith('ydb/')]
+
+    # branch
+    if all(x in body for x in [start_branch_list, end_branch_list]):
+        idx1 = body.find(start_branch_list)
+        idx2 = body.find(end_branch_list)
+        branches = body[idx1 + len(start_branch_list) + 1 : idx2].split('\n')
+    else:
+        branches = ['main']
+
+    return tests, branches
+
+
+def get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID):
+    issues = fetch_all_issues(ORG_NAME, PROJECT_ID)
+    all_issues_with_content = {}
+    for issue in issues:
+        content = issue['content']
+        if content:
+            body = content['body']
+
+            tests, branches = parse_body(body)
+
+            # Defaults in case the project fields are not set on the issue
+            status = 'N/A'
+            status_updated = '1970-01-01T00:00:01Z'
+            owner = 'N/A'
+            field_values = issue.get('fieldValues', {}).get('nodes', [])
+            for field_value in field_values:
+                field_name = field_value.get('field', {}).get('name', '').lower()
+
+                if field_name == "status" and 'name' in field_value:
+                    status = field_value.get('name', 'N/A')
+                    status_updated = field_value.get('updatedAt', '1970-01-01T00:00:01Z')
+                elif field_name == "owner" and 'name' in field_value:
+                    owner = field_value.get('name', 'N/A')
+
+            print(f"Issue ID: {content['id']}")
+            print(f"Title: {content['title']}")
+            print(f"URL: {content['url']}")
+            print(f"State: {content['state']}")
+            print(f"CreatedAt: {content['createdAt']}")
+            print(f"Status: {status}")
+            print(f"Status updated: {status_updated}")
+            print(f"Owner: {owner}")
+            print("Tests:")
+
+            all_issues_with_content[content['id']] = {}
+            all_issues_with_content[content['id']]['title'] = content['title']
+            all_issues_with_content[content['id']]['url'] = content['url']
+            all_issues_with_content[content['id']]['state'] = content['state']
+            all_issues_with_content[content['id']]['createdAt'] = content['createdAt']
+            all_issues_with_content[content['id']]['status_updated'] = status_updated
+            all_issues_with_content[content['id']]['status'] = status
+            all_issues_with_content[content['id']]['owner'] = owner
+            all_issues_with_content[content['id']]['tests'] = []
+            all_issues_with_content[content['id']]['branches'] = branches
+
+            for test in tests:
+                all_issues_with_content[content['id']]['tests'].append(test)
+                print(f"- {test}")
+            print('\n')
+
+    return all_issues_with_content
+
+
+def get_muted_tests_from_issues():
+    issues = get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID)
+    muted_tests = {}
+    for issue in issues:
+        if issues[issue]["status"] == "Muted":
+            for test in issues[issue]['tests']:
+                if test not in muted_tests:
+                    muted_tests[test] = []
+                    muted_tests[test].append(
+                        {
+                            'url': issues[issue]['url'],
+                            'createdAt': issues[issue]['createdAt'],
+                            'status_updated': issues[issue]['status_updated'],
+                            'status': issues[issue]['status'],
+                            'state': issues[issue]['state'],
+                            'branches': issues[issue]['branches'],
+                        }
+                    )
+
+    return muted_tests
+
+
+def main():
+    if "GITHUB_TOKEN" not in os.environ:
+        print("Error: Env variable GITHUB_TOKEN is missing, skipping")
+        return 1
+    # Example usage:
+    # muted_tests = get_muted_tests_from_issues()
+    # create_and_add_issue_to_project('test issue', 'test issue body', state='Muted', owner='fq')
+
+if __name__ == "__main__":
+    main()