ytest.py 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423
  1. from __future__ import print_function
  2. import base64
  3. import collections
  4. import copy
  5. import json
  6. import os
  7. import re
  8. import six
  9. import subprocess
  10. try:
  11. from StringIO import StringIO
  12. except ImportError:
  13. from io import StringIO
  14. import _common
  15. import _dart_fields as df
  16. import _requirements as reqs
  17. import lib.test_const as consts
  18. import ymake
  19. from _dart_fields import (
  20. serialize_list,
  21. get_unit_list_variable,
  22. deserialize_list,
  23. prepare_env,
  24. create_dart_record,
  25. )
# Separator written between serialized dart records in DART_DATA (see dump_test).
BLOCK_SEPARATOR = '============================================================='
# Upper bound accepted for SPLIT_FACTOR(); enforced in validate_test.
SPLIT_FACTOR_MAX_VALUE = 1000
# Upper bound for (number of test files) * SPLIT_FACTOR when FORK_TEST_FILES is used.
SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
# Allowed values for the TEST_PARTITION property.
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
# Fallback clang-tidy config used when no per-project mapping matches.
DEFAULT_TIDY_CONFIG = "build/config/tests/clang_tidy/config.yaml"
# Prefix -> default-config map consulted by get_default_tidy_config.
DEFAULT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_default_map.json"
# Prefix -> project-config map consulted by get_project_tidy_config.
PROJECT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_project_map.json"
# Editor configs for the current and legacy ktlint toolchains.
KTLINT_CURRENT_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint/.editorconfig"
KTLINT_OLD_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint_old/.editorconfig"

# Dart-field extractors shared by the ytest-style suites (see _dart_fields).
YTEST_FIELDS_BASE = (
    df.AndroidApkTestActivity.value,
    df.BinaryPath.normalized,
    df.BuildFolderPath.normalized,
    df.CustomDependencies.all_standard,
    df.GlobalLibraryPath.value,
    df.ScriptRelPath.second_flat,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_macro_args_and_unit,
    df.TestCwd.from_unit,
    df.TestedProjectFilename.value,
    df.TestedProjectName.unit_name,
    df.TestEnv.value,
    df.TestIosDeviceType.value,
    df.TestIosRuntimeType.value,
    df.TestRecipes.value,
)

# Extra extractors layered on top of YTEST_FIELDS_BASE by some suites.
YTEST_FIELDS_EXTRA = (
    df.Blob.value,
    df.ForkMode.from_macro_and_unit,
    df.Size.from_macro_args_and_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestTimeout.from_macro_args_and_unit,
    df.YtSpec.from_macro_args_and_unit,
)

# Extractors shared by python-executable test registrations (pytest/exectest).
PY_EXEC_FIELDS_BASE = (
    df.Blob.value,
    df.BuildFolderPath.stripped,
    df.CanonizeSubPath.value,
    df.CustomDependencies.test_depends_only,
    df.ForkMode.test_fork_mode,
    df.ForkTestFiles.value,
    df.PythonPaths.value,
    df.Requirements.from_unit,
    df.Size.from_unit,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestCwd.keywords_replaced,
    df.TestData.from_unit_with_canonical,
    df.TestEnv.value,
    df.TestFiles.test_srcs,
    df.TestPartition.value,
    df.TestRecipes.value,
    df.TestTimeout.from_unit_with_default,
    df.UseArcadiaPython.value,
)

# Extractors shared by the ADD_CHECK handlers (check_data/check_resource/ktlint/...).
CHECK_FIELDS_BASE = (
    df.CustomDependencies.depends_only,
    df.Requirements.from_macro_args,
    df.ScriptRelPath.first_flat,
    df.TestEnv.value,
    df.TestName.first_flat,
    df.UseArcadiaPython.value,
)

# NOTE(review): appears unused in this file (get_project_tidy_config uses
# ordered_tidy_map instead) — kept for compatibility; verify before removing.
tidy_config_map = None
def ontest_data(unit, *args):
    """Handler for the removed TEST_DATA macro: always reports a configure error."""
    ymake.report_configure_error("TEST_DATA is removed in favour of DATA")
  95. def is_yt_spec_contain_pool_info(filename): # XXX switch to yson in ymake + perf test for configure
  96. pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
  97. cypress_root_re = re.compile(r"""['"]*cypress_root['"]*\s*=""")
  98. with open(filename, 'r') as afile:
  99. yt_spec = afile.read()
  100. return pool_re.search(yt_spec) and cypress_root_re.search(yt_spec)
def validate_test(unit, kw):
    """Validate and normalize a test dart record.

    Args:
        unit: ymake unit object (used to resolve paths and compute unit path).
        kw: dict of dart fields as produced by create_dart_record.

    Returns:
        (valid_kw, warnings, errors) where valid_kw is a deep copy of kw with
        REQUIREMENTS and TAG re-serialized (or None if any error was found),
        and warnings/errors are lists of user-facing markup strings.

    Raises:
        ValueError: if TEST_PARTITION is not one of PARTITION_MODS.
    """

    def get_list(key):
        # Dart values are serialized lists; a missing key yields an empty list.
        return deserialize_list(kw.get(key, ""))

    valid_kw = copy.deepcopy(kw)
    errors = []
    warnings = []

    mandatory_fields = {"SCRIPT-REL-PATH", "SOURCE-FOLDER-PATH", "TEST-NAME"}
    for field in mandatory_fields - valid_kw.keys():
        errors.append(f"Mandatory field {field!r} is not set in DART")

    # BOOSTTEST is restricted to a fixed whitelist of project prefixes.
    if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(
            ("contrib", "mail", "maps", "tools/idl", "metrika", "devtools", "mds", "yandex_io", "smart_devices")
        ):
            errors.append("BOOSTTEST is not allowed here")

    # Size -> default timeout, ordered by ascending timeout so that the first
    # size whose timeout fits can be suggested to the user below.
    size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))

    size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
    tags = set(get_list("TAG"))
    requirements_orig = get_list("REQUIREMENTS")
    in_autocheck = consts.YaTestTags.NotAutocheck not in tags and consts.YaTestTags.Manual not in tags
    is_fat = consts.YaTestTags.Fat in tags
    is_force_sandbox = consts.YaTestTags.ForceDistbuild not in tags and is_fat
    is_ytexec_run = consts.YaTestTags.YtRunner in tags
    is_fuzzing = valid_kw.get("FUZZING", False)
    is_kvm = 'kvm' in requirements_orig
    requirements = {}
    # Secret-storage requirements accumulate comma-separated values instead of
    # being overwritten on redefinition.
    secret_requirements = ('sb_vault', 'yav')
    list_requirements = secret_requirements
    for req in requirements_orig:
        # Bare 'kvm' is a flag-style requirement without a value.
        if req in ('kvm',):
            requirements[req] = str(True)
            continue

        if ":" in req:
            req_name, req_value = req.split(":", 1)
            if req_name in list_requirements:
                requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
            else:
                if req_name in requirements:
                    # "0" drops a previously set requirement; any other value
                    # redefines it with a warning.
                    if req_value in ["0"]:
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        del requirements[req_name]
                    elif requirements[req_name] != req_value:
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        requirements[req_name] = req_value
                else:
                    requirements[req_name] = req_value
        else:
            errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))

    # Validate each requirement only when parsing above produced no errors.
    if not errors:
        for req_name, req_value in requirements.items():
            try:
                error_msg = reqs.validate_requirement(
                    req_name,
                    req_value,
                    size,
                    is_force_sandbox,
                    in_autocheck,
                    is_fuzzing,
                    is_kvm,
                    is_ytexec_run,
                    requirements,
                )
            except Exception as e:
                error_msg = str(e)
            if error_msg:
                errors += [error_msg]

    # Distbuild supports only this subset of requirements.
    invalid_requirements_for_distbuild = [
        requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')
    ]

    sb_tags = []
    # XXX Unfortunately, some users have already started using colons
    # in their tag names. Use skip set to avoid treating their tag as system ones.
    # Remove this check when all such user tags are removed.
    skip_set = ('ynmt_benchmark', 'bert_models', 'zeliboba_map')
    # Verify the prefixes of the system tags to avoid pointless use of the REQUIREMENTS macro parameters in the TAG macro.
    for tag in tags:
        if tag.startswith('sb:'):
            sb_tags.append(tag)
        elif ':' in tag and not tag.startswith('ya:') and tag.split(':')[0] not in skip_set:
            errors.append(
                "Only [[imp]]sb:[[rst]] and [[imp]]ya:[[rst]] prefixes are allowed in system tags: {}".format(tag)
            )

    if is_fat:
        if size != consts.TestSize.Large:
            errors.append("Only LARGE test may have ya:fat tag")

        # Fat tests destined for distbuild cannot use sandbox-only features.
        if in_autocheck and not is_force_sandbox:
            if invalid_requirements_for_distbuild:
                errors.append(
                    "'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(
                        invalid_requirements_for_distbuild
                    )
                )
            if sb_tags:
                errors.append(
                    "You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(
                        sb_tags
                    )
                )
            if consts.YaTestTags.SandboxCoverage in tags:
                errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
            if is_ytexec_run:
                errors.append(
                    "Running LARGE tests over YT (ya:yt) on Distbuild (ya:force_distbuild) is forbidden. Consider removing TAG(ya:force_distbuild)."
                )
    else:
        if is_force_sandbox:
            errors.append('ya:force_sandbox can be used with LARGE tests only')
        if consts.YaTestTags.Privileged in tags:
            errors.append("ya:privileged can be used with LARGE tests only")
        if in_autocheck and size == consts.TestSize.Large:
            errors.append("LARGE test must have ya:fat tag")

    if consts.YaTestTags.Privileged in tags and 'container' not in requirements:
        errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")

    if size not in size_timeout:
        errors.append(
            "Unknown test size: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(
                size.upper(), ", ".join([sz.upper() for sz in size_timeout.keys()])
            )
        )
    else:
        try:
            # Explicit TEST-TIMEOUT wins; empty/zero falls back to the size default.
            timeout = int(valid_kw.get('TEST-TIMEOUT', size_timeout[size]) or size_timeout[size])
            script_rel_path = valid_kw.get('SCRIPT-REL-PATH')
            if timeout < 0:
                raise Exception("Timeout must be > 0")

            # Linter-style checks are exempt from the per-size timeout cap.
            skip_timeout_verification = script_rel_path in ('java.style', 'ktlint')

            if size_timeout[size] < timeout and in_autocheck and not skip_timeout_verification:
                # Suggest the smallest size whose default timeout fits.
                suggested_size = None
                for s, t in size_timeout.items():
                    if timeout <= t:
                        suggested_size = s
                        break

                if suggested_size:
                    suggested_size = ", suggested size: [[imp]]{}[[rst]]".format(suggested_size.upper())
                else:
                    suggested_size = ""
                errors.append(
                    "Max allowed timeout for test size [[imp]]{}[[rst]] is [[imp]]{} sec[[rst]]{}".format(
                        size.upper(), size_timeout[size], suggested_size
                    )
                )
        except Exception as e:
            errors.append("Error when parsing test timeout: [[bad]]{}[[rst]]".format(e))

    requirements_list = []
    for req_name, req_value in six.iteritems(requirements):
        requirements_list.append(req_name + ":" + req_value)

    valid_kw['REQUIREMENTS'] = serialize_list(sorted(requirements_list))

    # Mark test with ya:external tag if it requests any secret from external storages
    # It's not stable and nonreproducible by definition
    for x in secret_requirements:
        if x in requirements:
            tags.add(consts.YaTestTags.External)

    if valid_kw.get("FUZZ-OPTS"):
        for option in get_list("FUZZ-OPTS"):
            if not option.startswith("-"):
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(
                        option
                    )
                )
                break
            eqpos = option.find("=")
            if eqpos == -1 or len(option) == eqpos + 1:
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(
                        option
                    )
                )
                break
            if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
                errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
                break
            # These options are managed by the test runner itself.
            if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
                errors.append(
                    "You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(
                        option
                    )
                )
                break

    if valid_kw.get("YT-SPEC"):
        if not is_ytexec_run:
            errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
        else:
            for filename in get_list("YT-SPEC"):
                filename = unit.resolve('$S/' + filename)
                if not os.path.exists(filename):
                    errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
                    continue
                # Specs without an explicit pool run in the shared research pool.
                if not is_yt_spec_contain_pool_info(filename):
                    tags.add(consts.YaTestTags.External)
                    tags.add("ya:yt_research_pool")

    partition = valid_kw.get('TEST_PARTITION', 'SEQUENTIAL')
    if partition not in PARTITION_MODS:
        raise ValueError('partition mode should be one of {}, detected: {}'.format(PARTITION_MODS, partition))

    if valid_kw.get('SPLIT-FACTOR'):
        if valid_kw.get('FORK-MODE') == 'none':
            errors.append('SPLIT_FACTOR must be use with FORK_TESTS() or FORK_SUBTESTS() macro')

        value = 1
        try:
            value = int(valid_kw.get('SPLIT-FACTOR'))
            if value <= 0:
                raise ValueError("must be > 0")
            if value > SPLIT_FACTOR_MAX_VALUE:
                raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
        except ValueError as e:
            errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))

        # Cap the total chunk count (files x split factor) for non-LARGE tests.
        if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
            nfiles = count_entries(valid_kw.get('TEST-FILES'))
            if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
                errors.append(
                    'Too much chunks generated:{} (limit: {}). Remove FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
                        nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value
                    )
                )

    if tags:
        valid_kw['TAG'] = serialize_list(sorted(tags))

    unit_path = _common.get_norm_unit_path(unit)
    if (
        not is_fat
        and consts.YaTestTags.Noretries in tags
        and not is_ytexec_run
        and not unit_path.startswith("devtools/dummy_arcadia/test/noretries")
    ):
        errors.append("Only LARGE tests can have 'ya:noretries' tag")

    if errors:
        return None, warnings, errors
    return valid_kw, warnings, errors
  336. def dump_test(unit, kw):
  337. kw = {k: v for k, v in kw.items() if v and (not isinstance(v, str | bytes) or v.strip())}
  338. valid_kw, warnings, errors = validate_test(unit, kw)
  339. for w in warnings:
  340. unit.message(['warn', w])
  341. for e in errors:
  342. ymake.report_configure_error(e)
  343. if valid_kw is None:
  344. return None
  345. string_handler = StringIO()
  346. for k, v in six.iteritems(valid_kw):
  347. print(k + ': ' + six.ensure_str(v), file=string_handler)
  348. print(BLOCK_SEPARATOR, file=string_handler)
  349. data = string_handler.getvalue()
  350. string_handler.close()
  351. return data
  352. def reference_group_var(varname: str, extensions: list[str] | None = None) -> str:
  353. if extensions is None:
  354. return f'"${{join=\\;:{varname}}}"'
  355. return serialize_list(f'${{ext={ext};join=\\;:{varname}}}' for ext in extensions)
  356. def count_entries(x):
  357. # see (de)serialize_list
  358. assert x is None or isinstance(x, str), type(x)
  359. if not x:
  360. return 0
  361. return x.count(";") + 1
  362. def implies(a, b):
  363. return bool((not a) or b)
  364. def match_coverage_extractor_requirements(unit):
  365. # we add test if
  366. return all(
  367. (
  368. # tests are requested
  369. unit.get("TESTS_REQUESTED") == "yes",
  370. # build implies clang coverage, which supports segment extraction from the binaries
  371. unit.get("CLANG_COVERAGE") == "yes",
  372. # contrib was requested
  373. implies(
  374. _common.get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"
  375. ),
  376. )
  377. )
  378. def get_tidy_config_map(unit, map_path):
  379. config_map_path = unit.resolve(os.path.join("$S", map_path))
  380. config_map = {}
  381. try:
  382. with open(config_map_path, 'r') as afile:
  383. config_map = json.load(afile)
  384. except ValueError:
  385. ymake.report_configure_error("{} is invalid json".format(map_path))
  386. except Exception as e:
  387. ymake.report_configure_error(str(e))
  388. return config_map
  389. def prepare_config_map(config_map):
  390. return list(reversed(sorted(config_map.items())))
  391. def get_default_tidy_config(unit):
  392. unit_path = _common.get_norm_unit_path(unit)
  393. tidy_default_config_map = prepare_config_map(get_tidy_config_map(unit, DEFAULT_TIDY_CONFIG_MAP_PATH))
  394. for project_prefix, config_path in tidy_default_config_map:
  395. if unit_path.startswith(project_prefix):
  396. return config_path
  397. return DEFAULT_TIDY_CONFIG
# Lazily-built cache of the project tidy map (prefix, config) pairs; shared
# across all units configured in this process.
ordered_tidy_map = None


def get_project_tidy_config(unit):
    """Return the per-project clang-tidy config for the unit.

    The project map is loaded once and cached in the module-global
    ordered_tidy_map; falls back to get_default_tidy_config when no
    project prefix matches.
    """
    global ordered_tidy_map
    if ordered_tidy_map is None:
        ordered_tidy_map = prepare_config_map(get_tidy_config_map(unit, PROJECT_TIDY_CONFIG_MAP_PATH))
    unit_path = _common.get_norm_unit_path(unit)

    for project_prefix, config_path in ordered_tidy_map:
        if unit_path.startswith(project_prefix):
            return config_path
    else:
        # for/else: no prefix matched, use the default mapping.
        return get_default_tidy_config(unit)
  409. @df.with_fields(
  410. CHECK_FIELDS_BASE
  411. + (
  412. df.TestedProjectName.normalized_basename,
  413. df.SourceFolderPath.normalized,
  414. df.SbrUidExt.value,
  415. df.TestFiles.value,
  416. )
  417. )
  418. def check_data(fields, unit, *args):
  419. flat_args, spec_args = _common.sort_by_keywords(
  420. {
  421. "DEPENDS": -1,
  422. "TIMEOUT": 1,
  423. "DATA": -1,
  424. "TAG": -1,
  425. "REQUIREMENTS": -1,
  426. "FORK_MODE": 1,
  427. "SPLIT_FACTOR": 1,
  428. "FORK_SUBTESTS": 0,
  429. "FORK_TESTS": 0,
  430. "SIZE": 1,
  431. },
  432. args,
  433. )
  434. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  435. if not dart_record[df.TestFiles.KEY]:
  436. return
  437. data = dump_test(unit, dart_record)
  438. if data:
  439. unit.set_property(["DART_DATA", data])
  440. @df.with_fields(
  441. CHECK_FIELDS_BASE
  442. + (
  443. df.TestedProjectName.normalized_basename,
  444. df.SourceFolderPath.normalized,
  445. df.SbrUidExt.value,
  446. df.TestFiles.flat_args_wo_first,
  447. )
  448. )
  449. def check_resource(fields, unit, *args):
  450. flat_args, spec_args = _common.sort_by_keywords(
  451. {
  452. "DEPENDS": -1,
  453. "TIMEOUT": 1,
  454. "DATA": -1,
  455. "TAG": -1,
  456. "REQUIREMENTS": -1,
  457. "FORK_MODE": 1,
  458. "SPLIT_FACTOR": 1,
  459. "FORK_SUBTESTS": 0,
  460. "FORK_TESTS": 0,
  461. "SIZE": 1,
  462. },
  463. args,
  464. )
  465. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  466. data = dump_test(unit, dart_record)
  467. if data:
  468. unit.set_property(["DART_DATA", data])
  469. @df.with_fields(
  470. CHECK_FIELDS_BASE
  471. + (
  472. df.TestedProjectName.normalized_basename,
  473. df.SourceFolderPath.normalized,
  474. df.TestData.ktlint,
  475. df.TestFiles.flat_args_wo_first,
  476. df.ModuleLang.value,
  477. df.KtlintBinary.value,
  478. df.UseKtlintOld.value,
  479. df.KtlintBaselineFile.value,
  480. )
  481. )
  482. def ktlint(fields, unit, *args):
  483. flat_args, spec_args = _common.sort_by_keywords(
  484. {
  485. "DEPENDS": -1,
  486. "TIMEOUT": 1,
  487. "DATA": -1,
  488. "TAG": -1,
  489. "REQUIREMENTS": -1,
  490. "FORK_MODE": 1,
  491. "SPLIT_FACTOR": 1,
  492. "FORK_SUBTESTS": 0,
  493. "FORK_TESTS": 0,
  494. "SIZE": 1,
  495. },
  496. args,
  497. )
  498. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  499. dart_record[df.TestTimeout.KEY] = '120'
  500. data = dump_test(unit, dart_record)
  501. if data:
  502. unit.set_property(["DART_DATA", data])
  503. @df.with_fields(
  504. CHECK_FIELDS_BASE
  505. + (
  506. df.TestedProjectName.normalized_basename,
  507. df.SourceFolderPath.normalized,
  508. df.TestData.java_style,
  509. df.ForkMode.test_fork_mode,
  510. df.TestFiles.java_style,
  511. df.JdkLatestVersion.value,
  512. df.JdkResource.value,
  513. df.ModuleLang.value,
  514. )
  515. )
  516. def java_style(fields, unit, *args):
  517. flat_args, spec_args = _common.sort_by_keywords(
  518. {
  519. "DEPENDS": -1,
  520. "TIMEOUT": 1,
  521. "DATA": -1,
  522. "TAG": -1,
  523. "REQUIREMENTS": -1,
  524. "FORK_MODE": 1,
  525. "SPLIT_FACTOR": 1,
  526. "FORK_SUBTESTS": 0,
  527. "FORK_TESTS": 0,
  528. "SIZE": 1,
  529. },
  530. args,
  531. )
  532. if len(flat_args) < 2:
  533. raise Exception("Not enough arguments for JAVA_STYLE check")
  534. # jstyle should use the latest jdk
  535. unit.onpeerdir([unit.get('JDK_LATEST_PEERDIR')])
  536. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  537. dart_record[df.TestTimeout.KEY] = '240'
  538. dart_record[df.ScriptRelPath.KEY] = 'java.style'
  539. data = dump_test(unit, dart_record)
  540. if data:
  541. unit.set_property(["DART_DATA", data])
  542. @df.with_fields(
  543. CHECK_FIELDS_BASE
  544. + (
  545. df.TestedProjectName.test_dir,
  546. df.SourceFolderPath.test_dir,
  547. df.ForkMode.test_fork_mode,
  548. df.TestFiles.flat_args_wo_first,
  549. df.ModuleLang.value,
  550. )
  551. )
  552. def gofmt(fields, unit, *args):
  553. flat_args, spec_args = _common.sort_by_keywords(
  554. {
  555. "DEPENDS": -1,
  556. "TIMEOUT": 1,
  557. "DATA": -1,
  558. "TAG": -1,
  559. "REQUIREMENTS": -1,
  560. "FORK_MODE": 1,
  561. "SPLIT_FACTOR": 1,
  562. "FORK_SUBTESTS": 0,
  563. "FORK_TESTS": 0,
  564. "SIZE": 1,
  565. },
  566. args,
  567. )
  568. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  569. data = dump_test(unit, dart_record)
  570. if data:
  571. unit.set_property(["DART_DATA", data])
  572. @df.with_fields(
  573. CHECK_FIELDS_BASE
  574. + (
  575. df.TestedProjectName.normalized_basename,
  576. df.SourceFolderPath.normalized,
  577. df.ForkMode.test_fork_mode,
  578. df.TestFiles.flat_args_wo_first,
  579. df.ModuleLang.value,
  580. )
  581. )
  582. def govet(fields, unit, *args):
  583. flat_args, spec_args = _common.sort_by_keywords(
  584. {
  585. "DEPENDS": -1,
  586. "TIMEOUT": 1,
  587. "DATA": -1,
  588. "TAG": -1,
  589. "REQUIREMENTS": -1,
  590. "FORK_MODE": 1,
  591. "SPLIT_FACTOR": 1,
  592. "FORK_SUBTESTS": 0,
  593. "FORK_TESTS": 0,
  594. "SIZE": 1,
  595. },
  596. args,
  597. )
  598. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  599. data = dump_test(unit, dart_record)
  600. if data:
  601. unit.set_property(["DART_DATA", data])
  602. def onadd_check(unit, *args):
  603. if unit.get("TIDY") == "yes":
  604. # graph changed for clang_tidy tests
  605. return
  606. flat_args, *_ = _common.sort_by_keywords(
  607. {
  608. "DEPENDS": -1,
  609. "TIMEOUT": 1,
  610. "DATA": -1,
  611. "TAG": -1,
  612. "REQUIREMENTS": -1,
  613. "FORK_MODE": 1,
  614. "SPLIT_FACTOR": 1,
  615. "FORK_SUBTESTS": 0,
  616. "FORK_TESTS": 0,
  617. "SIZE": 1,
  618. },
  619. args,
  620. )
  621. check_type = flat_args[0]
  622. if check_type == "check.data" and unit.get('VALIDATE_DATA') != "no":
  623. check_data(unit, *args)
  624. elif check_type == "check.resource" and unit.get('VALIDATE_DATA') != "no":
  625. check_resource(unit, *args)
  626. elif check_type == "ktlint":
  627. ktlint(unit, *args)
  628. elif check_type == "JAVA_STYLE" and (unit.get('YMAKE_JAVA_TEST') != 'yes' or unit.get('ALL_SRCDIRS')):
  629. java_style(unit, *args)
  630. elif check_type == "gofmt":
  631. gofmt(unit, *args)
  632. elif check_type == "govet":
  633. govet(unit, *args)
  634. def on_register_no_check_imports(unit):
  635. s = unit.get('NO_CHECK_IMPORTS_FOR_VALUE')
  636. if s not in ('', 'None'):
  637. unit.onresource(['-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(s), s)])
  638. @df.with_fields(
  639. (
  640. df.TestedProjectName.normalized_basename,
  641. df.SourceFolderPath.normalized,
  642. df.TestEnv.value,
  643. df.UseArcadiaPython.value,
  644. df.TestFiles.normalized,
  645. df.ModuleLang.value,
  646. df.NoCheck.value,
  647. )
  648. )
  649. def onadd_check_py_imports(fields, unit, *args):
  650. if unit.get("TIDY") == "yes":
  651. # graph changed for clang_tidy tests
  652. return
  653. if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
  654. return
  655. unit.onpeerdir(['library/python/testing/import_test'])
  656. dart_record = create_dart_record(fields, unit, (), {})
  657. dart_record[df.TestName.KEY] = 'pyimports'
  658. dart_record[df.ScriptRelPath.KEY] = 'py.imports'
  659. data = dump_test(unit, dart_record)
  660. if data:
  661. unit.set_property(["DART_DATA", data])
  662. @df.with_fields(
  663. PY_EXEC_FIELDS_BASE
  664. + (
  665. df.TestName.filename_without_ext,
  666. df.ScriptRelPath.pytest,
  667. df.TestedProjectName.path_filename_basename,
  668. df.ModuleLang.value,
  669. df.BinaryPath.stripped,
  670. df.TestRunnerBin.value,
  671. )
  672. )
  673. def onadd_pytest_bin(fields, unit, *args):
  674. if unit.get("TIDY") == "yes":
  675. # graph changed for clang_tidy tests
  676. return
  677. flat_args, spec_args = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
  678. if flat_args:
  679. ymake.report_configure_error(
  680. 'Unknown arguments found while processing add_pytest_bin macro: {!r}'.format(flat_args)
  681. )
  682. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  683. unit.ondata_files(_common.get_norm_unit_path(unit))
  684. yt_spec = df.YtSpec.from_unit(unit, flat_args, spec_args)
  685. if yt_spec and yt_spec[df.YtSpec.KEY]:
  686. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  687. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  688. if yt_spec:
  689. dart_record |= yt_spec
  690. data = dump_test(unit, dart_record)
  691. if data:
  692. unit.set_property(["DART_DATA", data])
  693. @df.with_fields(
  694. (
  695. df.SourceFolderPath.normalized,
  696. df.TestName.normalized_joined_dir_basename,
  697. df.ScriptRelPath.junit,
  698. df.TestTimeout.from_unit,
  699. df.TestedProjectName.normalized,
  700. df.TestEnv.value,
  701. df.TestData.java_test,
  702. df.ForkMode.test_fork_mode,
  703. df.SplitFactor.from_unit,
  704. df.CustomDependencies.test_depends_only,
  705. df.Tag.from_macro_args_and_unit,
  706. df.Size.from_unit,
  707. df.Requirements.with_maybe_fuzzing,
  708. df.TestRecipes.value,
  709. df.ModuleType.value,
  710. df.UnittestDir.value,
  711. df.JvmArgs.value,
  712. # TODO optimize, SystemProperties is used in TestData
  713. df.SystemProperties.value,
  714. df.TestCwd.from_unit,
  715. df.SkipTest.value,
  716. df.JavaClasspathCmdType.value,
  717. df.JdkResource.value,
  718. df.JdkForTests.value,
  719. df.ModuleLang.value,
  720. df.TestClasspath.value,
  721. df.TestClasspathOrigins.value,
  722. df.TestClasspathDeps.value,
  723. df.TestJar.value,
  724. )
  725. )
  726. def onjava_test(fields, unit, *args):
  727. if unit.get("TIDY") == "yes":
  728. # graph changed for clang_tidy tests
  729. return
  730. assert unit.get('MODULE_TYPE') is not None
  731. if unit.get('MODULE_TYPE') == 'JTEST_FOR':
  732. if not unit.get('UNITTEST_DIR'):
  733. ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
  734. return
  735. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  736. unit.ondata_files(_common.get_norm_unit_path(unit))
  737. yt_spec = df.YtSpec.from_unit_list_var(unit, (), {})
  738. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  739. try:
  740. dart_record = create_dart_record(fields, unit, (), {})
  741. except df.DartValueError:
  742. return
  743. dart_record |= yt_spec
  744. data = dump_test(unit, dart_record)
  745. if data:
  746. unit.set_property(['DART_DATA', data])
  747. @df.with_fields(
  748. (
  749. df.SourceFolderPath.normalized,
  750. df.TestName.normalized_joined_dir_basename_deps,
  751. df.TestedProjectName.normalized,
  752. df.CustomDependencies.test_depends_only,
  753. df.IgnoreClasspathClash.value,
  754. df.ModuleType.value,
  755. df.ModuleLang.value,
  756. df.Classpath.value,
  757. )
  758. )
  759. def onjava_test_deps(fields, unit, *args):
  760. if unit.get("TIDY") == "yes":
  761. # graph changed for clang_tidy tests
  762. return
  763. assert unit.get('MODULE_TYPE') is not None
  764. assert len(args) == 1
  765. mode = args[0]
  766. dart_record = create_dart_record(fields, unit, (args[0],), {})
  767. dart_record[df.ScriptRelPath.KEY] = 'java.dependency.test'
  768. if mode == 'strict':
  769. dart_record[df.StrictClasspathClash.KEY] = 'yes'
  770. data = dump_test(unit, dart_record)
  771. unit.set_property(['DART_DATA', data])
  772. def onsetup_pytest_bin(unit, *args):
  773. use_arcadia_python = unit.get('USE_ARCADIA_PYTHON') == "yes"
  774. if use_arcadia_python:
  775. unit.onresource(['-', 'PY_MAIN={}'.format("library.python.pytest.main:main")]) # XXX
  776. unit.onadd_pytest_bin(list(args))
  777. def onrun(unit, *args):
  778. exectest_cmd = unit.get(["EXECTEST_COMMAND_VALUE"]) or ''
  779. exectest_cmd += "\n" + subprocess.list2cmdline(args)
  780. unit.set(["EXECTEST_COMMAND_VALUE", exectest_cmd])
  781. @df.with_fields(
  782. PY_EXEC_FIELDS_BASE
  783. + (
  784. df.TestName.filename_without_pkg_ext,
  785. df.TestedProjectName.path_filename_basename_without_pkg_ext,
  786. df.BinaryPath.stripped_without_pkg_ext,
  787. )
  788. )
  789. def onsetup_exectest(fields, unit, *args):
  790. if unit.get("TIDY") == "yes":
  791. # graph changed for clang_tidy tests
  792. return
  793. command = unit.get(["EXECTEST_COMMAND_VALUE"])
  794. if command is None:
  795. ymake.report_configure_error("EXECTEST must have at least one RUN macro")
  796. return
  797. command = command.replace("$EXECTEST_COMMAND_VALUE", "")
  798. if "PYTHON_BIN" in command:
  799. unit.ondepends('contrib/tools/python')
  800. unit.set(["TEST_BLOB_DATA", base64.b64encode(six.ensure_binary(command))])
  801. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  802. unit.ondata_files(_common.get_norm_unit_path(unit))
  803. yt_spec = df.YtSpec.from_unit(unit, (), {})
  804. if yt_spec and yt_spec[df.YtSpec.KEY]:
  805. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  806. dart_record = create_dart_record(fields, unit, (), {})
  807. dart_record[df.ScriptRelPath.KEY] = 'exectest'
  808. if yt_spec:
  809. dart_record |= yt_spec
  810. data = dump_test(unit, dart_record)
  811. if data:
  812. unit.set_property(["DART_DATA", data])
  813. def onsetup_run_python(unit):
  814. if unit.get("USE_ARCADIA_PYTHON") == "yes":
  815. unit.ondepends('contrib/tools/python')
def on_add_linter_check(unit, *args):
    """Register a custom_lint dart for the _ADD_LINTER_CHECK macro.

    Expects exactly two positional args (lint name, linter path) plus the
    keyword groups below; validates configs/files/params and emits DART_DATA.
    Returns early (after reporting via unit.message) on any validation error.
    """
    if unit.get("TIDY") == "yes":
        return
    source_root_from_prefix = '${ARCADIA_ROOT}/'
    source_root_to_prefix = '$S/'
    unlimited = -1
    # NO_LINT() set to none/none_internal disables linting for this unit
    no_lint_value = _common.get_no_lint_value(unit)
    if no_lint_value in ("none", "none_internal"):
        return
    keywords = {
        "DEPENDS": unlimited,
        "FILES": unlimited,
        "CONFIGS": unlimited,
        "GLOBAL_RESOURCES": unlimited,
        "FILE_PROCESSING_TIME": 1,
        "EXTRA_PARAMS": unlimited,
    }
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)
    if len(flat_args) != 2:
        unit.message(['ERROR', '_ADD_LINTER_CHECK params: expected 2 free parameters'])
        return
    configs = []
    for cfg in spec_args.get('CONFIGS', []):
        # config paths are resolved relative to the source root ($S/)
        filename = unit.resolve(source_root_to_prefix + cfg)
        if not os.path.exists(filename):
            unit.message(['ERROR', 'Configuration file {} is not found'.format(filename)])
            return
        configs.append(cfg)
    deps = []
    lint_name, linter = flat_args
    # the linter's own directory is always a dependency
    deps.append(os.path.dirname(linter))
    test_files = []
    for path in spec_args.get('FILES', []):
        # normalize ${ARCADIA_ROOT}/ paths to the canonical $S/ prefix;
        # paths with any other prefix are silently dropped
        if path.startswith(source_root_from_prefix):
            test_files.append(path.replace(source_root_from_prefix, source_root_to_prefix, 1))
        elif path.startswith(source_root_to_prefix):
            test_files.append(path)
    if lint_name == 'cpp_style':
        # cpp_style lints every source of the module via a group var reference
        files_dart = reference_group_var("ALL_SRCS", consts.STYLE_CPP_ALL_EXTS)
    else:
        if not test_files:
            unit.message(['WARN', 'No files to lint for {}'.format(lint_name)])
            return
        files_dart = serialize_list(test_files)
    for arg in spec_args.get('EXTRA_PARAMS', []):
        if '=' not in arg:
            unit.message(['WARN', 'Wrong EXTRA_PARAMS value: "{}". Values must have format "name=value".'.format(arg)])
            return
    deps += spec_args.get('DEPENDS', [])
    for dep in deps:
        unit.ondepends(dep)
    for resource in spec_args.get('GLOBAL_RESOURCES', []):
        unit.onpeerdir(resource)
    test_record = {
        'TEST-NAME': lint_name,
        'SCRIPT-REL-PATH': 'custom_lint',
        'TESTED-PROJECT-NAME': unit.name(),
        'SOURCE-FOLDER-PATH': _common.get_norm_unit_path(unit),
        'CUSTOM-DEPENDENCIES': " ".join(deps),
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON') or '',
        # TODO remove FILES, see DEVTOOLS-7052
        'FILES': files_dart,
        'TEST-FILES': files_dart,
        # Linter specific parameters
        # TODO Add configs to DATA. See YMAKE-427
        'LINT-CONFIGS': serialize_list(configs),
        'LINT-NAME': lint_name,
        'LINT-FILE-PROCESSING-TIME': spec_args.get('FILE_PROCESSING_TIME', [''])[0],
        'LINT-EXTRA-PARAMS': serialize_list(spec_args.get('EXTRA_PARAMS', [])),
        'LINTER': linter,
    }
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
  891. @df.with_fields(
  892. YTEST_FIELDS_BASE
  893. + (
  894. df.TestName.value,
  895. df.TestPartition.value,
  896. df.ModuleLang.value,
  897. )
  898. )
  899. def clang_tidy(fields, unit, *args):
  900. keywords = {
  901. "DEPENDS": -1,
  902. "DATA": -1,
  903. "TIMEOUT": 1,
  904. "FORK_MODE": 1,
  905. "SPLIT_FACTOR": 1,
  906. "FORK_SUBTESTS": 0,
  907. "FORK_TESTS": 0,
  908. }
  909. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  910. if unit.get("TIDY_CONFIG"):
  911. default_config_path = unit.get("TIDY_CONFIG")
  912. project_config_path = unit.get("TIDY_CONFIG")
  913. else:
  914. default_config_path = get_default_tidy_config(unit)
  915. project_config_path = get_project_tidy_config(unit)
  916. unit.set(["DEFAULT_TIDY_CONFIG", default_config_path])
  917. unit.set(["PROJECT_TIDY_CONFIG", project_config_path])
  918. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  919. data = dump_test(unit, dart_record)
  920. if data:
  921. unit.set_property(["DART_DATA", data])
  922. @df.with_fields(
  923. YTEST_FIELDS_BASE
  924. + YTEST_FIELDS_EXTRA
  925. + (
  926. df.TestName.value,
  927. df.TestData.from_macro_args_and_unit,
  928. df.Requirements.from_macro_args_and_unit,
  929. df.TestPartition.value,
  930. df.ModuleLang.value,
  931. )
  932. )
  933. def unittest_py(fields, unit, *args):
  934. keywords = {
  935. "DEPENDS": -1,
  936. "DATA": -1,
  937. "TIMEOUT": 1,
  938. "FORK_MODE": 1,
  939. "SPLIT_FACTOR": 1,
  940. "FORK_SUBTESTS": 0,
  941. "FORK_TESTS": 0,
  942. }
  943. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  944. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  945. unit.ondata_files(_common.get_norm_unit_path(unit))
  946. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  947. data = dump_test(unit, dart_record)
  948. if data:
  949. unit.set_property(["DART_DATA", data])
  950. @df.with_fields(
  951. YTEST_FIELDS_BASE
  952. + YTEST_FIELDS_EXTRA
  953. + (
  954. df.TestName.value,
  955. df.TestData.from_macro_args_and_unit,
  956. df.Requirements.from_macro_args_and_unit,
  957. df.TestPartition.value,
  958. df.ModuleLang.value,
  959. )
  960. )
  961. def gunittest(fields, unit, *args):
  962. keywords = {
  963. "DEPENDS": -1,
  964. "DATA": -1,
  965. "TIMEOUT": 1,
  966. "FORK_MODE": 1,
  967. "SPLIT_FACTOR": 1,
  968. "FORK_SUBTESTS": 0,
  969. "FORK_TESTS": 0,
  970. }
  971. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  972. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  973. unit.ondata_files(_common.get_norm_unit_path(unit))
  974. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  975. data = dump_test(unit, dart_record)
  976. if data:
  977. unit.set_property(["DART_DATA", data])
  978. @df.with_fields(
  979. YTEST_FIELDS_BASE
  980. + YTEST_FIELDS_EXTRA
  981. + (
  982. df.TestName.value,
  983. df.TestData.from_macro_args_and_unit,
  984. df.Requirements.from_macro_args_and_unit,
  985. df.TestPartition.value,
  986. df.ModuleLang.value,
  987. df.BenchmarkOpts.value,
  988. )
  989. )
  990. def g_benchmark(fields, unit, *args):
  991. keywords = {
  992. "DEPENDS": -1,
  993. "DATA": -1,
  994. "TIMEOUT": 1,
  995. "FORK_MODE": 1,
  996. "SPLIT_FACTOR": 1,
  997. "FORK_SUBTESTS": 0,
  998. "FORK_TESTS": 0,
  999. }
  1000. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1001. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1002. unit.ondata_files(_common.get_norm_unit_path(unit))
  1003. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1004. data = dump_test(unit, dart_record)
  1005. if data:
  1006. unit.set_property(["DART_DATA", data])
  1007. @df.with_fields(
  1008. YTEST_FIELDS_BASE
  1009. + YTEST_FIELDS_EXTRA
  1010. + (
  1011. df.TestName.value,
  1012. df.TestData.from_macro_args_and_unit_with_canonical,
  1013. df.Requirements.from_macro_args_and_unit,
  1014. df.TestPartition.value,
  1015. df.ModuleLang.value,
  1016. )
  1017. )
  1018. def go_test(fields, unit, *args):
  1019. keywords = {
  1020. "DEPENDS": -1,
  1021. "DATA": -1,
  1022. "TIMEOUT": 1,
  1023. "FORK_MODE": 1,
  1024. "SPLIT_FACTOR": 1,
  1025. "FORK_SUBTESTS": 0,
  1026. "FORK_TESTS": 0,
  1027. }
  1028. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1029. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1030. unit.ondata_files(_common.get_norm_unit_path(unit))
  1031. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1032. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1033. data = dump_test(unit, dart_record)
  1034. if data:
  1035. unit.set_property(["DART_DATA", data])
  1036. @df.with_fields(
  1037. YTEST_FIELDS_BASE
  1038. + YTEST_FIELDS_EXTRA
  1039. + (
  1040. df.TestName.value,
  1041. df.TestData.from_macro_args_and_unit,
  1042. df.Requirements.from_macro_args_and_unit,
  1043. df.TestPartition.value,
  1044. )
  1045. )
  1046. def boost_test(fields, unit, *args):
  1047. keywords = {
  1048. "DEPENDS": -1,
  1049. "DATA": -1,
  1050. "TIMEOUT": 1,
  1051. "FORK_MODE": 1,
  1052. "SPLIT_FACTOR": 1,
  1053. "FORK_SUBTESTS": 0,
  1054. "FORK_TESTS": 0,
  1055. }
  1056. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1057. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1058. unit.ondata_files(_common.get_norm_unit_path(unit))
  1059. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1060. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1061. data = dump_test(unit, dart_record)
  1062. if data:
  1063. unit.set_property(["DART_DATA", data])
  1064. @df.with_fields(
  1065. YTEST_FIELDS_BASE
  1066. + YTEST_FIELDS_EXTRA
  1067. + (
  1068. df.TestName.value,
  1069. df.TestData.from_macro_args_and_unit,
  1070. df.Requirements.with_maybe_fuzzing,
  1071. df.FuzzDicts.value,
  1072. df.FuzzOpts.value,
  1073. df.Fuzzing.value,
  1074. )
  1075. )
  1076. def fuzz_test(fields, unit, *args):
  1077. keywords = {
  1078. "DEPENDS": -1,
  1079. "DATA": -1,
  1080. "TIMEOUT": 1,
  1081. "FORK_MODE": 1,
  1082. "SPLIT_FACTOR": 1,
  1083. "FORK_SUBTESTS": 0,
  1084. "FORK_TESTS": 0,
  1085. }
  1086. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1087. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1088. unit.ondata_files(_common.get_norm_unit_path(unit))
  1089. unit.ondata_files("fuzzing/{}/corpus.json".format(_common.get_norm_unit_path(unit)))
  1090. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1091. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1092. data = dump_test(unit, dart_record)
  1093. if data:
  1094. unit.set_property(["DART_DATA", data])
  1095. @df.with_fields(
  1096. YTEST_FIELDS_BASE
  1097. + YTEST_FIELDS_EXTRA
  1098. + (
  1099. df.TestName.value,
  1100. df.TestData.from_macro_args_and_unit,
  1101. df.Requirements.from_macro_args_and_unit,
  1102. df.TestPartition.value,
  1103. df.ModuleLang.value,
  1104. df.BenchmarkOpts.value,
  1105. )
  1106. )
  1107. def y_benchmark(fields, unit, *args):
  1108. keywords = {
  1109. "DEPENDS": -1,
  1110. "DATA": -1,
  1111. "TIMEOUT": 1,
  1112. "FORK_MODE": 1,
  1113. "SPLIT_FACTOR": 1,
  1114. "FORK_SUBTESTS": 0,
  1115. "FORK_TESTS": 0,
  1116. }
  1117. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1118. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1119. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1120. data = dump_test(unit, dart_record)
  1121. if data:
  1122. unit.set_property(["DART_DATA", data])
  1123. @df.with_fields(
  1124. YTEST_FIELDS_BASE
  1125. + YTEST_FIELDS_EXTRA
  1126. + (
  1127. df.TestName.value,
  1128. df.TestData.from_macro_args_and_unit,
  1129. df.Requirements.from_macro_args_and_unit,
  1130. df.TestPartition.value,
  1131. )
  1132. )
  1133. def coverage_extractor(fields, unit, *args):
  1134. keywords = {
  1135. "DEPENDS": -1,
  1136. "DATA": -1,
  1137. "TIMEOUT": 1,
  1138. "FORK_MODE": 1,
  1139. "SPLIT_FACTOR": 1,
  1140. "FORK_SUBTESTS": 0,
  1141. "FORK_TESTS": 0,
  1142. }
  1143. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1144. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1145. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1146. data = dump_test(unit, dart_record)
  1147. if data:
  1148. unit.set_property(["DART_DATA", data])
  1149. @df.with_fields(
  1150. YTEST_FIELDS_BASE
  1151. + YTEST_FIELDS_EXTRA
  1152. + (
  1153. df.TestName.first_flat_with_bench,
  1154. df.TestData.from_macro_args_and_unit,
  1155. df.Requirements.from_macro_args_and_unit,
  1156. df.TestPartition.value,
  1157. df.GoBenchTimeout.value,
  1158. df.ModuleLang.value,
  1159. )
  1160. )
  1161. def go_bench(fields, unit, *args):
  1162. keywords = {
  1163. "DEPENDS": -1,
  1164. "DATA": -1,
  1165. "TIMEOUT": 1,
  1166. "FORK_MODE": 1,
  1167. "SPLIT_FACTOR": 1,
  1168. "FORK_SUBTESTS": 0,
  1169. "FORK_TESTS": 0,
  1170. }
  1171. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1172. tags = df.Tag.from_macro_args_and_unit(unit, flat_args, spec_args)[df.Tag.KEY]
  1173. if "ya:run_go_benchmark" not in tags:
  1174. return
  1175. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1176. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1177. data = dump_test(unit, dart_record)
  1178. if data:
  1179. unit.set_property(["DART_DATA", data])
def onadd_ytest(unit, *args):
    """Dispatch the ADD_YTEST macro to the per-test-type dart handler.

    flat_args[1] is the test type (unittest.py, go.test, clang_tidy, ...);
    the original args are forwarded untouched to the selected handler.
    Branch ORDER matters: the tidy enable/disable checks must run before
    the per-type dispatch.
    """
    keywords = {
        "DEPENDS": -1,
        "DATA": -1,
        "TIMEOUT": 1,
        "FORK_MODE": 1,
        "SPLIT_FACTOR": 1,
        "FORK_SUBTESTS": 0,
        "FORK_TESTS": 0,
    }
    flat_args, *_ = _common.sort_by_keywords(keywords, args)
    test_type = flat_args[1]
    # TIDY not supported for module
    if unit.get("TIDY_ENABLED") == "yes" and test_type != "clang_tidy":
        return
    # TIDY explicitly disabled for module in ymake.core.conf
    elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") != "yes":
        return
    # TIDY disabled for module in ya.make
    elif unit.get("TIDY") == "yes" and unit.get("TIDY_ENABLED") != "yes":
        return
    elif test_type == "no.test":
        return
    elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") == "yes":
        clang_tidy(unit, *args)
    elif test_type == "unittest.py":
        unittest_py(unit, *args)
    elif test_type == "gunittest":
        gunittest(unit, *args)
    elif test_type == "g_benchmark":
        g_benchmark(unit, *args)
    elif test_type == "go.test":
        go_test(unit, *args)
    elif test_type == "boost.test":
        boost_test(unit, *args)
    elif test_type == "fuzz.test":
        fuzz_test(unit, *args)
    elif test_type == "y_benchmark":
        y_benchmark(unit, *args)
    # coverage.extractor darts are emitted only when the module meets the
    # coverage requirements check
    elif test_type == "coverage.extractor" and match_coverage_extractor_requirements(unit):
        coverage_extractor(unit, *args)
    elif test_type == "go.bench":
        go_bench(unit, *args)