ytest.py 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487
  1. from __future__ import print_function
  2. import base64
  3. import collections
  4. import copy
  5. import json
  6. import os
  7. import re
  8. import six
  9. import subprocess
  10. try:
  11. from StringIO import StringIO
  12. except ImportError:
  13. from io import StringIO
  14. import _common
  15. import _dart_fields as df
  16. import _requirements as reqs
  17. import lib.test_const as consts
  18. import ymake
  19. from _dart_fields import (
  20. serialize_list,
  21. get_unit_list_variable,
  22. deserialize_list,
  23. create_dart_record,
  24. )
# Separator line printed between serialized test records in DART output.
BLOCK_SEPARATOR = '============================================================='
# Upper bound for the SPLIT_FACTOR macro value.
SPLIT_FACTOR_MAX_VALUE = 1000
# Upper bound for (number of test files) * SPLIT_FACTOR when FORK_TEST_FILES is used.
SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
# Allowed values of the TEST_PARTITION property.
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
# Fallback clang-tidy config used when no per-project entry matches.
DEFAULT_TIDY_CONFIG = "build/config/tests/clang_tidy/config.yaml"
# JSON maps from project path prefix to clang-tidy config path.
DEFAULT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_default_map.json"
PROJECT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_project_map.json"
# Editorconfig files for the current and the legacy ktlint checks.
KTLINT_CURRENT_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint/.editorconfig"
KTLINT_OLD_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint_old/.editorconfig"
# Dart-record field extractors (see _dart_fields) shared by ytest-style suites.
YTEST_FIELDS_BASE = (
    df.AndroidApkTestActivity.value,
    df.BinaryPath.normalized,
    df.BuildFolderPath.normalized,
    df.CustomDependencies.all_standard,
    df.GlobalLibraryPath.value,
    df.ScriptRelPath.second_flat,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_macro_args_and_unit,
    df.TestCwd.from_unit,
    df.TestedProjectFilename.value,
    df.TestedProjectName.unit_name,
    df.TestEnv.value,
    df.TestIosDeviceType.value,
    df.TestIosRuntimeType.value,
    df.TestRecipes.value,
)

# Extra extractors layered on top of YTEST_FIELDS_BASE by some suites.
YTEST_FIELDS_EXTRA = (
    df.Blob.value,
    df.ForkMode.from_macro_and_unit,
    df.Size.from_macro_args_and_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestTimeout.from_macro_args_and_unit,
    df.YtSpec.from_macro_args_and_unit,
)

# Extractors shared by python-executing test suites (pytest and friends).
PY_EXEC_FIELDS_BASE = (
    df.Blob.value,
    df.BuildFolderPath.stripped,
    df.CanonizeSubPath.value,
    df.CustomDependencies.test_depends_only,
    df.ForkMode.test_fork_mode,
    df.ForkTestFiles.value,
    df.PythonPaths.value,
    df.Requirements.from_unit,
    df.Size.from_unit,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestCwd.keywords_replaced,
    df.TestData.from_unit_with_canonical,
    df.TestEnv.value,
    df.TestFiles.test_srcs,
    df.TestPartition.value,
    df.TestRecipes.value,
    df.TestTimeout.from_unit_with_default,
    df.UseArcadiaPython.value,
)

# Extractors shared by the ADD_CHECK-style checks (check.data, gofmt, ktlint, ...).
CHECK_FIELDS_BASE = (
    df.CustomDependencies.depends_only,
    df.Requirements.from_macro_args,
    df.ScriptRelPath.first_flat,
    df.TestEnv.value,
    df.TestName.first_flat,
    df.UseArcadiaPython.value,
)

# Extractors for custom-linter checks.
LINTER_FIELDS_BASE = (
    df.LintName.value,
    df.LintExtraParams.from_macro_args,
    df.TestName.name_from_macro_args,
    df.TestedProjectName.unit_name,
    df.SourceFolderPath.normalized,
    df.TestEnv.value,
    df.UseArcadiaPython.value,
    df.LintFileProcessingTime.from_macro_args,
    df.Linter.value,
    df.CustomDependencies.depends_with_linter,
)

# Module-level cache placeholder for a tidy config map (not referenced in this
# part of the file; the project map cache below uses ordered_tidy_map instead).
tidy_config_map = None
  104. def ontest_data(unit, *args):
  105. ymake.report_configure_error("TEST_DATA is removed in favour of DATA")
  106. def is_yt_spec_contain_pool_info(filename): # XXX switch to yson in ymake + perf test for configure
  107. pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
  108. cypress_root_re = re.compile(r"""['"]*cypress_root['"]*\s*=""")
  109. with open(filename, 'r') as afile:
  110. yt_spec = afile.read()
  111. return pool_re.search(yt_spec) and cypress_root_re.search(yt_spec)
def validate_test(unit, kw):
    """Validate a dart record before it is serialized to DART.

    Checks mandatory fields, REQUIREMENTS syntax and values, tag prefixes,
    size/timeout consistency, fuzzer options, YT specs and split factors.
    Works on a deep copy of kw (REQUIREMENTS and TAG get normalized in it).

    :param unit: build unit being configured
    :param kw: dart record as a dict of DART keys to serialized values
    :return: (valid_kw_or_None, warnings, errors); valid_kw is None when at
             least one error was found
    :raises ValueError: when TEST_PARTITION is not a known partition mode
    """

    def get_list(key):
        # Dart values are serialized lists; a missing key means an empty list.
        return deserialize_list(kw.get(key, ""))

    valid_kw = copy.deepcopy(kw)
    errors = []
    warnings = []

    mandatory_fields = {"SCRIPT-REL-PATH", "SOURCE-FOLDER-PATH", "TEST-NAME"}
    for field in mandatory_fields - valid_kw.keys():
        errors.append(f"Mandatory field {field!r} is not set in DART")

    # BOOSTTEST suites are only allowed under a fixed set of project roots.
    if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(
            ("contrib", "mail", "maps", "tools/idl", "metrika", "devtools", "mds", "yandex_io", "smart_devices")
        ):
            errors.append("BOOSTTEST is not allowed here")

    # Default timeouts ordered ascending by value so the smallest fitting
    # size can be suggested when the timeout is too big for the stated size.
    size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))

    size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
    tags = set(get_list("TAG"))
    requirements_orig = get_list("REQUIREMENTS")
    in_autocheck = consts.YaTestTags.NotAutocheck not in tags and consts.YaTestTags.Manual not in tags
    is_fat = consts.YaTestTags.Fat in tags
    is_force_sandbox = consts.YaTestTags.ForceDistbuild not in tags and is_fat
    is_ytexec_run = consts.YaTestTags.YtRunner in tags
    is_fuzzing = valid_kw.get("FUZZING", False)
    is_kvm = 'kvm' in requirements_orig
    requirements = {}
    secret_requirements = ('sb_vault', 'yav')
    # Secret requirements may be given several times; their values are merged.
    list_requirements = secret_requirements
    for req in requirements_orig:
        if req in ('kvm',):
            # 'kvm' is a bare flag without a value.
            requirements[req] = str(True)
            continue
        if ":" in req:
            req_name, req_value = req.split(":", 1)
            if req_name in list_requirements:
                # Accumulate list-valued requirements as a comma-separated string.
                requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
            else:
                if req_name in requirements:
                    if req_value in ["0"]:
                        # A "0" value drops a previously set requirement.
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        del requirements[req_name]
                    elif requirements[req_name] != req_value:
                        # A later different value overrides the earlier one with a warning.
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        requirements[req_name] = req_value
                else:
                    requirements[req_name] = req_value
        else:
            errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))

    # Validate individual requirement values only when the syntax was clean.
    if not errors:
        for req_name, req_value in requirements.items():
            try:
                error_msg = reqs.validate_requirement(
                    req_name,
                    req_value,
                    size,
                    is_force_sandbox,
                    in_autocheck,
                    is_fuzzing,
                    is_kvm,
                    is_ytexec_run,
                    requirements,
                )
            except Exception as e:
                error_msg = str(e)
            if error_msg:
                errors += [error_msg]

    # Requirements outside this whitelist are rejected for distbuild runs below.
    invalid_requirements_for_distbuild = [
        requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')
    ]

    sb_tags = []
    # XXX Unfortunately, some users have already started using colons
    # in their tag names. Use skip set to avoid treating their tag as system ones.
    # Remove this check when all such user tags are removed.
    skip_set = ('ynmt_benchmark', 'bert_models', 'zeliboba_map')
    # Verify the prefixes of the system tags to avoid pointless use of the REQUIREMENTS macro parameters in the TAG macro.
    for tag in tags:
        if tag.startswith('sb:'):
            sb_tags.append(tag)
        elif ':' in tag and not tag.startswith('ya:') and tag.split(':')[0] not in skip_set:
            errors.append(
                "Only [[imp]]sb:[[rst]] and [[imp]]ya:[[rst]] prefixes are allowed in system tags: {}".format(tag)
            )

    if is_fat:
        if size != consts.TestSize.Large:
            errors.append("Only LARGE test may have ya:fat tag")

        if in_autocheck and not is_force_sandbox:
            if invalid_requirements_for_distbuild:
                errors.append(
                    "'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(
                        invalid_requirements_for_distbuild
                    )
                )
            if sb_tags:
                errors.append(
                    "You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(
                        sb_tags
                    )
                )
            if consts.YaTestTags.SandboxCoverage in tags:
                errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
            if is_ytexec_run:
                errors.append(
                    "Running LARGE tests over YT (ya:yt) on Distbuild (ya:force_distbuild) is forbidden. Consider removing TAG(ya:force_distbuild)."
                )
    else:
        if is_force_sandbox:
            errors.append('ya:force_sandbox can be used with LARGE tests only')
        if consts.YaTestTags.Privileged in tags:
            errors.append("ya:privileged can be used with LARGE tests only")
        if in_autocheck and size == consts.TestSize.Large:
            errors.append("LARGE test must have ya:fat tag")

    if consts.YaTestTags.Privileged in tags and 'container' not in requirements:
        errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")

    if size not in size_timeout:
        errors.append(
            "Unknown test size: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(
                size.upper(), ", ".join([sz.upper() for sz in size_timeout.keys()])
            )
        )
    else:
        try:
            # An explicit timeout falls back to the size default when missing/empty.
            timeout = int(valid_kw.get('TEST-TIMEOUT', size_timeout[size]) or size_timeout[size])
            script_rel_path = valid_kw.get('SCRIPT-REL-PATH')
            if timeout < 0:
                raise Exception("Timeout must be > 0")

            # Style checks are exempt from the timeout-vs-size verification.
            skip_timeout_verification = script_rel_path in ('java.style', 'ktlint')

            if size_timeout[size] < timeout and in_autocheck and not skip_timeout_verification:
                # Suggest the smallest size whose default timeout still fits.
                suggested_size = None
                for s, t in size_timeout.items():
                    if timeout <= t:
                        suggested_size = s
                        break

                if suggested_size:
                    suggested_size = ", suggested size: [[imp]]{}[[rst]]".format(suggested_size.upper())
                else:
                    suggested_size = ""
                errors.append(
                    "Max allowed timeout for test size [[imp]]{}[[rst]] is [[imp]]{} sec[[rst]]{}".format(
                        size.upper(), size_timeout[size], suggested_size
                    )
                )
        except Exception as e:
            errors.append("Error when parsing test timeout: [[bad]]{}[[rst]]".format(e))

    requirements_list = []
    for req_name, req_value in six.iteritems(requirements):
        requirements_list.append(req_name + ":" + req_value)
    valid_kw['REQUIREMENTS'] = serialize_list(sorted(requirements_list))

    # Mark test with ya:external tag if it requests any secret from external storages
    # It's not stable and nonreproducible by definition
    for x in secret_requirements:
        if x in requirements:
            tags.add(consts.YaTestTags.External)

    if valid_kw.get("FUZZ-OPTS"):
        for option in get_list("FUZZ-OPTS"):
            if not option.startswith("-"):
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(
                        option
                    )
                )
                break
            eqpos = option.find("=")
            if eqpos == -1 or len(option) == eqpos + 1:
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(
                        option
                    )
                )
                break
            if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
                errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
                break
            # These options are managed by the test runner itself.
            if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
                errors.append(
                    "You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(
                        option
                    )
                )
                break

    if valid_kw.get("YT-SPEC"):
        if not is_ytexec_run:
            errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
        else:
            for filename in get_list("YT-SPEC"):
                filename = unit.resolve('$S/' + filename)
                if not os.path.exists(filename):
                    errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
                    continue
                # Specs without an explicit pool are tagged as research-pool runs.
                if not is_yt_spec_contain_pool_info(filename):
                    tags.add(consts.YaTestTags.External)
                    tags.add("ya:yt_research_pool")

    partition = valid_kw.get('TEST_PARTITION', 'SEQUENTIAL')
    if partition not in PARTITION_MODS:
        raise ValueError('partition mode should be one of {}, detected: {}'.format(PARTITION_MODS, partition))

    if valid_kw.get('SPLIT-FACTOR'):
        if valid_kw.get('FORK-MODE') == 'none':
            errors.append('SPLIT_FACTOR must be use with FORK_TESTS() or FORK_SUBTESTS() macro')
        value = 1
        try:
            value = int(valid_kw.get('SPLIT-FACTOR'))
            if value <= 0:
                raise ValueError("must be > 0")
            if value > SPLIT_FACTOR_MAX_VALUE:
                raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
        except ValueError as e:
            errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))

        # Chunk count (files x split factor) is capped for non-LARGE tests.
        if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
            nfiles = count_entries(valid_kw.get('TEST-FILES'))
            if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
                errors.append(
                    'Too much chunks generated:{} (limit: {}). Remove FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
                        nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value
                    )
                )

    if tags:
        valid_kw['TAG'] = serialize_list(sorted(tags))

    unit_path = _common.get_norm_unit_path(unit)
    if (
        not is_fat
        and consts.YaTestTags.Noretries in tags
        and not is_ytexec_run
        and not unit_path.startswith("devtools/dummy_arcadia/test/noretries")
    ):
        errors.append("Only LARGE tests can have 'ya:noretries' tag")

    if errors:
        return None, warnings, errors
    return valid_kw, warnings, errors
  347. def dump_test(unit, kw):
  348. kw = {k: v for k, v in kw.items() if v and (not isinstance(v, str | bytes) or v.strip())}
  349. valid_kw, warnings, errors = validate_test(unit, kw)
  350. for w in warnings:
  351. unit.message(['warn', w])
  352. for e in errors:
  353. ymake.report_configure_error(e)
  354. if valid_kw is None:
  355. return None
  356. string_handler = StringIO()
  357. for k, v in six.iteritems(valid_kw):
  358. print(k + ': ' + six.ensure_str(v), file=string_handler)
  359. print(BLOCK_SEPARATOR, file=string_handler)
  360. data = string_handler.getvalue()
  361. string_handler.close()
  362. return data
  363. def count_entries(x):
  364. # see (de)serialize_list
  365. assert x is None or isinstance(x, str), type(x)
  366. if not x:
  367. return 0
  368. return x.count(";") + 1
  369. def implies(a, b):
  370. return bool((not a) or b)
  371. def match_coverage_extractor_requirements(unit):
  372. # we add test if
  373. return all(
  374. (
  375. # tests are requested
  376. unit.get("TESTS_REQUESTED") == "yes",
  377. # build implies clang coverage, which supports segment extraction from the binaries
  378. unit.get("CLANG_COVERAGE") == "yes",
  379. # contrib was requested
  380. implies(
  381. _common.get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"
  382. ),
  383. )
  384. )
  385. def get_tidy_config_map(unit, map_path):
  386. config_map_path = unit.resolve(os.path.join("$S", map_path))
  387. config_map = {}
  388. try:
  389. with open(config_map_path, 'r') as afile:
  390. config_map = json.load(afile)
  391. except ValueError:
  392. ymake.report_configure_error("{} is invalid json".format(map_path))
  393. except Exception as e:
  394. ymake.report_configure_error(str(e))
  395. return config_map
  396. def prepare_config_map(config_map):
  397. return list(reversed(sorted(config_map.items())))
  398. def get_default_tidy_config(unit):
  399. unit_path = _common.get_norm_unit_path(unit)
  400. tidy_default_config_map = prepare_config_map(get_tidy_config_map(unit, DEFAULT_TIDY_CONFIG_MAP_PATH))
  401. for project_prefix, config_path in tidy_default_config_map:
  402. if unit_path.startswith(project_prefix):
  403. return config_path
  404. return DEFAULT_TIDY_CONFIG
  405. ordered_tidy_map = None
  406. def get_project_tidy_config(unit):
  407. global ordered_tidy_map
  408. if ordered_tidy_map is None:
  409. ordered_tidy_map = prepare_config_map(get_tidy_config_map(unit, PROJECT_TIDY_CONFIG_MAP_PATH))
  410. unit_path = _common.get_norm_unit_path(unit)
  411. for project_prefix, config_path in ordered_tidy_map:
  412. if unit_path.startswith(project_prefix):
  413. return config_path
  414. else:
  415. return get_default_tidy_config(unit)
  416. @df.with_fields(
  417. CHECK_FIELDS_BASE
  418. + (
  419. df.TestedProjectName.normalized_basename,
  420. df.SourceFolderPath.normalized,
  421. df.SbrUidExt.value,
  422. df.TestFiles.value,
  423. )
  424. )
  425. def check_data(fields, unit, *args):
  426. flat_args, spec_args = _common.sort_by_keywords(
  427. {
  428. "DEPENDS": -1,
  429. "TIMEOUT": 1,
  430. "DATA": -1,
  431. "TAG": -1,
  432. "REQUIREMENTS": -1,
  433. "FORK_MODE": 1,
  434. "SPLIT_FACTOR": 1,
  435. "FORK_SUBTESTS": 0,
  436. "FORK_TESTS": 0,
  437. "SIZE": 1,
  438. },
  439. args,
  440. )
  441. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  442. if not dart_record[df.TestFiles.KEY]:
  443. return
  444. dart_record[df.ModuleLang.KEY] = consts.ModuleLang.LANG_AGNOSTIC
  445. data = dump_test(unit, dart_record)
  446. if data:
  447. unit.set_property(["DART_DATA", data])
  448. @df.with_fields(
  449. CHECK_FIELDS_BASE
  450. + (
  451. df.TestedProjectName.normalized_basename,
  452. df.SourceFolderPath.normalized,
  453. df.SbrUidExt.value,
  454. df.TestFiles.flat_args_wo_first,
  455. )
  456. )
  457. def check_resource(fields, unit, *args):
  458. flat_args, spec_args = _common.sort_by_keywords(
  459. {
  460. "DEPENDS": -1,
  461. "TIMEOUT": 1,
  462. "DATA": -1,
  463. "TAG": -1,
  464. "REQUIREMENTS": -1,
  465. "FORK_MODE": 1,
  466. "SPLIT_FACTOR": 1,
  467. "FORK_SUBTESTS": 0,
  468. "FORK_TESTS": 0,
  469. "SIZE": 1,
  470. },
  471. args,
  472. )
  473. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  474. dart_record[df.ModuleLang.KEY] = consts.ModuleLang.LANG_AGNOSTIC
  475. data = dump_test(unit, dart_record)
  476. if data:
  477. unit.set_property(["DART_DATA", data])
  478. @df.with_fields(
  479. CHECK_FIELDS_BASE
  480. + (
  481. df.TestedProjectName.normalized_basename,
  482. df.SourceFolderPath.normalized,
  483. df.TestData.ktlint,
  484. df.TestFiles.flat_args_wo_first,
  485. df.ModuleLang.value,
  486. df.KtlintBinary.value,
  487. df.UseKtlintOld.value,
  488. df.KtlintBaselineFile.value,
  489. )
  490. )
  491. def ktlint(fields, unit, *args):
  492. flat_args, spec_args = _common.sort_by_keywords(
  493. {
  494. "DEPENDS": -1,
  495. "TIMEOUT": 1,
  496. "DATA": -1,
  497. "TAG": -1,
  498. "REQUIREMENTS": -1,
  499. "FORK_MODE": 1,
  500. "SPLIT_FACTOR": 1,
  501. "FORK_SUBTESTS": 0,
  502. "FORK_TESTS": 0,
  503. "SIZE": 1,
  504. },
  505. args,
  506. )
  507. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  508. dart_record[df.TestTimeout.KEY] = '120'
  509. data = dump_test(unit, dart_record)
  510. if data:
  511. unit.set_property(["DART_DATA", data])
  512. @df.with_fields(
  513. CHECK_FIELDS_BASE
  514. + (
  515. df.TestedProjectName.normalized_basename,
  516. df.SourceFolderPath.normalized,
  517. df.TestData.java_style,
  518. df.ForkMode.test_fork_mode,
  519. df.TestFiles.java_style,
  520. df.JdkLatestVersion.value,
  521. df.JdkResource.value,
  522. df.ModuleLang.value,
  523. )
  524. )
  525. def java_style(fields, unit, *args):
  526. flat_args, spec_args = _common.sort_by_keywords(
  527. {
  528. "DEPENDS": -1,
  529. "TIMEOUT": 1,
  530. "DATA": -1,
  531. "TAG": -1,
  532. "REQUIREMENTS": -1,
  533. "FORK_MODE": 1,
  534. "SPLIT_FACTOR": 1,
  535. "FORK_SUBTESTS": 0,
  536. "FORK_TESTS": 0,
  537. "SIZE": 1,
  538. },
  539. args,
  540. )
  541. if len(flat_args) < 2:
  542. raise Exception("Not enough arguments for JAVA_STYLE check")
  543. # jstyle should use the latest jdk
  544. unit.onpeerdir([unit.get('JDK_LATEST_PEERDIR')])
  545. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  546. dart_record[df.TestTimeout.KEY] = '240'
  547. dart_record[df.ScriptRelPath.KEY] = 'java.style'
  548. data = dump_test(unit, dart_record)
  549. if data:
  550. unit.set_property(["DART_DATA", data])
  551. @df.with_fields(
  552. CHECK_FIELDS_BASE
  553. + (
  554. df.TestedProjectName.test_dir,
  555. df.SourceFolderPath.test_dir,
  556. df.ForkMode.test_fork_mode,
  557. df.TestFiles.flat_args_wo_first,
  558. df.ModuleLang.value,
  559. )
  560. )
  561. def gofmt(fields, unit, *args):
  562. flat_args, spec_args = _common.sort_by_keywords(
  563. {
  564. "DEPENDS": -1,
  565. "TIMEOUT": 1,
  566. "DATA": -1,
  567. "TAG": -1,
  568. "REQUIREMENTS": -1,
  569. "FORK_MODE": 1,
  570. "SPLIT_FACTOR": 1,
  571. "FORK_SUBTESTS": 0,
  572. "FORK_TESTS": 0,
  573. "SIZE": 1,
  574. },
  575. args,
  576. )
  577. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  578. data = dump_test(unit, dart_record)
  579. if data:
  580. unit.set_property(["DART_DATA", data])
  581. @df.with_fields(
  582. CHECK_FIELDS_BASE
  583. + (
  584. df.TestedProjectName.normalized_basename,
  585. df.SourceFolderPath.normalized,
  586. df.ForkMode.test_fork_mode,
  587. df.TestFiles.flat_args_wo_first,
  588. df.ModuleLang.value,
  589. )
  590. )
  591. def govet(fields, unit, *args):
  592. flat_args, spec_args = _common.sort_by_keywords(
  593. {
  594. "DEPENDS": -1,
  595. "TIMEOUT": 1,
  596. "DATA": -1,
  597. "TAG": -1,
  598. "REQUIREMENTS": -1,
  599. "FORK_MODE": 1,
  600. "SPLIT_FACTOR": 1,
  601. "FORK_SUBTESTS": 0,
  602. "FORK_TESTS": 0,
  603. "SIZE": 1,
  604. },
  605. args,
  606. )
  607. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  608. data = dump_test(unit, dart_record)
  609. if data:
  610. unit.set_property(["DART_DATA", data])
  611. @df.with_fields(
  612. CHECK_FIELDS_BASE
  613. + (
  614. df.TestedProjectName.normalized_basename,
  615. df.SourceFolderPath.normalized,
  616. df.TestFiles.flat_args_wo_first,
  617. df.ModuleLang.value,
  618. )
  619. )
  620. def detekt_report(fields, unit, *args):
  621. flat_args, spec_args = _common.sort_by_keywords(
  622. {
  623. "DEPENDS": -1,
  624. "TIMEOUT": 1,
  625. "DATA": -1,
  626. "TAG": -1,
  627. "REQUIREMENTS": -1,
  628. "FORK_MODE": 1,
  629. "SPLIT_FACTOR": 1,
  630. "FORK_SUBTESTS": 0,
  631. "FORK_TESTS": 0,
  632. "SIZE": 1,
  633. },
  634. args,
  635. )
  636. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  637. data = dump_test(unit, dart_record)
  638. if data:
  639. unit.set_property(["DART_DATA", data])
  640. def onadd_check(unit, *args):
  641. if unit.get("TIDY") == "yes":
  642. # graph changed for clang_tidy tests
  643. return
  644. flat_args, *_ = _common.sort_by_keywords(
  645. {
  646. "DEPENDS": -1,
  647. "TIMEOUT": 1,
  648. "DATA": -1,
  649. "TAG": -1,
  650. "REQUIREMENTS": -1,
  651. "FORK_MODE": 1,
  652. "SPLIT_FACTOR": 1,
  653. "FORK_SUBTESTS": 0,
  654. "FORK_TESTS": 0,
  655. "SIZE": 1,
  656. },
  657. args,
  658. )
  659. check_type = flat_args[0]
  660. if check_type == "check.data" and unit.get('VALIDATE_DATA') != "no":
  661. check_data(unit, *args)
  662. elif check_type == "check.resource" and unit.get('VALIDATE_DATA') != "no":
  663. check_resource(unit, *args)
  664. elif check_type == "ktlint":
  665. ktlint(unit, *args)
  666. elif check_type == "JAVA_STYLE" and unit.get('ALL_SRCDIRS'):
  667. java_style(unit, *args)
  668. elif check_type == "gofmt":
  669. gofmt(unit, *args)
  670. elif check_type == "govet":
  671. govet(unit, *args)
  672. elif check_type == "detekt.report":
  673. detekt_report(unit, *args)
  674. def on_register_no_check_imports(unit):
  675. s = unit.get('NO_CHECK_IMPORTS_FOR_VALUE')
  676. if s not in ('', 'None'):
  677. unit.onresource(['DONT_COMPRESS', '-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(s), s)])
  678. @df.with_fields(
  679. (
  680. df.TestedProjectName.normalized_basename,
  681. df.SourceFolderPath.normalized,
  682. df.TestEnv.value,
  683. df.UseArcadiaPython.value,
  684. df.TestFiles.normalized,
  685. df.ModuleLang.value,
  686. df.NoCheck.value,
  687. )
  688. )
  689. def onadd_check_py_imports(fields, unit, *args):
  690. if unit.get("TIDY") == "yes":
  691. # graph changed for clang_tidy tests
  692. return
  693. if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
  694. return
  695. unit.onpeerdir(['library/python/testing/import_test'])
  696. dart_record = create_dart_record(fields, unit, (), {})
  697. dart_record[df.TestName.KEY] = 'pyimports'
  698. dart_record[df.ScriptRelPath.KEY] = 'py.imports'
  699. # Import tests work correctly in this mode, but can slow down by 2-3 times,
  700. # due to the fact that files need to be read from the file system.
  701. # Therefore, we disable them, since the external-py-files mode is designed exclusively
  702. # to speed up the short cycle of developing regular tests.
  703. if unit.get('EXTERNAL_PY_FILES'):
  704. dart_record[df.SkipTest.KEY] = 'Import tests disabled in external-py-files mode'
  705. data = dump_test(unit, dart_record)
  706. if data:
  707. unit.set_property(["DART_DATA", data])
  708. @df.with_fields(
  709. PY_EXEC_FIELDS_BASE
  710. + (
  711. df.TestName.filename_without_ext,
  712. df.ScriptRelPath.pytest,
  713. df.TestedProjectName.path_filename_basename,
  714. df.ModuleLang.value,
  715. df.BinaryPath.stripped,
  716. df.TestRunnerBin.value,
  717. df.DockerImage.value,
  718. )
  719. )
  720. def onadd_pytest_bin(fields, unit, *args):
  721. if unit.get("TIDY") == "yes":
  722. # graph changed for clang_tidy tests
  723. return
  724. flat_args, spec_args = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
  725. if flat_args:
  726. ymake.report_configure_error(
  727. 'Unknown arguments found while processing add_pytest_bin macro: {!r}'.format(flat_args)
  728. )
  729. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  730. unit.ondata_files(_common.get_norm_unit_path(unit))
  731. yt_spec = df.YtSpec.from_unit(unit, flat_args, spec_args)
  732. if yt_spec and yt_spec[df.YtSpec.KEY]:
  733. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  734. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  735. if yt_spec:
  736. dart_record |= yt_spec
  737. data = dump_test(unit, dart_record)
  738. if data:
  739. unit.set_property(["DART_DATA", data])
  740. @df.with_fields(
  741. (
  742. df.SourceFolderPath.normalized,
  743. df.TestName.normalized_joined_dir_basename,
  744. df.ScriptRelPath.junit,
  745. df.TestTimeout.from_unit,
  746. df.TestedProjectName.normalized,
  747. df.TestEnv.value,
  748. df.TestData.java_test,
  749. df.ForkMode.test_fork_mode,
  750. df.SplitFactor.from_unit,
  751. df.CustomDependencies.test_depends_only,
  752. df.Tag.from_macro_args_and_unit,
  753. df.Size.from_unit,
  754. df.Requirements.with_maybe_fuzzing,
  755. df.TestRecipes.value,
  756. df.ModuleType.value,
  757. df.UnittestDir.value,
  758. df.JvmArgs.value,
  759. # TODO optimize, SystemProperties is used in TestData
  760. df.SystemProperties.value,
  761. df.TestCwd.from_unit,
  762. df.SkipTest.value,
  763. df.JavaClasspathCmdType.value,
  764. df.JdkResource.value,
  765. df.JdkForTests.value,
  766. df.ModuleLang.value,
  767. df.TestClasspath.value,
  768. df.TestClasspathDeps.value,
  769. df.TestJar.value,
  770. df.DockerImage.value,
  771. )
  772. )
  773. def onjava_test(fields, unit, *args):
  774. if unit.get("TIDY") == "yes":
  775. # graph changed for clang_tidy tests
  776. return
  777. assert unit.get('MODULE_TYPE') is not None
  778. if unit.get('MODULE_TYPE') == 'JTEST_FOR':
  779. if not unit.get('UNITTEST_DIR'):
  780. ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
  781. return
  782. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  783. unit.ondata_files(_common.get_norm_unit_path(unit))
  784. yt_spec = df.YtSpec.from_unit_list_var(unit, (), {})
  785. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  786. try:
  787. dart_record = create_dart_record(fields, unit, (), {})
  788. except df.DartValueError:
  789. return
  790. dart_record |= yt_spec
  791. data = dump_test(unit, dart_record)
  792. if data:
  793. unit.set_property(['DART_DATA', data])
  794. @df.with_fields(
  795. (
  796. df.SourceFolderPath.normalized,
  797. df.TestName.normalized_joined_dir_basename_deps,
  798. df.TestedProjectName.normalized,
  799. df.CustomDependencies.test_depends_only,
  800. df.IgnoreClasspathClash.value,
  801. df.ModuleType.value,
  802. df.ModuleLang.value,
  803. df.Classpath.value,
  804. )
  805. )
  806. def onjava_test_deps(fields, unit, *args):
  807. if unit.get("TIDY") == "yes":
  808. # graph changed for clang_tidy tests
  809. return
  810. assert unit.get('MODULE_TYPE') is not None
  811. assert len(args) == 1
  812. mode = args[0]
  813. dart_record = create_dart_record(fields, unit, (args[0],), {})
  814. dart_record[df.ScriptRelPath.KEY] = 'java.dependency.test'
  815. if mode == 'strict':
  816. dart_record[df.StrictClasspathClash.KEY] = 'yes'
  817. data = dump_test(unit, dart_record)
  818. unit.set_property(['DART_DATA', data])
  819. def onsetup_pytest_bin(unit, *args):
  820. use_arcadia_python = unit.get('USE_ARCADIA_PYTHON') == "yes"
  821. if use_arcadia_python:
  822. unit.onresource(['DONT_COMPRESS', '-', 'PY_MAIN={}'.format("library.python.pytest.main:main")]) # XXX
  823. unit.onadd_pytest_bin(list(args))
  824. def onrun(unit, *args):
  825. exectest_cmd = unit.get(["EXECTEST_COMMAND_VALUE"]) or ''
  826. exectest_cmd += "\n" + subprocess.list2cmdline(args)
  827. unit.set(["EXECTEST_COMMAND_VALUE", exectest_cmd])
  828. @df.with_fields(
  829. PY_EXEC_FIELDS_BASE
  830. + (
  831. df.TestName.filename_without_pkg_ext,
  832. df.TestedProjectName.path_filename_basename_without_pkg_ext,
  833. df.BinaryPath.stripped_without_pkg_ext,
  834. df.DockerImage.value,
  835. )
  836. )
  837. def onsetup_exectest(fields, unit, *args):
  838. if unit.get("TIDY") == "yes":
  839. # graph changed for clang_tidy tests
  840. return
  841. command = unit.get(["EXECTEST_COMMAND_VALUE"])
  842. if command is None:
  843. ymake.report_configure_error("EXECTEST must have at least one RUN macro")
  844. return
  845. command = command.replace("$EXECTEST_COMMAND_VALUE", "")
  846. if "PYTHON_BIN" in command:
  847. unit.ondepends('contrib/tools/python')
  848. unit.set(["TEST_BLOB_DATA", base64.b64encode(six.ensure_binary(command))])
  849. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  850. unit.ondata_files(_common.get_norm_unit_path(unit))
  851. yt_spec = df.YtSpec.from_unit(unit, (), {})
  852. if yt_spec and yt_spec[df.YtSpec.KEY]:
  853. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  854. dart_record = create_dart_record(fields, unit, (), {})
  855. dart_record[df.ScriptRelPath.KEY] = 'exectest'
  856. if yt_spec:
  857. dart_record |= yt_spec
  858. data = dump_test(unit, dart_record)
  859. if data:
  860. unit.set_property(["DART_DATA", data])
  861. def onsetup_run_python(unit):
  862. if unit.get("USE_ARCADIA_PYTHON") == "yes":
  863. unit.ondepends('contrib/tools/python')
  864. @df.with_fields(
  865. (
  866. df.TestFiles.cpp_linter_files,
  867. df.LintConfigs.cpp_configs,
  868. )
  869. + LINTER_FIELDS_BASE
  870. )
  871. def on_add_cpp_linter_check(fields, unit, *args):
  872. if unit.get("TIDY") == "yes":
  873. return
  874. no_lint_value = _common.get_no_lint_value(unit)
  875. if no_lint_value in ("none", "none_internal"):
  876. return
  877. unlimited = -1
  878. keywords = {
  879. "NAME": 1,
  880. "LINTER": 1,
  881. "DEPENDS": unlimited,
  882. "CONFIGS": 1,
  883. "CUSTOM_CONFIG": 1,
  884. "GLOBAL_RESOURCES": unlimited,
  885. "FILE_PROCESSING_TIME": 1,
  886. "EXTRA_PARAMS": unlimited,
  887. "CONFIG_TYPE": 1,
  888. }
  889. _, spec_args = _common.sort_by_keywords(keywords, args)
  890. global_resources = spec_args.get('GLOBAL_RESOURCES', [])
  891. for resource in global_resources:
  892. unit.onpeerdir(resource)
  893. try:
  894. dart_record = create_dart_record(fields, unit, (), spec_args)
  895. except df.DartValueError as e:
  896. if msg := str(e):
  897. unit.message(['WARN', msg])
  898. return
  899. dart_record[df.ScriptRelPath.KEY] = 'custom_lint'
  900. data = dump_test(unit, dart_record)
  901. if data:
  902. unit.set_property(["DART_DATA", data])
  903. @df.with_fields(
  904. (
  905. df.TestFiles.py_linter_files,
  906. df.LintConfigs.python_configs,
  907. )
  908. + LINTER_FIELDS_BASE
  909. )
  910. def on_add_py_linter_check(fields, unit, *args):
  911. if unit.get("TIDY") == "yes":
  912. return
  913. no_lint_value = _common.get_no_lint_value(unit)
  914. if no_lint_value in ("none", "none_internal"):
  915. return
  916. unlimited = -1
  917. keywords = {
  918. "NAME": 1,
  919. "LINTER": 1,
  920. "DEPENDS": unlimited,
  921. "CONFIGS": 1,
  922. "GLOBAL_RESOURCES": unlimited,
  923. "FILE_PROCESSING_TIME": 1,
  924. "EXTRA_PARAMS": unlimited,
  925. "PROJECT_TO_CONFIG_MAP": 1,
  926. "FLAKE_MIGRATIONS_CONFIG": 1,
  927. "CUSTOM_CONFIG": 1,
  928. "CONFIG_TYPE": 1,
  929. }
  930. _, spec_args = _common.sort_by_keywords(keywords, args)
  931. global_resources = spec_args.get('GLOBAL_RESOURCES', [])
  932. for resource in global_resources:
  933. unit.onpeerdir(resource)
  934. try:
  935. dart_record = create_dart_record(fields, unit, (), spec_args)
  936. except df.DartValueError as e:
  937. if msg := str(e):
  938. unit.message(['WARN', msg])
  939. return
  940. dart_record[df.ScriptRelPath.KEY] = 'custom_lint'
  941. data = dump_test(unit, dart_record)
  942. if data:
  943. unit.set_property(["DART_DATA", data])
  944. @df.with_fields(
  945. YTEST_FIELDS_BASE
  946. + (
  947. df.TestName.value,
  948. df.TestPartition.value,
  949. df.ModuleLang.value,
  950. )
  951. )
  952. def clang_tidy(fields, unit, *args):
  953. keywords = {
  954. "DEPENDS": -1,
  955. "DATA": -1,
  956. "TIMEOUT": 1,
  957. "FORK_MODE": 1,
  958. "SPLIT_FACTOR": 1,
  959. "FORK_SUBTESTS": 0,
  960. "FORK_TESTS": 0,
  961. }
  962. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  963. if unit.get("TIDY_CONFIG"):
  964. default_config_path = unit.get("TIDY_CONFIG")
  965. project_config_path = unit.get("TIDY_CONFIG")
  966. else:
  967. default_config_path = get_default_tidy_config(unit)
  968. project_config_path = get_project_tidy_config(unit)
  969. unit.set(["DEFAULT_TIDY_CONFIG", default_config_path])
  970. unit.set(["PROJECT_TIDY_CONFIG", project_config_path])
  971. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  972. data = dump_test(unit, dart_record)
  973. if data:
  974. unit.set_property(["DART_DATA", data])
  975. @df.with_fields(
  976. YTEST_FIELDS_BASE
  977. + YTEST_FIELDS_EXTRA
  978. + (
  979. df.TestName.value,
  980. df.TestData.from_macro_args_and_unit,
  981. df.Requirements.from_macro_args_and_unit,
  982. df.TestPartition.value,
  983. df.ModuleLang.value,
  984. df.DockerImage.value,
  985. )
  986. )
  987. def unittest_py(fields, unit, *args):
  988. keywords = {
  989. "DEPENDS": -1,
  990. "DATA": -1,
  991. "TIMEOUT": 1,
  992. "FORK_MODE": 1,
  993. "SPLIT_FACTOR": 1,
  994. "FORK_SUBTESTS": 0,
  995. "FORK_TESTS": 0,
  996. }
  997. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  998. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  999. unit.ondata_files(_common.get_norm_unit_path(unit))
  1000. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1001. data = dump_test(unit, dart_record)
  1002. if data:
  1003. unit.set_property(["DART_DATA", data])
  1004. @df.with_fields(
  1005. YTEST_FIELDS_BASE
  1006. + YTEST_FIELDS_EXTRA
  1007. + (
  1008. df.TestName.value,
  1009. df.TestData.from_macro_args_and_unit,
  1010. df.Requirements.from_macro_args_and_unit,
  1011. df.TestPartition.value,
  1012. df.ModuleLang.value,
  1013. df.DockerImage.value,
  1014. )
  1015. )
  1016. def gunittest(fields, unit, *args):
  1017. keywords = {
  1018. "DEPENDS": -1,
  1019. "DATA": -1,
  1020. "TIMEOUT": 1,
  1021. "FORK_MODE": 1,
  1022. "SPLIT_FACTOR": 1,
  1023. "FORK_SUBTESTS": 0,
  1024. "FORK_TESTS": 0,
  1025. }
  1026. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1027. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1028. unit.ondata_files(_common.get_norm_unit_path(unit))
  1029. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1030. data = dump_test(unit, dart_record)
  1031. if data:
  1032. unit.set_property(["DART_DATA", data])
  1033. @df.with_fields(
  1034. YTEST_FIELDS_BASE
  1035. + YTEST_FIELDS_EXTRA
  1036. + (
  1037. df.TestName.value,
  1038. df.TestData.from_macro_args_and_unit,
  1039. df.Requirements.from_macro_args_and_unit,
  1040. df.TestPartition.value,
  1041. df.ModuleLang.value,
  1042. df.BenchmarkOpts.value,
  1043. df.DockerImage.value,
  1044. )
  1045. )
  1046. def g_benchmark(fields, unit, *args):
  1047. keywords = {
  1048. "DEPENDS": -1,
  1049. "DATA": -1,
  1050. "TIMEOUT": 1,
  1051. "FORK_MODE": 1,
  1052. "SPLIT_FACTOR": 1,
  1053. "FORK_SUBTESTS": 0,
  1054. "FORK_TESTS": 0,
  1055. }
  1056. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1057. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1058. unit.ondata_files(_common.get_norm_unit_path(unit))
  1059. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1060. data = dump_test(unit, dart_record)
  1061. if data:
  1062. unit.set_property(["DART_DATA", data])
  1063. @df.with_fields(
  1064. YTEST_FIELDS_BASE
  1065. + YTEST_FIELDS_EXTRA
  1066. + (
  1067. df.TestName.value,
  1068. df.TestData.from_macro_args_and_unit_with_canonical,
  1069. df.Requirements.from_macro_args_and_unit,
  1070. df.TestPartition.value,
  1071. df.ModuleLang.value,
  1072. df.DockerImage.value,
  1073. )
  1074. )
  1075. def go_test(fields, unit, *args):
  1076. keywords = {
  1077. "DEPENDS": -1,
  1078. "DATA": -1,
  1079. "TIMEOUT": 1,
  1080. "FORK_MODE": 1,
  1081. "SPLIT_FACTOR": 1,
  1082. "FORK_SUBTESTS": 0,
  1083. "FORK_TESTS": 0,
  1084. }
  1085. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1086. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1087. unit.ondata_files(_common.get_norm_unit_path(unit))
  1088. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1089. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1090. data = dump_test(unit, dart_record)
  1091. if data:
  1092. unit.set_property(["DART_DATA", data])
  1093. @df.with_fields(
  1094. YTEST_FIELDS_BASE
  1095. + YTEST_FIELDS_EXTRA
  1096. + (
  1097. df.TestName.value,
  1098. df.TestData.from_macro_args_and_unit,
  1099. df.Requirements.from_macro_args_and_unit,
  1100. df.TestPartition.value,
  1101. df.DockerImage.value,
  1102. )
  1103. )
  1104. def boost_test(fields, unit, *args):
  1105. keywords = {
  1106. "DEPENDS": -1,
  1107. "DATA": -1,
  1108. "TIMEOUT": 1,
  1109. "FORK_MODE": 1,
  1110. "SPLIT_FACTOR": 1,
  1111. "FORK_SUBTESTS": 0,
  1112. "FORK_TESTS": 0,
  1113. }
  1114. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1115. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1116. unit.ondata_files(_common.get_norm_unit_path(unit))
  1117. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1118. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1119. data = dump_test(unit, dart_record)
  1120. if data:
  1121. unit.set_property(["DART_DATA", data])
  1122. @df.with_fields(
  1123. YTEST_FIELDS_BASE
  1124. + YTEST_FIELDS_EXTRA
  1125. + (
  1126. df.TestName.value,
  1127. df.TestData.from_macro_args_and_unit,
  1128. df.Requirements.with_maybe_fuzzing,
  1129. df.FuzzDicts.value,
  1130. df.FuzzOpts.value,
  1131. df.Fuzzing.value,
  1132. df.DockerImage.value,
  1133. )
  1134. )
  1135. def fuzz_test(fields, unit, *args):
  1136. keywords = {
  1137. "DEPENDS": -1,
  1138. "DATA": -1,
  1139. "TIMEOUT": 1,
  1140. "FORK_MODE": 1,
  1141. "SPLIT_FACTOR": 1,
  1142. "FORK_SUBTESTS": 0,
  1143. "FORK_TESTS": 0,
  1144. }
  1145. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1146. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1147. unit.ondata_files(_common.get_norm_unit_path(unit))
  1148. unit.ondata_files("fuzzing/{}/corpus.json".format(_common.get_norm_unit_path(unit)))
  1149. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1150. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1151. data = dump_test(unit, dart_record)
  1152. if data:
  1153. unit.set_property(["DART_DATA", data])
  1154. @df.with_fields(
  1155. YTEST_FIELDS_BASE
  1156. + YTEST_FIELDS_EXTRA
  1157. + (
  1158. df.TestName.value,
  1159. df.TestData.from_macro_args_and_unit,
  1160. df.Requirements.from_macro_args_and_unit,
  1161. df.TestPartition.value,
  1162. df.ModuleLang.value,
  1163. df.BenchmarkOpts.value,
  1164. df.DockerImage.value,
  1165. )
  1166. )
  1167. def y_benchmark(fields, unit, *args):
  1168. keywords = {
  1169. "DEPENDS": -1,
  1170. "DATA": -1,
  1171. "TIMEOUT": 1,
  1172. "FORK_MODE": 1,
  1173. "SPLIT_FACTOR": 1,
  1174. "FORK_SUBTESTS": 0,
  1175. "FORK_TESTS": 0,
  1176. }
  1177. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1178. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1179. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1180. data = dump_test(unit, dart_record)
  1181. if data:
  1182. unit.set_property(["DART_DATA", data])
  1183. @df.with_fields(
  1184. YTEST_FIELDS_BASE
  1185. + YTEST_FIELDS_EXTRA
  1186. + (
  1187. df.TestName.value,
  1188. df.TestData.from_macro_args_and_unit,
  1189. df.Requirements.from_macro_args_and_unit,
  1190. df.TestPartition.value,
  1191. )
  1192. )
  1193. def coverage_extractor(fields, unit, *args):
  1194. keywords = {
  1195. "DEPENDS": -1,
  1196. "DATA": -1,
  1197. "TIMEOUT": 1,
  1198. "FORK_MODE": 1,
  1199. "SPLIT_FACTOR": 1,
  1200. "FORK_SUBTESTS": 0,
  1201. "FORK_TESTS": 0,
  1202. }
  1203. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1204. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1205. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1206. data = dump_test(unit, dart_record)
  1207. if data:
  1208. unit.set_property(["DART_DATA", data])
  1209. @df.with_fields(
  1210. YTEST_FIELDS_BASE
  1211. + YTEST_FIELDS_EXTRA
  1212. + (
  1213. df.TestName.first_flat_with_bench,
  1214. df.TestData.from_macro_args_and_unit,
  1215. df.Requirements.from_macro_args_and_unit,
  1216. df.TestPartition.value,
  1217. df.GoBenchTimeout.value,
  1218. df.ModuleLang.value,
  1219. df.DockerImage.value,
  1220. )
  1221. )
  1222. def go_bench(fields, unit, *args):
  1223. keywords = {
  1224. "DEPENDS": -1,
  1225. "DATA": -1,
  1226. "TIMEOUT": 1,
  1227. "FORK_MODE": 1,
  1228. "SPLIT_FACTOR": 1,
  1229. "FORK_SUBTESTS": 0,
  1230. "FORK_TESTS": 0,
  1231. }
  1232. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1233. tags = df.Tag.from_macro_args_and_unit(unit, flat_args, spec_args)[df.Tag.KEY]
  1234. if "ya:run_go_benchmark" not in tags:
  1235. return
  1236. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1237. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1238. data = dump_test(unit, dart_record)
  1239. if data:
  1240. unit.set_property(["DART_DATA", data])
  1241. def onadd_ytest(unit, *args):
  1242. keywords = {
  1243. "DEPENDS": -1,
  1244. "DATA": -1,
  1245. "TIMEOUT": 1,
  1246. "FORK_MODE": 1,
  1247. "SPLIT_FACTOR": 1,
  1248. "FORK_SUBTESTS": 0,
  1249. "FORK_TESTS": 0,
  1250. }
  1251. flat_args, *_ = _common.sort_by_keywords(keywords, args)
  1252. test_type = flat_args[1]
  1253. # TIDY not supported for module
  1254. if unit.get("TIDY_ENABLED") == "yes" and test_type != "clang_tidy":
  1255. return
  1256. # TIDY explicitly disabled for module in ymake.core.conf
  1257. elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") != "yes":
  1258. return
  1259. # TIDY disabled for module in ya.make
  1260. elif unit.get("TIDY") == "yes" and unit.get("TIDY_ENABLED") != "yes":
  1261. return
  1262. elif test_type == "no.test":
  1263. return
  1264. elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") == "yes":
  1265. clang_tidy(unit, *args)
  1266. elif test_type == "unittest.py":
  1267. unittest_py(unit, *args)
  1268. elif test_type == "gunittest":
  1269. gunittest(unit, *args)
  1270. elif test_type == "g_benchmark":
  1271. g_benchmark(unit, *args)
  1272. elif test_type == "go.test":
  1273. go_test(unit, *args)
  1274. elif test_type == "boost.test":
  1275. boost_test(unit, *args)
  1276. elif test_type == "fuzz.test":
  1277. fuzz_test(unit, *args)
  1278. elif test_type == "y_benchmark":
  1279. y_benchmark(unit, *args)
  1280. elif test_type == "coverage.extractor" and match_coverage_extractor_requirements(unit):
  1281. coverage_extractor(unit, *args)
  1282. elif test_type == "go.bench":
  1283. go_bench(unit, *args)