# ytest.py — ymake plugin: construction and validation of test dart records.
  1. from __future__ import print_function
  2. import base64
  3. import collections
  4. import copy
  5. import json
  6. import os
  7. import re
  8. import six
  9. import subprocess
  10. try:
  11. from StringIO import StringIO
  12. except ImportError:
  13. from io import StringIO
  14. import _common
  15. import _dart_fields as df
  16. import _requirements as reqs
  17. import lib.test_const as consts
  18. import ymake
  19. from _dart_fields import (
  20. serialize_list,
  21. get_unit_list_variable,
  22. deserialize_list,
  23. prepare_env,
  24. create_dart_record,
  25. )
# Separator line between serialized test records in the DART data stream (see dump_test).
BLOCK_SEPARATOR = '============================================================='
# Upper bound for SPLIT_FACTOR(); larger values are rejected by validate_test.
SPLIT_FACTOR_MAX_VALUE = 1000
# Limit on total chunks (test files * split factor) when FORK_TEST_FILES is used.
SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
# Allowed values of the TEST_PARTITION property.
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
# clang-tidy configuration: the fallback config and the JSON maps of
# project prefix -> config path (default and per-project overrides).
DEFAULT_TIDY_CONFIG = "build/config/tests/clang_tidy/config.yaml"
DEFAULT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_default_map.json"
PROJECT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_project_map.json"
# ktlint editorconfig locations (current and legacy layouts).
KTLINT_CURRENT_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint/.editorconfig"
KTLINT_OLD_EDITOR_CONFIG = "arcadia/build/platform/java/ktlint_old/.editorconfig"

# Field-extractor sets (from _dart_fields) shared by the dart-record builders below.
# Base fields common to all ytest-style suites.
YTEST_FIELDS_BASE = (
    df.AndroidApkTestActivity.value,
    df.BinaryPath.normalized,
    df.BuildFolderPath.normalized,
    df.CustomDependencies.all_standard,
    df.GlobalLibraryPath.value,
    df.ScriptRelPath.second_flat,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_macro_args_and_unit,
    df.TestCwd.from_unit,
    df.TestedProjectFilename.value,
    df.TestedProjectName.unit_name,
    df.TestEnv.value,
    df.TestIosDeviceType.value,
    df.TestIosRuntimeType.value,
    df.TestRecipes.value,
)

# Extra fields taken from macro args and/or unit variables.
YTEST_FIELDS_EXTRA = (
    df.Blob.value,
    df.ForkMode.from_macro_and_unit,
    df.Size.from_macro_args_and_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestTimeout.from_macro_args_and_unit,
    df.YtSpec.from_macro_args_and_unit,
)

# Base fields for python-executable test suites (pytest et al.).
PY_EXEC_FIELDS_BASE = (
    df.Blob.value,
    df.BuildFolderPath.stripped,
    df.CanonizeSubPath.value,
    df.CustomDependencies.test_depends_only,
    df.ForkMode.test_fork_mode,
    df.ForkTestFiles.value,
    df.PythonPaths.value,
    df.Requirements.from_unit,
    df.Size.from_unit,
    df.SkipTest.value,
    df.SourceFolderPath.normalized,
    df.SplitFactor.from_unit,
    df.Tag.from_macro_args_and_unit,
    df.TestCwd.keywords_replaced,
    df.TestData.from_unit_with_canonical,
    df.TestEnv.value,
    df.TestFiles.test_srcs,
    df.TestPartition.value,
    df.TestRecipes.value,
    df.TestTimeout.from_unit_with_default,
    df.UseArcadiaPython.value,
)

# Base fields for lightweight "check" suites (check.data, gofmt, ktlint, ...).
CHECK_FIELDS_BASE = (
    df.CustomDependencies.depends_only,
    df.Requirements.from_macro_args,
    df.ScriptRelPath.first_flat,
    df.TestEnv.value,
    df.TestName.first_flat,
    df.UseArcadiaPython.value,
)

# NOTE(review): appears unused in this module's visible scope (get_tidy_config_map
# uses a local) — possibly legacy; confirm before removing.
tidy_config_map = None
def ontest_data(unit, *args):
    """Handler for the removed TEST_DATA macro: unconditionally report a configure error."""
    ymake.report_configure_error("TEST_DATA is removed in favour of DATA")
  95. def is_yt_spec_contain_pool_info(filename): # XXX switch to yson in ymake + perf test for configure
  96. pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
  97. cypress_root_re = re.compile(r"""['"]*cypress_root['"]*\s*=""")
  98. with open(filename, 'r') as afile:
  99. yt_spec = afile.read()
  100. return pool_re.search(yt_spec) and cypress_root_re.search(yt_spec)
def validate_test(unit, kw):
    """Validate a test dart record (dict of DART keys) before serialization.

    Checks mandatory fields, tag prefixes, REQUIREMENTS syntax/values,
    size/timeout consistency, fuzzer options, YT specs and SPLIT_FACTOR.

    Returns a tuple (valid_kw, warnings, errors); valid_kw is a normalized
    deep copy of kw, or None when any error was produced.
    Raises ValueError for an unknown TEST_PARTITION mode.
    """

    def get_list(key):
        # DART list values are stored serialized; see (de)serialize_list.
        return deserialize_list(kw.get(key, ""))

    valid_kw = copy.deepcopy(kw)
    errors = []
    warnings = []

    mandatory_fields = {"SCRIPT-REL-PATH", "SOURCE-FOLDER-PATH", "TEST-NAME"}
    for field in mandatory_fields - valid_kw.keys():
        errors.append(f"Mandatory field {field!r} is not set in DART")

    if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
        # BOOSTTEST is whitelisted to a fixed set of project prefixes.
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(
            ("contrib", "mail", "maps", "tools/idl", "metrika", "devtools", "mds", "yandex_io", "smart_devices")
        ):
            errors.append("BOOSTTEST is not allowed here")

    # Default timeouts ordered by value so the smallest size that fits a
    # user-supplied timeout can be suggested below.
    size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))

    size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
    tags = set(get_list("TAG"))
    requirements_orig = get_list("REQUIREMENTS")
    in_autocheck = consts.YaTestTags.NotAutocheck not in tags and consts.YaTestTags.Manual not in tags
    is_fat = consts.YaTestTags.Fat in tags
    is_force_sandbox = consts.YaTestTags.ForceDistbuild not in tags and is_fat
    is_ytexec_run = consts.YaTestTags.YtRunner in tags
    is_fuzzing = valid_kw.get("FUZZING", False)
    is_kvm = 'kvm' in requirements_orig
    requirements = {}
    # Requirements whose repeated values accumulate into a comma-separated
    # list instead of overriding each other.
    secret_requirements = ('sb_vault', 'yav')
    list_requirements = secret_requirements
    for req in requirements_orig:
        if req in ('kvm',):
            # Bare 'kvm' is a boolean flag.
            requirements[req] = str(True)
            continue

        if ":" in req:
            req_name, req_value = req.split(":", 1)
            if req_name in list_requirements:
                requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
            else:
                if req_name in requirements:
                    if req_value in ["0"]:
                        # An explicit zero drops a previously set requirement.
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        del requirements[req_name]
                    elif requirements[req_name] != req_value:
                        warnings.append(
                            "Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(
                                req_name, requirements[req_name], req_value
                            )
                        )
                        requirements[req_name] = req_value
                else:
                    requirements[req_name] = req_value
        else:
            errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))

    if not errors:
        for req_name, req_value in requirements.items():
            try:
                error_msg = reqs.validate_requirement(
                    req_name,
                    req_value,
                    size,
                    is_force_sandbox,
                    in_autocheck,
                    is_fuzzing,
                    is_kvm,
                    is_ytexec_run,
                    requirements,
                )
            except Exception as e:
                error_msg = str(e)
            if error_msg:
                errors += [error_msg]

    # Only these requirements are meaningful for distbuild runs.
    invalid_requirements_for_distbuild = [
        requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')
    ]

    sb_tags = []
    # XXX Unfortunately, some users have already started using colons
    # in their tag names. Use skip set to avoid treating their tag as system ones.
    # Remove this check when all such user tags are removed.
    skip_set = ('ynmt_benchmark', 'bert_models', 'zeliboba_map')
    # Verify the prefixes of the system tags to avoid pointless use of the REQUIREMENTS macro parameters in the TAG macro.
    for tag in tags:
        if tag.startswith('sb:'):
            sb_tags.append(tag)
        elif ':' in tag and not tag.startswith('ya:') and tag.split(':')[0] not in skip_set:
            errors.append(
                "Only [[imp]]sb:[[rst]] and [[imp]]ya:[[rst]] prefixes are allowed in system tags: {}".format(tag)
            )

    if is_fat:
        if size != consts.TestSize.Large:
            errors.append("Only LARGE test may have ya:fat tag")

        if in_autocheck and not is_force_sandbox:
            if invalid_requirements_for_distbuild:
                errors.append(
                    "'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(
                        invalid_requirements_for_distbuild
                    )
                )
            if sb_tags:
                errors.append(
                    "You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(
                        sb_tags
                    )
                )
            if consts.YaTestTags.SandboxCoverage in tags:
                errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
            if is_ytexec_run:
                errors.append(
                    "Running LARGE tests over YT (ya:yt) on Distbuild (ya:force_distbuild) is forbidden. Consider removing TAG(ya:force_distbuild)."
                )
    else:
        if is_force_sandbox:
            errors.append('ya:force_sandbox can be used with LARGE tests only')
        if consts.YaTestTags.Privileged in tags:
            errors.append("ya:privileged can be used with LARGE tests only")
        if in_autocheck and size == consts.TestSize.Large:
            errors.append("LARGE test must have ya:fat tag")

    if consts.YaTestTags.Privileged in tags and 'container' not in requirements:
        errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")

    if size not in size_timeout:
        errors.append(
            "Unknown test size: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(
                size.upper(), ", ".join([sz.upper() for sz in size_timeout.keys()])
            )
        )
    else:
        try:
            timeout = int(valid_kw.get('TEST-TIMEOUT', size_timeout[size]) or size_timeout[size])
            script_rel_path = valid_kw.get('SCRIPT-REL-PATH')
            # NOTE(review): condition rejects only negative timeouts while the
            # message claims "> 0" — confirm whether 0 is intentionally allowed.
            if timeout < 0:
                raise Exception("Timeout must be > 0")

            skip_timeout_verification = script_rel_path in ('java.style', 'ktlint')

            if size_timeout[size] < timeout and in_autocheck and not skip_timeout_verification:
                # Suggest the smallest size whose default timeout fits the request.
                suggested_size = None
                for s, t in size_timeout.items():
                    if timeout <= t:
                        suggested_size = s
                        break

                if suggested_size:
                    suggested_size = ", suggested size: [[imp]]{}[[rst]]".format(suggested_size.upper())
                else:
                    suggested_size = ""
                errors.append(
                    "Max allowed timeout for test size [[imp]]{}[[rst]] is [[imp]]{} sec[[rst]]{}".format(
                        size.upper(), size_timeout[size], suggested_size
                    )
                )
        except Exception as e:
            errors.append("Error when parsing test timeout: [[bad]]{}[[rst]]".format(e))

    requirements_list = []
    for req_name, req_value in six.iteritems(requirements):
        requirements_list.append(req_name + ":" + req_value)
    valid_kw['REQUIREMENTS'] = serialize_list(sorted(requirements_list))

    # Mark test with ya:external tag if it requests any secret from external storages
    # It's not stable and nonreproducible by definition
    for x in secret_requirements:
        if x in requirements:
            tags.add(consts.YaTestTags.External)

    if valid_kw.get("FUZZ-OPTS"):
        # Every fuzzer option must look like "-name=value" and must not be
        # one of the options the runner computes itself.
        for option in get_list("FUZZ-OPTS"):
            if not option.startswith("-"):
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(
                        option
                    )
                )
                break
            eqpos = option.find("=")
            if eqpos == -1 or len(option) == eqpos + 1:
                errors.append(
                    "Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(
                        option
                    )
                )
                break
            if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
                errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
                break
            if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
                errors.append(
                    "You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(
                        option
                    )
                )
                break

    if valid_kw.get("YT-SPEC"):
        if not is_ytexec_run:
            errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
        else:
            for filename in get_list("YT-SPEC"):
                filename = unit.resolve('$S/' + filename)
                if not os.path.exists(filename):
                    errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
                    continue
                if not is_yt_spec_contain_pool_info(filename):
                    # Spec without pool/cypress_root info: tag as external research-pool usage.
                    tags.add(consts.YaTestTags.External)
                    tags.add("ya:yt_research_pool")

    partition = valid_kw.get('TEST_PARTITION', 'SEQUENTIAL')
    if partition not in PARTITION_MODS:
        raise ValueError('partition mode should be one of {}, detected: {}'.format(PARTITION_MODS, partition))

    if valid_kw.get('SPLIT-FACTOR'):
        if valid_kw.get('FORK-MODE') == 'none':
            errors.append('SPLIT_FACTOR must be use with FORK_TESTS() or FORK_SUBTESTS() macro')

        value = 1
        try:
            value = int(valid_kw.get('SPLIT-FACTOR'))
            if value <= 0:
                raise ValueError("must be > 0")
            if value > SPLIT_FACTOR_MAX_VALUE:
                raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
        except ValueError as e:
            errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))

        if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
            nfiles = count_entries(valid_kw.get('TEST-FILES'))
            if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
                errors.append(
                    'Too much chunks generated:{} (limit: {}). Remove FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
                        nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value
                    )
                )

    if tags:
        valid_kw['TAG'] = serialize_list(sorted(tags))

    unit_path = _common.get_norm_unit_path(unit)
    if (
        not is_fat
        and consts.YaTestTags.Noretries in tags
        and not is_ytexec_run
        and not unit_path.startswith("devtools/dummy_arcadia/test/noretries")
    ):
        errors.append("Only LARGE tests can have 'ya:noretries' tag")

    if errors:
        return None, warnings, errors
    return valid_kw, warnings, errors
  336. def dump_test(unit, kw):
  337. kw = {k: v for k, v in kw.items() if v and (not isinstance(v, str | bytes) or v.strip())}
  338. valid_kw, warnings, errors = validate_test(unit, kw)
  339. for w in warnings:
  340. unit.message(['warn', w])
  341. for e in errors:
  342. ymake.report_configure_error(e)
  343. if valid_kw is None:
  344. return None
  345. string_handler = StringIO()
  346. for k, v in six.iteritems(valid_kw):
  347. print(k + ': ' + six.ensure_str(v), file=string_handler)
  348. print(BLOCK_SEPARATOR, file=string_handler)
  349. data = string_handler.getvalue()
  350. string_handler.close()
  351. return data
  352. def reference_group_var(varname: str, extensions: list[str] | None = None) -> str:
  353. if extensions is None:
  354. return f'"${{join=\\;:{varname}}}"'
  355. return serialize_list(f'${{ext={ext};join=\\;:{varname}}}' for ext in extensions)
  356. def count_entries(x):
  357. # see (de)serialize_list
  358. assert x is None or isinstance(x, str), type(x)
  359. if not x:
  360. return 0
  361. return x.count(";") + 1
  362. def implies(a, b):
  363. return bool((not a) or b)
  364. def match_coverage_extractor_requirements(unit):
  365. # we add test if
  366. return all(
  367. (
  368. # tests are requested
  369. unit.get("TESTS_REQUESTED") == "yes",
  370. # build implies clang coverage, which supports segment extraction from the binaries
  371. unit.get("CLANG_COVERAGE") == "yes",
  372. # contrib was requested
  373. implies(
  374. _common.get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"
  375. ),
  376. )
  377. )
  378. def get_tidy_config_map(unit, map_path):
  379. config_map_path = unit.resolve(os.path.join("$S", map_path))
  380. config_map = {}
  381. try:
  382. with open(config_map_path, 'r') as afile:
  383. config_map = json.load(afile)
  384. except ValueError:
  385. ymake.report_configure_error("{} is invalid json".format(map_path))
  386. except Exception as e:
  387. ymake.report_configure_error(str(e))
  388. return config_map
  389. def prepare_config_map(config_map):
  390. return list(reversed(sorted(config_map.items())))
  391. def get_default_tidy_config(unit):
  392. unit_path = _common.get_norm_unit_path(unit)
  393. tidy_default_config_map = prepare_config_map(get_tidy_config_map(unit, DEFAULT_TIDY_CONFIG_MAP_PATH))
  394. for project_prefix, config_path in tidy_default_config_map:
  395. if unit_path.startswith(project_prefix):
  396. return config_path
  397. return DEFAULT_TIDY_CONFIG
  398. ordered_tidy_map = None
  399. def get_project_tidy_config(unit):
  400. global ordered_tidy_map
  401. if ordered_tidy_map is None:
  402. ordered_tidy_map = prepare_config_map(get_tidy_config_map(unit, PROJECT_TIDY_CONFIG_MAP_PATH))
  403. unit_path = _common.get_norm_unit_path(unit)
  404. for project_prefix, config_path in ordered_tidy_map:
  405. if unit_path.startswith(project_prefix):
  406. return config_path
  407. else:
  408. return get_default_tidy_config(unit)
  409. @df.with_fields(
  410. CHECK_FIELDS_BASE
  411. + (
  412. df.TestedProjectName.normalized_basename,
  413. df.SourceFolderPath.normalized,
  414. df.SbrUidExt.value,
  415. df.TestFiles.value,
  416. )
  417. )
  418. def check_data(fields, unit, *args):
  419. flat_args, spec_args = _common.sort_by_keywords(
  420. {
  421. "DEPENDS": -1,
  422. "TIMEOUT": 1,
  423. "DATA": -1,
  424. "TAG": -1,
  425. "REQUIREMENTS": -1,
  426. "FORK_MODE": 1,
  427. "SPLIT_FACTOR": 1,
  428. "FORK_SUBTESTS": 0,
  429. "FORK_TESTS": 0,
  430. "SIZE": 1,
  431. },
  432. args,
  433. )
  434. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  435. if not dart_record[df.TestFiles.KEY]:
  436. return
  437. data = dump_test(unit, dart_record)
  438. if data:
  439. unit.set_property(["DART_DATA", data])
  440. @df.with_fields(
  441. CHECK_FIELDS_BASE
  442. + (
  443. df.TestedProjectName.normalized_basename,
  444. df.SourceFolderPath.normalized,
  445. df.SbrUidExt.value,
  446. df.TestFiles.flat_args_wo_first,
  447. )
  448. )
  449. def check_resource(fields, unit, *args):
  450. flat_args, spec_args = _common.sort_by_keywords(
  451. {
  452. "DEPENDS": -1,
  453. "TIMEOUT": 1,
  454. "DATA": -1,
  455. "TAG": -1,
  456. "REQUIREMENTS": -1,
  457. "FORK_MODE": 1,
  458. "SPLIT_FACTOR": 1,
  459. "FORK_SUBTESTS": 0,
  460. "FORK_TESTS": 0,
  461. "SIZE": 1,
  462. },
  463. args,
  464. )
  465. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  466. data = dump_test(unit, dart_record)
  467. if data:
  468. unit.set_property(["DART_DATA", data])
  469. @df.with_fields(
  470. CHECK_FIELDS_BASE
  471. + (
  472. df.TestedProjectName.normalized_basename,
  473. df.SourceFolderPath.normalized,
  474. df.TestData.ktlint,
  475. df.TestFiles.flat_args_wo_first,
  476. df.ModuleLang.value,
  477. df.KtlintBinary.value,
  478. df.UseKtlintOld.value,
  479. df.KtlintBaselineFile.value,
  480. )
  481. )
  482. def ktlint(fields, unit, *args):
  483. flat_args, spec_args = _common.sort_by_keywords(
  484. {
  485. "DEPENDS": -1,
  486. "TIMEOUT": 1,
  487. "DATA": -1,
  488. "TAG": -1,
  489. "REQUIREMENTS": -1,
  490. "FORK_MODE": 1,
  491. "SPLIT_FACTOR": 1,
  492. "FORK_SUBTESTS": 0,
  493. "FORK_TESTS": 0,
  494. "SIZE": 1,
  495. },
  496. args,
  497. )
  498. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  499. dart_record[df.TestTimeout.KEY] = '120'
  500. data = dump_test(unit, dart_record)
  501. if data:
  502. unit.set_property(["DART_DATA", data])
  503. @df.with_fields(
  504. CHECK_FIELDS_BASE
  505. + (
  506. df.TestedProjectName.normalized_basename,
  507. df.SourceFolderPath.normalized,
  508. df.TestData.java_style,
  509. df.ForkMode.test_fork_mode,
  510. df.TestFiles.java_style,
  511. df.JdkLatestVersion.value,
  512. df.JdkResource.value,
  513. df.ModuleLang.value,
  514. )
  515. )
  516. def java_style(fields, unit, *args):
  517. flat_args, spec_args = _common.sort_by_keywords(
  518. {
  519. "DEPENDS": -1,
  520. "TIMEOUT": 1,
  521. "DATA": -1,
  522. "TAG": -1,
  523. "REQUIREMENTS": -1,
  524. "FORK_MODE": 1,
  525. "SPLIT_FACTOR": 1,
  526. "FORK_SUBTESTS": 0,
  527. "FORK_TESTS": 0,
  528. "SIZE": 1,
  529. },
  530. args,
  531. )
  532. if len(flat_args) < 2:
  533. raise Exception("Not enough arguments for JAVA_STYLE check")
  534. # jstyle should use the latest jdk
  535. unit.onpeerdir([unit.get('JDK_LATEST_PEERDIR')])
  536. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  537. dart_record[df.TestTimeout.KEY] = '240'
  538. dart_record[df.ScriptRelPath.KEY] = 'java.style'
  539. data = dump_test(unit, dart_record)
  540. if data:
  541. unit.set_property(["DART_DATA", data])
  542. @df.with_fields(
  543. CHECK_FIELDS_BASE
  544. + (
  545. df.TestedProjectName.test_dir,
  546. df.SourceFolderPath.test_dir,
  547. df.ForkMode.test_fork_mode,
  548. df.TestFiles.flat_args_wo_first,
  549. df.ModuleLang.value,
  550. )
  551. )
  552. def gofmt(fields, unit, *args):
  553. flat_args, spec_args = _common.sort_by_keywords(
  554. {
  555. "DEPENDS": -1,
  556. "TIMEOUT": 1,
  557. "DATA": -1,
  558. "TAG": -1,
  559. "REQUIREMENTS": -1,
  560. "FORK_MODE": 1,
  561. "SPLIT_FACTOR": 1,
  562. "FORK_SUBTESTS": 0,
  563. "FORK_TESTS": 0,
  564. "SIZE": 1,
  565. },
  566. args,
  567. )
  568. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  569. data = dump_test(unit, dart_record)
  570. if data:
  571. unit.set_property(["DART_DATA", data])
  572. @df.with_fields(
  573. CHECK_FIELDS_BASE
  574. + (
  575. df.TestedProjectName.normalized_basename,
  576. df.SourceFolderPath.normalized,
  577. df.ForkMode.test_fork_mode,
  578. df.TestFiles.flat_args_wo_first,
  579. df.ModuleLang.value,
  580. )
  581. )
  582. def govet(fields, unit, *args):
  583. flat_args, spec_args = _common.sort_by_keywords(
  584. {
  585. "DEPENDS": -1,
  586. "TIMEOUT": 1,
  587. "DATA": -1,
  588. "TAG": -1,
  589. "REQUIREMENTS": -1,
  590. "FORK_MODE": 1,
  591. "SPLIT_FACTOR": 1,
  592. "FORK_SUBTESTS": 0,
  593. "FORK_TESTS": 0,
  594. "SIZE": 1,
  595. },
  596. args,
  597. )
  598. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  599. data = dump_test(unit, dart_record)
  600. if data:
  601. unit.set_property(["DART_DATA", data])
  602. def onadd_check(unit, *args):
  603. if unit.get("TIDY") == "yes":
  604. # graph changed for clang_tidy tests
  605. return
  606. flat_args, *_ = _common.sort_by_keywords(
  607. {
  608. "DEPENDS": -1,
  609. "TIMEOUT": 1,
  610. "DATA": -1,
  611. "TAG": -1,
  612. "REQUIREMENTS": -1,
  613. "FORK_MODE": 1,
  614. "SPLIT_FACTOR": 1,
  615. "FORK_SUBTESTS": 0,
  616. "FORK_TESTS": 0,
  617. "SIZE": 1,
  618. },
  619. args,
  620. )
  621. check_type = flat_args[0]
  622. if check_type == "check.data" and unit.get('VALIDATE_DATA') != "no":
  623. check_data(unit, *args)
  624. elif check_type == "check.resource" and unit.get('VALIDATE_DATA') != "no":
  625. check_resource(unit, *args)
  626. elif check_type == "ktlint":
  627. ktlint(unit, *args)
  628. elif check_type == "JAVA_STYLE" and (unit.get('YMAKE_JAVA_TEST') != 'yes' or unit.get('ALL_SRCDIRS')):
  629. java_style(unit, *args)
  630. elif check_type == "gofmt":
  631. gofmt(unit, *args)
  632. elif check_type == "govet":
  633. govet(unit, *args)
  634. def on_register_no_check_imports(unit):
  635. s = unit.get('NO_CHECK_IMPORTS_FOR_VALUE')
  636. if s not in ('', 'None'):
  637. unit.onresource(['-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(s), s)])
  638. @df.with_fields(
  639. (
  640. df.TestedProjectName.normalized_basename,
  641. df.SourceFolderPath.normalized,
  642. df.TestEnv.value,
  643. df.UseArcadiaPython.value,
  644. df.TestFiles.normalized,
  645. df.ModuleLang.value,
  646. df.NoCheck.value,
  647. )
  648. )
  649. def onadd_check_py_imports(fields, unit, *args):
  650. if unit.get("TIDY") == "yes":
  651. # graph changed for clang_tidy tests
  652. return
  653. if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
  654. return
  655. unit.onpeerdir(['library/python/testing/import_test'])
  656. dart_record = create_dart_record(fields, unit, (), {})
  657. dart_record[df.TestName.KEY] = 'pyimports'
  658. dart_record[df.ScriptRelPath.KEY] = 'py.imports'
  659. data = dump_test(unit, dart_record)
  660. if data:
  661. unit.set_property(["DART_DATA", data])
  662. @df.with_fields(
  663. PY_EXEC_FIELDS_BASE
  664. + (
  665. df.TestName.filename_without_ext,
  666. df.ScriptRelPath.pytest,
  667. df.TestedProjectName.path_filename_basename,
  668. df.ModuleLang.value,
  669. df.BinaryPath.stripped,
  670. df.TestRunnerBin.value,
  671. df.DockerImage.value,
  672. )
  673. )
  674. def onadd_pytest_bin(fields, unit, *args):
  675. if unit.get("TIDY") == "yes":
  676. # graph changed for clang_tidy tests
  677. return
  678. flat_args, spec_args = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
  679. if flat_args:
  680. ymake.report_configure_error(
  681. 'Unknown arguments found while processing add_pytest_bin macro: {!r}'.format(flat_args)
  682. )
  683. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  684. unit.ondata_files(_common.get_norm_unit_path(unit))
  685. yt_spec = df.YtSpec.from_unit(unit, flat_args, spec_args)
  686. if yt_spec and yt_spec[df.YtSpec.KEY]:
  687. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  688. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  689. if yt_spec:
  690. dart_record |= yt_spec
  691. data = dump_test(unit, dart_record)
  692. if data:
  693. unit.set_property(["DART_DATA", data])
@df.with_fields(
    (
        df.SourceFolderPath.normalized,
        df.TestName.normalized_joined_dir_basename,
        df.ScriptRelPath.junit,
        df.TestTimeout.from_unit,
        df.TestedProjectName.normalized,
        df.TestEnv.value,
        df.TestData.java_test,
        df.ForkMode.test_fork_mode,
        df.SplitFactor.from_unit,
        df.CustomDependencies.test_depends_only,
        df.Tag.from_macro_args_and_unit,
        df.Size.from_unit,
        df.Requirements.with_maybe_fuzzing,
        df.TestRecipes.value,
        df.ModuleType.value,
        df.UnittestDir.value,
        df.JvmArgs.value,
        # TODO optimize, SystemProperties is used in TestData
        df.SystemProperties.value,
        df.TestCwd.from_unit,
        df.SkipTest.value,
        df.JavaClasspathCmdType.value,
        df.JdkResource.value,
        df.JdkForTests.value,
        df.ModuleLang.value,
        df.TestClasspath.value,
        df.TestClasspathOrigins.value,
        df.TestClasspathDeps.value,
        df.TestJar.value,
        df.DockerImage.value,
    )
)
def onjava_test(fields, unit, *args):
    """Build a junit dart record for a java test unit and store it in DART_DATA."""
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return

    assert unit.get('MODULE_TYPE') is not None

    if unit.get('MODULE_TYPE') == 'JTEST_FOR':
        # JTEST_FOR requires a target directory to test against.
        if not unit.get('UNITTEST_DIR'):
            ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
            return

    if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
        unit.ondata_files(_common.get_norm_unit_path(unit))

    yt_spec = df.YtSpec.from_unit_list_var(unit, (), {})
    unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))

    try:
        dart_record = create_dart_record(fields, unit, (), {})
    except df.DartValueError:
        # Invalid field value: skip emitting the dart record.
        return
    dart_record |= yt_spec

    data = dump_test(unit, dart_record)
    if data:
        unit.set_property(['DART_DATA', data])
  749. @df.with_fields(
  750. (
  751. df.SourceFolderPath.normalized,
  752. df.TestName.normalized_joined_dir_basename_deps,
  753. df.TestedProjectName.normalized,
  754. df.CustomDependencies.test_depends_only,
  755. df.IgnoreClasspathClash.value,
  756. df.ModuleType.value,
  757. df.ModuleLang.value,
  758. df.Classpath.value,
  759. )
  760. )
  761. def onjava_test_deps(fields, unit, *args):
  762. if unit.get("TIDY") == "yes":
  763. # graph changed for clang_tidy tests
  764. return
  765. assert unit.get('MODULE_TYPE') is not None
  766. assert len(args) == 1
  767. mode = args[0]
  768. dart_record = create_dart_record(fields, unit, (args[0],), {})
  769. dart_record[df.ScriptRelPath.KEY] = 'java.dependency.test'
  770. if mode == 'strict':
  771. dart_record[df.StrictClasspathClash.KEY] = 'yes'
  772. data = dump_test(unit, dart_record)
  773. unit.set_property(['DART_DATA', data])
  774. def onsetup_pytest_bin(unit, *args):
  775. use_arcadia_python = unit.get('USE_ARCADIA_PYTHON') == "yes"
  776. if use_arcadia_python:
  777. unit.onresource(['-', 'PY_MAIN={}'.format("library.python.pytest.main:main")]) # XXX
  778. unit.onadd_pytest_bin(list(args))
  779. def onrun(unit, *args):
  780. exectest_cmd = unit.get(["EXECTEST_COMMAND_VALUE"]) or ''
  781. exectest_cmd += "\n" + subprocess.list2cmdline(args)
  782. unit.set(["EXECTEST_COMMAND_VALUE", exectest_cmd])
  783. @df.with_fields(
  784. PY_EXEC_FIELDS_BASE
  785. + (
  786. df.TestName.filename_without_pkg_ext,
  787. df.TestedProjectName.path_filename_basename_without_pkg_ext,
  788. df.BinaryPath.stripped_without_pkg_ext,
  789. df.DockerImage.value,
  790. )
  791. )
  792. def onsetup_exectest(fields, unit, *args):
  793. if unit.get("TIDY") == "yes":
  794. # graph changed for clang_tidy tests
  795. return
  796. command = unit.get(["EXECTEST_COMMAND_VALUE"])
  797. if command is None:
  798. ymake.report_configure_error("EXECTEST must have at least one RUN macro")
  799. return
  800. command = command.replace("$EXECTEST_COMMAND_VALUE", "")
  801. if "PYTHON_BIN" in command:
  802. unit.ondepends('contrib/tools/python')
  803. unit.set(["TEST_BLOB_DATA", base64.b64encode(six.ensure_binary(command))])
  804. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  805. unit.ondata_files(_common.get_norm_unit_path(unit))
  806. yt_spec = df.YtSpec.from_unit(unit, (), {})
  807. if yt_spec and yt_spec[df.YtSpec.KEY]:
  808. unit.ondata_files(deserialize_list(yt_spec[df.YtSpec.KEY]))
  809. dart_record = create_dart_record(fields, unit, (), {})
  810. dart_record[df.ScriptRelPath.KEY] = 'exectest'
  811. if yt_spec:
  812. dart_record |= yt_spec
  813. data = dump_test(unit, dart_record)
  814. if data:
  815. unit.set_property(["DART_DATA", data])
  816. def onsetup_run_python(unit):
  817. if unit.get("USE_ARCADIA_PYTHON") == "yes":
  818. unit.ondepends('contrib/tools/python')
  819. @_common.lazy
  820. def get_linter_configs(unit, config_paths):
  821. rel_config_path = _common.rootrel_arc_src(config_paths, unit)
  822. arc_config_path = unit.resolve_arc_path(rel_config_path)
  823. abs_config_path = unit.resolve(arc_config_path)
  824. with open(abs_config_path, 'r') as fd:
  825. return list(json.load(fd).values())
  826. @df.with_fields(
  827. (
  828. df.LintName.value,
  829. df.TestFiles.py_linter_files,
  830. df.LintConfigs.value,
  831. df.LintExtraParams.from_macro_args,
  832. df.TestName.name_from_macro_args,
  833. df.TestedProjectName.unit_name,
  834. df.SourceFolderPath.normalized,
  835. df.TestEnv.value,
  836. df.UseArcadiaPython.value,
  837. df.LintFileProcessingTime.from_macro_args,
  838. df.Linter.value,
  839. df.CustomDependencies.depends_with_linter,
  840. )
  841. )
  842. def on_add_py_linter_check(fields, unit, *args):
  843. if unit.get("TIDY") == "yes":
  844. return
  845. no_lint_value = _common.get_no_lint_value(unit)
  846. if no_lint_value in ("none", "none_internal"):
  847. return
  848. unlimited = -1
  849. keywords = {
  850. "NAME": 1,
  851. "LINTER": 1,
  852. "DEPENDS": unlimited,
  853. "FILES": unlimited,
  854. "CONFIGS": unlimited,
  855. "GLOBAL_RESOURCES": unlimited,
  856. "FILE_PROCESSING_TIME": 1,
  857. "EXTRA_PARAMS": unlimited,
  858. }
  859. _, spec_args = _common.sort_by_keywords(keywords, args)
  860. global_resources = spec_args.get('GLOBAL_RESOURCES', [])
  861. for resource in global_resources:
  862. unit.onpeerdir(resource)
  863. try:
  864. dart_record = create_dart_record(fields, unit, (), spec_args)
  865. except df.DartValueError as e:
  866. if msg := str(e):
  867. unit.message(['WARN', msg])
  868. return
  869. dart_record[df.ScriptRelPath.KEY] = 'custom_lint'
  870. data = dump_test(unit, dart_record)
  871. if data:
  872. unit.set_property(["DART_DATA", data])
def on_add_linter_check(unit, *args):
    """Handle the _ADD_LINTER_CHECK macro: register a generic linter as a custom_lint test.

    Expects exactly two free parameters (lint name and linter binary path) plus
    keyword argument groups (DEPENDS/FILES/CONFIGS/...). Validates CONFIGS and
    EXTRA_PARAMS, wires build dependencies, and stores the resulting DART record.
    """
    if unit.get("TIDY") == "yes":
        return
    source_root_from_prefix = '${ARCADIA_ROOT}/'
    source_root_to_prefix = '$S/'
    unlimited = -1
    # linting disabled for this module (e.g. via NO_LINT) -- nothing to register
    no_lint_value = _common.get_no_lint_value(unit)
    if no_lint_value in ("none", "none_internal"):
        return
    keywords = {
        "DEPENDS": unlimited,
        "FILES": unlimited,
        "CONFIGS": unlimited,
        "GLOBAL_RESOURCES": unlimited,
        "FILE_PROCESSING_TIME": 1,
        "EXTRA_PARAMS": unlimited,
    }
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)
    if len(flat_args) != 2:
        unit.message(['ERROR', '_ADD_LINTER_CHECK params: expected 2 free parameters'])
        return
    # every CONFIGS entry must resolve to an existing file under the source root
    configs = []
    for cfg in spec_args.get('CONFIGS', []):
        filename = unit.resolve(source_root_to_prefix + cfg)
        if not os.path.exists(filename):
            unit.message(['ERROR', 'Configuration file {} is not found'.format(filename)])
            return
        configs.append(cfg)
    deps = []
    lint_name, linter = flat_args
    # the linter tool's own directory must be built before the check can run
    deps.append(os.path.dirname(linter))
    # normalize FILES entries to $S/-relative paths; entries with any other
    # prefix are not appended and so are effectively dropped
    test_files = []
    for path in spec_args.get('FILES', []):
        if path.startswith(source_root_from_prefix):
            test_files.append(path.replace(source_root_from_prefix, source_root_to_prefix, 1))
        elif path.startswith(source_root_to_prefix):
            test_files.append(path)
    if lint_name == 'cpp_style':
        # cpp_style lints all module sources; resolved lazily via a group variable
        files_dart = reference_group_var("ALL_SRCS", consts.STYLE_CPP_ALL_EXTS)
    else:
        if not test_files:
            unit.message(['WARN', 'No files to lint for {}'.format(lint_name)])
            return
        files_dart = serialize_list(test_files)
    # EXTRA_PARAMS entries are forwarded as name=value pairs; reject anything else
    for arg in spec_args.get('EXTRA_PARAMS', []):
        if '=' not in arg:
            unit.message(['WARN', 'Wrong EXTRA_PARAMS value: "{}". Values must have format "name=value".'.format(arg)])
            return
    deps += spec_args.get('DEPENDS', [])
    for dep in deps:
        unit.ondepends(dep)
    for resource in spec_args.get('GLOBAL_RESOURCES', []):
        unit.onpeerdir(resource)
    test_record = {
        'TEST-NAME': lint_name,
        'SCRIPT-REL-PATH': 'custom_lint',
        'TESTED-PROJECT-NAME': unit.name(),
        'SOURCE-FOLDER-PATH': _common.get_norm_unit_path(unit),
        'CUSTOM-DEPENDENCIES': " ".join(deps),
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        'USE_ARCADIA_PYTHON': unit.get('USE_ARCADIA_PYTHON') or '',
        # TODO remove FILES, see DEVTOOLS-7052
        'FILES': files_dart,
        'TEST-FILES': files_dart,
        # Linter specific parameters
        # TODO Add configs to DATA. See YMAKE-427
        'LINT-CONFIGS': serialize_list(configs),
        'LINT-NAME': lint_name,
        'LINT-FILE-PROCESSING-TIME': spec_args.get('FILE_PROCESSING_TIME', [''])[0],
        'LINT-EXTRA-PARAMS': serialize_list(spec_args.get('EXTRA_PARAMS', [])),
        'LINTER': linter,
    }
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
  948. @df.with_fields(
  949. YTEST_FIELDS_BASE
  950. + (
  951. df.TestName.value,
  952. df.TestPartition.value,
  953. df.ModuleLang.value,
  954. )
  955. )
  956. def clang_tidy(fields, unit, *args):
  957. keywords = {
  958. "DEPENDS": -1,
  959. "DATA": -1,
  960. "TIMEOUT": 1,
  961. "FORK_MODE": 1,
  962. "SPLIT_FACTOR": 1,
  963. "FORK_SUBTESTS": 0,
  964. "FORK_TESTS": 0,
  965. }
  966. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  967. if unit.get("TIDY_CONFIG"):
  968. default_config_path = unit.get("TIDY_CONFIG")
  969. project_config_path = unit.get("TIDY_CONFIG")
  970. else:
  971. default_config_path = get_default_tidy_config(unit)
  972. project_config_path = get_project_tidy_config(unit)
  973. unit.set(["DEFAULT_TIDY_CONFIG", default_config_path])
  974. unit.set(["PROJECT_TIDY_CONFIG", project_config_path])
  975. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  976. data = dump_test(unit, dart_record)
  977. if data:
  978. unit.set_property(["DART_DATA", data])
  979. @df.with_fields(
  980. YTEST_FIELDS_BASE
  981. + YTEST_FIELDS_EXTRA
  982. + (
  983. df.TestName.value,
  984. df.TestData.from_macro_args_and_unit,
  985. df.Requirements.from_macro_args_and_unit,
  986. df.TestPartition.value,
  987. df.ModuleLang.value,
  988. df.DockerImage.value,
  989. )
  990. )
  991. def unittest_py(fields, unit, *args):
  992. keywords = {
  993. "DEPENDS": -1,
  994. "DATA": -1,
  995. "TIMEOUT": 1,
  996. "FORK_MODE": 1,
  997. "SPLIT_FACTOR": 1,
  998. "FORK_SUBTESTS": 0,
  999. "FORK_TESTS": 0,
  1000. }
  1001. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1002. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1003. unit.ondata_files(_common.get_norm_unit_path(unit))
  1004. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1005. data = dump_test(unit, dart_record)
  1006. if data:
  1007. unit.set_property(["DART_DATA", data])
  1008. @df.with_fields(
  1009. YTEST_FIELDS_BASE
  1010. + YTEST_FIELDS_EXTRA
  1011. + (
  1012. df.TestName.value,
  1013. df.TestData.from_macro_args_and_unit,
  1014. df.Requirements.from_macro_args_and_unit,
  1015. df.TestPartition.value,
  1016. df.ModuleLang.value,
  1017. df.DockerImage.value,
  1018. )
  1019. )
  1020. def gunittest(fields, unit, *args):
  1021. keywords = {
  1022. "DEPENDS": -1,
  1023. "DATA": -1,
  1024. "TIMEOUT": 1,
  1025. "FORK_MODE": 1,
  1026. "SPLIT_FACTOR": 1,
  1027. "FORK_SUBTESTS": 0,
  1028. "FORK_TESTS": 0,
  1029. }
  1030. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1031. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1032. unit.ondata_files(_common.get_norm_unit_path(unit))
  1033. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1034. data = dump_test(unit, dart_record)
  1035. if data:
  1036. unit.set_property(["DART_DATA", data])
  1037. @df.with_fields(
  1038. YTEST_FIELDS_BASE
  1039. + YTEST_FIELDS_EXTRA
  1040. + (
  1041. df.TestName.value,
  1042. df.TestData.from_macro_args_and_unit,
  1043. df.Requirements.from_macro_args_and_unit,
  1044. df.TestPartition.value,
  1045. df.ModuleLang.value,
  1046. df.BenchmarkOpts.value,
  1047. df.DockerImage.value,
  1048. )
  1049. )
  1050. def g_benchmark(fields, unit, *args):
  1051. keywords = {
  1052. "DEPENDS": -1,
  1053. "DATA": -1,
  1054. "TIMEOUT": 1,
  1055. "FORK_MODE": 1,
  1056. "SPLIT_FACTOR": 1,
  1057. "FORK_SUBTESTS": 0,
  1058. "FORK_TESTS": 0,
  1059. }
  1060. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1061. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1062. unit.ondata_files(_common.get_norm_unit_path(unit))
  1063. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1064. data = dump_test(unit, dart_record)
  1065. if data:
  1066. unit.set_property(["DART_DATA", data])
  1067. @df.with_fields(
  1068. YTEST_FIELDS_BASE
  1069. + YTEST_FIELDS_EXTRA
  1070. + (
  1071. df.TestName.value,
  1072. df.TestData.from_macro_args_and_unit_with_canonical,
  1073. df.Requirements.from_macro_args_and_unit,
  1074. df.TestPartition.value,
  1075. df.ModuleLang.value,
  1076. df.DockerImage.value,
  1077. )
  1078. )
  1079. def go_test(fields, unit, *args):
  1080. keywords = {
  1081. "DEPENDS": -1,
  1082. "DATA": -1,
  1083. "TIMEOUT": 1,
  1084. "FORK_MODE": 1,
  1085. "SPLIT_FACTOR": 1,
  1086. "FORK_SUBTESTS": 0,
  1087. "FORK_TESTS": 0,
  1088. }
  1089. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1090. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1091. unit.ondata_files(_common.get_norm_unit_path(unit))
  1092. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1093. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1094. data = dump_test(unit, dart_record)
  1095. if data:
  1096. unit.set_property(["DART_DATA", data])
  1097. @df.with_fields(
  1098. YTEST_FIELDS_BASE
  1099. + YTEST_FIELDS_EXTRA
  1100. + (
  1101. df.TestName.value,
  1102. df.TestData.from_macro_args_and_unit,
  1103. df.Requirements.from_macro_args_and_unit,
  1104. df.TestPartition.value,
  1105. df.DockerImage.value,
  1106. )
  1107. )
  1108. def boost_test(fields, unit, *args):
  1109. keywords = {
  1110. "DEPENDS": -1,
  1111. "DATA": -1,
  1112. "TIMEOUT": 1,
  1113. "FORK_MODE": 1,
  1114. "SPLIT_FACTOR": 1,
  1115. "FORK_SUBTESTS": 0,
  1116. "FORK_TESTS": 0,
  1117. }
  1118. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1119. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1120. unit.ondata_files(_common.get_norm_unit_path(unit))
  1121. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1122. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1123. data = dump_test(unit, dart_record)
  1124. if data:
  1125. unit.set_property(["DART_DATA", data])
  1126. @df.with_fields(
  1127. YTEST_FIELDS_BASE
  1128. + YTEST_FIELDS_EXTRA
  1129. + (
  1130. df.TestName.value,
  1131. df.TestData.from_macro_args_and_unit,
  1132. df.Requirements.with_maybe_fuzzing,
  1133. df.FuzzDicts.value,
  1134. df.FuzzOpts.value,
  1135. df.Fuzzing.value,
  1136. df.DockerImage.value,
  1137. )
  1138. )
  1139. def fuzz_test(fields, unit, *args):
  1140. keywords = {
  1141. "DEPENDS": -1,
  1142. "DATA": -1,
  1143. "TIMEOUT": 1,
  1144. "FORK_MODE": 1,
  1145. "SPLIT_FACTOR": 1,
  1146. "FORK_SUBTESTS": 0,
  1147. "FORK_TESTS": 0,
  1148. }
  1149. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1150. if unit.get('ADD_SRCDIR_TO_TEST_DATA') == "yes":
  1151. unit.ondata_files(_common.get_norm_unit_path(unit))
  1152. unit.ondata_files("fuzzing/{}/corpus.json".format(_common.get_norm_unit_path(unit)))
  1153. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1154. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1155. data = dump_test(unit, dart_record)
  1156. if data:
  1157. unit.set_property(["DART_DATA", data])
  1158. @df.with_fields(
  1159. YTEST_FIELDS_BASE
  1160. + YTEST_FIELDS_EXTRA
  1161. + (
  1162. df.TestName.value,
  1163. df.TestData.from_macro_args_and_unit,
  1164. df.Requirements.from_macro_args_and_unit,
  1165. df.TestPartition.value,
  1166. df.ModuleLang.value,
  1167. df.BenchmarkOpts.value,
  1168. df.DockerImage.value,
  1169. )
  1170. )
  1171. def y_benchmark(fields, unit, *args):
  1172. keywords = {
  1173. "DEPENDS": -1,
  1174. "DATA": -1,
  1175. "TIMEOUT": 1,
  1176. "FORK_MODE": 1,
  1177. "SPLIT_FACTOR": 1,
  1178. "FORK_SUBTESTS": 0,
  1179. "FORK_TESTS": 0,
  1180. }
  1181. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1182. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1183. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1184. data = dump_test(unit, dart_record)
  1185. if data:
  1186. unit.set_property(["DART_DATA", data])
  1187. @df.with_fields(
  1188. YTEST_FIELDS_BASE
  1189. + YTEST_FIELDS_EXTRA
  1190. + (
  1191. df.TestName.value,
  1192. df.TestData.from_macro_args_and_unit,
  1193. df.Requirements.from_macro_args_and_unit,
  1194. df.TestPartition.value,
  1195. )
  1196. )
  1197. def coverage_extractor(fields, unit, *args):
  1198. keywords = {
  1199. "DEPENDS": -1,
  1200. "DATA": -1,
  1201. "TIMEOUT": 1,
  1202. "FORK_MODE": 1,
  1203. "SPLIT_FACTOR": 1,
  1204. "FORK_SUBTESTS": 0,
  1205. "FORK_TESTS": 0,
  1206. }
  1207. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1208. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1209. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1210. data = dump_test(unit, dart_record)
  1211. if data:
  1212. unit.set_property(["DART_DATA", data])
  1213. @df.with_fields(
  1214. YTEST_FIELDS_BASE
  1215. + YTEST_FIELDS_EXTRA
  1216. + (
  1217. df.TestName.first_flat_with_bench,
  1218. df.TestData.from_macro_args_and_unit,
  1219. df.Requirements.from_macro_args_and_unit,
  1220. df.TestPartition.value,
  1221. df.GoBenchTimeout.value,
  1222. df.ModuleLang.value,
  1223. df.DockerImage.value,
  1224. )
  1225. )
  1226. def go_bench(fields, unit, *args):
  1227. keywords = {
  1228. "DEPENDS": -1,
  1229. "DATA": -1,
  1230. "TIMEOUT": 1,
  1231. "FORK_MODE": 1,
  1232. "SPLIT_FACTOR": 1,
  1233. "FORK_SUBTESTS": 0,
  1234. "FORK_TESTS": 0,
  1235. }
  1236. flat_args, spec_args = _common.sort_by_keywords(keywords, args)
  1237. tags = df.Tag.from_macro_args_and_unit(unit, flat_args, spec_args)[df.Tag.KEY]
  1238. if "ya:run_go_benchmark" not in tags:
  1239. return
  1240. unit.ondata_files(get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE'))
  1241. dart_record = create_dart_record(fields, unit, flat_args, spec_args)
  1242. data = dump_test(unit, dart_record)
  1243. if data:
  1244. unit.set_property(["DART_DATA", data])
def onadd_ytest(unit, *args):
    """Dispatch the ADD_YTEST macro to the matching per-test-type handler.

    The second free argument is the test type; each handler re-parses the full
    macro argument list itself and emits the DART record for its test kind.
    """
    keywords = {
        "DEPENDS": -1,
        "DATA": -1,
        "TIMEOUT": 1,
        "FORK_MODE": 1,
        "SPLIT_FACTOR": 1,
        "FORK_SUBTESTS": 0,
        "FORK_TESTS": 0,
    }
    # only the free (non-keyword) args are needed here to pick the test type
    flat_args, *_ = _common.sort_by_keywords(keywords, args)
    test_type = flat_args[1]
    # TIDY not supported for module
    if unit.get("TIDY_ENABLED") == "yes" and test_type != "clang_tidy":
        return
    # TIDY explicitly disabled for module in ymake.core.conf
    elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") != "yes":
        return
    # TIDY disabled for module in ya.make
    elif unit.get("TIDY") == "yes" and unit.get("TIDY_ENABLED") != "yes":
        return
    elif test_type == "no.test":
        return
    elif test_type == "clang_tidy" and unit.get("TIDY_ENABLED") == "yes":
        clang_tidy(unit, *args)
    elif test_type == "unittest.py":
        unittest_py(unit, *args)
    elif test_type == "gunittest":
        gunittest(unit, *args)
    elif test_type == "g_benchmark":
        g_benchmark(unit, *args)
    elif test_type == "go.test":
        go_test(unit, *args)
    elif test_type == "boost.test":
        boost_test(unit, *args)
    elif test_type == "fuzz.test":
        fuzz_test(unit, *args)
    elif test_type == "y_benchmark":
        y_benchmark(unit, *args)
    # coverage.extractor is registered only when the unit meets its requirements
    elif test_type == "coverage.extractor" and match_coverage_extractor_requirements(unit):
        coverage_extractor(unit, *args)
    elif test_type == "go.bench":
        go_bench(unit, *args)