import os
import re
import sys
import json
import copy
import base64
import shlex
import _common
import lib._metric_resolvers as mr
import _test_const as consts
import _requirements as reqs
import StringIO
import subprocess
import collections

import ymake


MDS_URI_PREFIX = 'https://storage.yandex-team.ru/get-devtools/'
MDS_SHEME = 'mds'
CANON_DATA_DIR_NAME = 'canondata'
CANON_OUTPUT_STORAGE = 'canondata_storage'
CANON_RESULT_FILE_NAME = 'result.json'
CANON_MDS_RESOURCE_REGEX = re.compile(re.escape(MDS_URI_PREFIX) + r'(.*?)($|#)')
CANON_SB_VAULT_REGEX = re.compile(r"\w+=(value|file):[-\w]+:\w+")
CANON_SBR_RESOURCE_REGEX = re.compile(r'(sbr:/?/?(\d+))')

VALID_NETWORK_REQUIREMENTS = ("full", "restricted")
VALID_DNS_REQUIREMENTS = ("default", "local", "dns64")
BLOCK_SEPARATOR = '============================================================='
SPLIT_FACTOR_MAX_VALUE = 1000
SPLIT_FACTOR_TEST_FILES_MAX_VALUE = 4250
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
DEFAULT_TIDY_CONFIG = "build/config/tests/clang_tidy/config.yaml"
DEFAULT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_default_map.json"
PROJECT_TIDY_CONFIG_MAP_PATH = "build/yandex_specific/config/clang_tidy/tidy_project_map.json"

tidy_config_map = None


def ontest_data(unit, *args):
    ymake.report_configure_error("TEST_DATA is removed in favour of DATA")


def save_in_file(filepath, data):
    if filepath:
        with open(filepath, 'a') as file_handler:
            if os.stat(filepath).st_size == 0:
                print >>file_handler, BLOCK_SEPARATOR
            print >>file_handler, data


def prepare_recipes(data):
    data = data.replace('"USE_RECIPE_DELIM"', "\n")
    data = data.replace("$TEST_RECIPES_VALUE", "")
    return base64.b64encode(data or "")


def prepare_env(data):
    data = data.replace("$TEST_ENV_VALUE", "")
    return serialize_list(shlex.split(data))
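
# Illustration of the two helpers above (input values are made up, not taken from a real ya.make):
#   prepare_env('$TEST_ENV_VALUE A=1 B=hello') drops the variable reference, shlex-splits the
#   remainder and serializes it, i.e. it returns '"A=1;B=hello"'.
#   prepare_recipes() analogously strips $TEST_RECIPES_VALUE, turns "USE_RECIPE_DELIM" markers
#   into newlines and returns the result base64-encoded.

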
def is_yt_spec_contain_pool_info(filename):  # XXX switch to yson in ymake + perf test for configure
    pool_re = re.compile(r"""['"]*pool['"]*\s*?=""")
    cypress_root_re = re.compile(r"""['"]*cypress_root['"]*\s*=""")
    with open(filename, 'r') as afile:
        yt_spec = afile.read()
        return pool_re.search(yt_spec) and cypress_root_re.search(yt_spec)
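
# A YT spec is considered to carry pool info only when both regexes above match, e.g. a spec
# containing the (hypothetical) lines
#   pool = "research"
#   cypress_root = "//home/project"
# would be detected, while a spec with only one of the two keys would not.

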
def validate_sb_vault(name, value):
    if not CANON_SB_VAULT_REGEX.match(value):
        return "sb_vault value '{}' should follow the pattern <ENV_NAME>=<value|file>:<owner>:<vault key>".format(value)
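
# Example of a value accepted by CANON_SB_VAULT_REGEX (all names here are made up):
#   MY_TOKEN=value:my-group:my_secret_key
# i.e. <ENV_NAME>=<value|file>:<owner>:<vault key>, which is what the error message describes.

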
def validate_numerical_requirement(name, value):
    if mr.resolve_value(value) is None:
        return "Cannot convert [[imp]]{}[[rst]] to the proper [[imp]]{}[[rst]] requirement value".format(value, name)


def validate_choice_requirement(name, val, valid):
    if val not in valid:
        return "Unknown [[imp]]{}[[rst]] requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(name, val, ", ".join(valid))


def validate_force_sandbox_requirement(name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, check_func):
    if is_force_sandbox or not in_autocheck or is_fuzzing or is_ytexec_run:
        if value == 'all':
            return
        return validate_numerical_requirement(name, value)
    error_msg = validate_numerical_requirement(name, value)
    if error_msg:
        return error_msg
    return check_func(mr.resolve_value(value), test_size, is_kvm)


# TODO: Remove is_kvm param when there will be guarantees on RAM
def validate_requirement(req_name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run):
    req_checks = {
        'container': validate_numerical_requirement,
        'cpu': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_cpu),
        'disk_usage': validate_numerical_requirement,
        'dns': lambda n, v: validate_choice_requirement(n, v, VALID_DNS_REQUIREMENTS),
        'kvm': None,
        'network': lambda n, v: validate_choice_requirement(n, v, VALID_NETWORK_REQUIREMENTS),
        'ram': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_ram),
        'ram_disk': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run, reqs.check_ram_disk),
        'sb': None,
        'sb_vault': validate_sb_vault,
    }

    if req_name not in req_checks:
        return "Unknown requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(req_name, ", ".join(sorted(req_checks)))

    if req_name in ('container', 'disk') and not is_force_sandbox:
        return "Only [[imp]]LARGE[[rst]] tests without [[imp]]ya:force_distbuild[[rst]] tag can have [[imp]]{}[[rst]] requirement".format(req_name)

    check_func = req_checks[req_name]
    if check_func:
        return check_func(req_name, value)
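
# Illustration (the trailing flag values are arbitrary, since the 'dns' check ignores them):
#   validate_requirement('dns', 'local', 'small', False, True, False, False, False)  -> None (valid)
#   validate_requirement('dns', 'bogus', ...)  -> an error string listing default, local, dns64

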
def validate_test(unit, kw):
    def get_list(key):
        return deserialize_list(kw.get(key, ""))

    valid_kw = copy.deepcopy(kw)
    errors = []
    warnings = []
    if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(("contrib", "mail", "maps", "tools/idl", "metrika", "devtools", "mds", "yandex_io", "smart_devices")):
            errors.append("BOOSTTEST is not allowed here")
    elif valid_kw.get('SCRIPT-REL-PATH') == 'gtest':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(("contrib", "devtools", "mail", "mds", "taxi")):
            errors.append("GTEST_UGLY is not allowed here, use GTEST instead")

    size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))

    size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
    # TODO: use a set instead of a list
    tags = get_list("TAG")
    requirements_orig = get_list("REQUIREMENTS")
    in_autocheck = "ya:not_autocheck" not in tags and 'ya:manual' not in tags
    is_fat = 'ya:fat' in tags
    is_force_sandbox = 'ya:force_distbuild' not in tags and is_fat
    is_ytexec_run = 'ya:yt' in tags
    is_fuzzing = valid_kw.get("FUZZING", False)
    is_kvm = 'kvm' in requirements_orig
    requirements = {}
    list_requirements = ('sb_vault', )
    for req in requirements_orig:
        if req in ('kvm', ):
            requirements[req] = str(True)
            continue

        if ":" in req:
            req_name, req_value = req.split(":", 1)
            if req_name in list_requirements:
                requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
            else:
                if req_name in requirements:
                    if req_value in ["0"]:
                        warnings.append("Requirement [[imp]]{}[[rst]] is dropped [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
                        del requirements[req_name]
                    elif requirements[req_name] != req_value:
                        warnings.append("Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
                        requirements[req_name] = req_value
                else:
                    requirements[req_name] = req_value
        else:
            errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))

    if not errors:
        for req_name, req_value in requirements.items():
            error_msg = validate_requirement(req_name, req_value, size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, is_ytexec_run)
            if error_msg:
                errors += [error_msg]

    invalid_requirements_for_distbuild = [requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')]
    sb_tags = [tag for tag in tags if tag.startswith('sb:')]

    if is_fat:
        if size != consts.TestSize.Large:
            errors.append("Only LARGE test may have ya:fat tag")

        if in_autocheck and not is_force_sandbox:
            if invalid_requirements_for_distbuild:
                errors.append("'{}' REQUIREMENTS options can be used only for FAT tests without ya:force_distbuild tag. Remove TAG(ya:force_distbuild) or an option.".format(invalid_requirements_for_distbuild))
            if sb_tags:
                errors.append("You can set sandbox tags '{}' only for FAT tests without ya:force_distbuild. Remove TAG(ya:force_sandbox) or sandbox tags.".format(sb_tags))
            if 'ya:sandbox_coverage' in tags:
                errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests without ya:force_distbuild.")
    else:
        if is_force_sandbox:
            errors.append('ya:force_sandbox can be used with LARGE tests only')
        if 'ya:nofuse' in tags:
            errors.append('ya:nofuse can be used with LARGE tests only')
        if 'ya:privileged' in tags:
            errors.append("ya:privileged can be used with LARGE tests only")
        if in_autocheck and size == consts.TestSize.Large:
            errors.append("LARGE test must have ya:fat tag")

    if 'ya:privileged' in tags and 'container' not in requirements:
        errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")

    if size not in size_timeout:
        errors.append("Unknown test size: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(size.upper(), ", ".join([sz.upper() for sz in size_timeout.keys()])))
    else:
        try:
            timeout = int(valid_kw.get('TEST-TIMEOUT', size_timeout[size]) or size_timeout[size])
            script_rel_path = valid_kw.get('SCRIPT-REL-PATH')
            if timeout < 0:
                raise Exception("Timeout must be > 0")
            if size_timeout[size] < timeout and in_autocheck and script_rel_path != 'java.style':
                suggested_size = None
                for s, t in size_timeout.items():
                    if timeout <= t:
                        suggested_size = s
                        break

                if suggested_size:
                    suggested_size = ", suggested size: [[imp]]{}[[rst]]".format(suggested_size.upper())
                else:
                    suggested_size = ""
                errors.append("Max allowed timeout for test size [[imp]]{}[[rst]] is [[imp]]{} sec[[rst]]{}".format(size.upper(), size_timeout[size], suggested_size))
        except Exception as e:
            errors.append("Error when parsing test timeout: [[bad]]{}[[rst]]".format(e))

    requirements_list = []
    for req_name, req_value in requirements.iteritems():
        requirements_list.append(req_name + ":" + req_value)
    valid_kw['REQUIREMENTS'] = serialize_list(requirements_list)

    if valid_kw.get("FUZZ-OPTS"):
        for option in get_list("FUZZ-OPTS"):
            if not option.startswith("-"):
                errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(option))
                break
            eqpos = option.find("=")
            if eqpos == -1 or len(option) == eqpos + 1:
                errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should have a value specified after '='".format(option))
                break
            if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
                errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
                break
            if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
                errors.append("You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(option))
                break

    if valid_kw.get("YT-SPEC"):
        if not is_ytexec_run:
            errors.append("You can use the YT_SPEC macro only for tests marked with the ya:yt tag")
        else:
            for filename in get_list("YT-SPEC"):
                filename = unit.resolve('$S/' + filename)
                if not os.path.exists(filename):
                    errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
                    continue
                if is_yt_spec_contain_pool_info(filename) and "ya:external" not in tags:
                    tags.append("ya:external")
                    tags.append("ya:yt_research_pool")

    if valid_kw.get("USE_ARCADIA_PYTHON") == "yes" and valid_kw.get("SCRIPT-REL-PATH") == "py.test":
        errors.append("PYTEST_SCRIPT is deprecated")

    partition = valid_kw.get('TEST_PARTITION', 'SEQUENTIAL')
    if partition not in PARTITION_MODS:
        raise ValueError('partition mode should be one of {}, detected: {}'.format(PARTITION_MODS, partition))

    if valid_kw.get('SPLIT-FACTOR'):
        if valid_kw.get('FORK-MODE') == 'none':
            errors.append('SPLIT_FACTOR must be used with the FORK_TESTS() or FORK_SUBTESTS() macro')

        value = 1
        try:
            value = int(valid_kw.get('SPLIT-FACTOR'))
            if value <= 0:
                raise ValueError("must be > 0")
            if value > SPLIT_FACTOR_MAX_VALUE:
                raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
        except ValueError as e:
            errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))

        if valid_kw.get('FORK-TEST-FILES') and size != consts.TestSize.Large:
            nfiles = count_entries(valid_kw.get('TEST-FILES'))
            if nfiles * value > SPLIT_FACTOR_TEST_FILES_MAX_VALUE:
                errors.append('Too many chunks generated: {} (limit: {}). Remove the FORK_TEST_FILES() macro or reduce SPLIT_FACTOR({}).'.format(
                    nfiles * value, SPLIT_FACTOR_TEST_FILES_MAX_VALUE, value))

    unit_path = get_norm_unit_path(unit)
    if not is_fat and "ya:noretries" in tags and not is_ytexec_run \
            and not unit_path.startswith("devtools/") \
            and not unit_path.startswith("infra/kernel/") \
            and not unit_path.startswith("yt/python/yt") \
            and not unit_path.startswith("infra/yp_dns_api/tests") \
            and not unit_path.startswith("yp/tests"):
        errors.append("Only LARGE tests can have 'ya:noretries' tag")

    if errors:
        return None, warnings, errors

    return valid_kw, warnings, errors


def get_norm_unit_path(unit, extra=None):
    path = _common.strip_roots(unit.path())
    if extra:
        return '{}/{}'.format(path, extra)
    return path


def dump_test(unit, kw):
    valid_kw, warnings, errors = validate_test(unit, kw)
    for w in warnings:
        unit.message(['warn', w])
    for e in errors:
        ymake.report_configure_error(e)
    if valid_kw is None:
        return None
    string_handler = StringIO.StringIO()
    for k, v in valid_kw.iteritems():
        print >>string_handler, k + ': ' + v
    print >>string_handler, BLOCK_SEPARATOR
    data = string_handler.getvalue()
    string_handler.close()
    return data


def serialize_list(lst):
    lst = filter(None, lst)
    return '\"' + ';'.join(lst) + '\"' if lst else ''


def deserialize_list(val):
    return filter(None, val.replace('"', "").split(";"))


def count_entries(x):
    # see (de)serialize_list
    assert x is None or isinstance(x, str), type(x)
    if not x:
        return 0
    return x.count(";") + 1
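
# The dart record list format in a nutshell (illustrative values):
#   serialize_list(['a', '', 'b'])  -> '"a;b"'   (empty entries are dropped, the result is quoted)
#   deserialize_list('"a;b"')       -> ['a', 'b']
#   count_entries('"a;b"')          -> 2, while count_entries('') and count_entries(None) -> 0

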
def get_values_list(unit, key):
    res = map(str.strip, (unit.get(key) or '').replace('$' + key, '').strip().split())
    return [r for r in res if r and r not in ['""', "''"]]


def get_norm_paths(unit, key):
    # return paths without trailing (back)slash
    return [x.rstrip('\\/') for x in get_values_list(unit, key)]


def get_unit_list_variable(unit, name):
    items = unit.get(name)
    if items:
        items = items.split(' ')
        assert items[0] == "${}".format(name), (items, name)
        return items[1:]
    return []


def implies(a, b):
    return bool((not a) or b)


def match_coverage_extractor_requirements(unit):
    # Add the coverage.extractor test only when all of the following hold:
    return all([
        # tests are requested for this build
        unit.get("TESTS_REQUESTED") == "yes",
        # the build is configured with clang coverage, which supports segment extraction from the binaries
        unit.get("CLANG_COVERAGE") == "yes",
        # coverage for contrib is explicitly enabled whenever the unit lives under contrib/
        implies(get_norm_unit_path(unit).startswith("contrib/"), unit.get("ENABLE_CONTRIB_COVERAGE") == "yes"),
    ])


def get_tidy_config_map(unit):
    global tidy_config_map
    if tidy_config_map is None:
        config_map_path = unit.resolve(os.path.join("$S", PROJECT_TIDY_CONFIG_MAP_PATH))
        with open(config_map_path, 'r') as afile:
            tidy_config_map = json.load(afile)
    return tidy_config_map


def get_default_tidy_config(unit):
    unit_path = get_norm_unit_path(unit)
    default_config_map_path = unit.resolve(os.path.join("$S", DEFAULT_TIDY_CONFIG_MAP_PATH))
    with open(default_config_map_path, 'r') as afile:
        tidy_default_config_map = json.load(afile)
    for project_prefix, config_path in tidy_default_config_map.items():
        if unit_path.startswith(project_prefix):
            return config_path
    return DEFAULT_TIDY_CONFIG


def get_project_tidy_config(unit):
    tidy_map = get_tidy_config_map(unit)
    unit_path = get_norm_unit_path(unit)

    for project_prefix, config_path in tidy_map.items():
        if unit_path.startswith(project_prefix):
            return config_path
    else:
        return get_default_tidy_config(unit)


def onadd_ytest(unit, *args):
    keywords = {"DEPENDS": -1, "DATA": -1, "TIMEOUT": 1, "FORK_MODE": 1, "SPLIT_FACTOR": 1,
                "FORK_SUBTESTS": 0, "FORK_TESTS": 0}
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)

    test_data = sorted(_common.filter_out_by_keyword(spec_args.get('DATA', []) + get_norm_paths(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'))

    if flat_args[1] == "fuzz.test":
        unit.ondata("arcadia/fuzzing/{}/corpus.json".format(get_norm_unit_path(unit)))
    elif flat_args[1] == "go.test":
        data, _ = get_canonical_test_resources(unit)
        test_data += data
    elif flat_args[1] == "coverage.extractor" and not match_coverage_extractor_requirements(unit):
        # XXX
        # The current ymake implementation doesn't allow calling a macro inside the 'when' body,
        # that's why ADD_YTEST(coverage.extractor) is added to every PROGRAM entry and the requirements are checked later.
        return
    elif flat_args[1] == "clang_tidy" and unit.get("TIDY") != "yes":
        # Graph is not prepared
        return
    elif flat_args[1] == "no.test":
        return

    test_size = ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME') or ''
    test_tags = serialize_list(_get_test_tags(unit, spec_args))
    test_timeout = ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or ''
    test_requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')

    if flat_args[1] != "clang_tidy" and unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        if flat_args[1] in ("unittest.py", "gunittest", "g_benchmark"):
            flat_args[1] = "clang_tidy"
            test_size = 'SMALL'
            test_tags = ''
            test_timeout = "60"
            test_requirements = []
            unit.set(["TEST_YT_SPEC_VALUE", ""])
        else:
            return

    if flat_args[1] == "clang_tidy" and unit.get("TIDY") == "yes":
        if unit.get("TIDY_CONFIG"):
            default_config_path = unit.get("TIDY_CONFIG")
            project_config_path = unit.get("TIDY_CONFIG")
        else:
            default_config_path = get_default_tidy_config(unit)
            project_config_path = get_project_tidy_config(unit)

        unit.set(["DEFAULT_TIDY_CONFIG", default_config_path])
        unit.set(["PROJECT_TIDY_CONFIG", project_config_path])

    fork_mode = []
    if 'FORK_SUBTESTS' in spec_args:
        fork_mode.append('subtests')
    if 'FORK_TESTS' in spec_args:
        fork_mode.append('tests')
    fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
    fork_mode = ' '.join(fork_mode) if fork_mode else ''

    unit_path = get_norm_unit_path(unit)

    test_record = {
        'TEST-NAME': flat_args[0],
        'SCRIPT-REL-PATH': flat_args[1],
        'TESTED-PROJECT-NAME': unit.name(),
        'TESTED-PROJECT-FILENAME': unit.filename(),
        'SOURCE-FOLDER-PATH': unit_path,
        # TODO get rid of BUILD-FOLDER-PATH
        'BUILD-FOLDER-PATH': unit_path,
        'BINARY-PATH': "{}/{}".format(unit_path, unit.filename()),
        'GLOBAL-LIBRARY-PATH': unit.global_filename(),
        'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        # 'TEST-PRESERVE-ENV': 'da',
        'TEST-DATA': serialize_list(test_data),
        'TEST-TIMEOUT': test_timeout,
        'FORK-MODE': fork_mode,
        'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR') or '',
        'SIZE': test_size,
        'TAG': test_tags,
        'REQUIREMENTS': serialize_list(test_requirements),
        'TEST-CWD': unit.get('TEST_CWD_VALUE') or '',
        'FUZZ-DICTS': serialize_list(spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE')),
        'FUZZ-OPTS': serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE')),
        'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
        'BLOB': unit.get('TEST_BLOB_DATA') or '',
        'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
        'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE') or '',
        'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE') or '',
        'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE') or '',
        'TEST_PARTITION': unit.get("TEST_PARTITION") or 'SEQUENTIAL',
        'GO_BENCH_TIMEOUT': unit.get('GO_BENCH_TIMEOUT') or '',
    }

    if flat_args[1] == "go.bench":
        if "ya:run_go_benchmark" not in test_record["TAG"]:
            return
        else:
            test_record["TEST-NAME"] += "_bench"

    if flat_args[1] == 'fuzz.test' and unit.get('FUZZING') == 'yes':
        test_record['FUZZING'] = '1'
        # use all cores if fuzzing is requested
        test_record['REQUIREMENTS'] = serialize_list(filter(None, deserialize_list(test_record['REQUIREMENTS']) + ["cpu:all", "ram:all"]))

    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)


def java_srcdirs_to_data(unit, var):
    extra_data = []
    for srcdir in (unit.get(var) or '').replace('$' + var, '').split():
        if srcdir == '.':
            srcdir = unit.get('MODDIR')
        if srcdir.startswith('${ARCADIA_ROOT}/') or srcdir.startswith('$ARCADIA_ROOT/'):
            srcdir = srcdir.replace('${ARCADIA_ROOT}/', '$S/')
            srcdir = srcdir.replace('$ARCADIA_ROOT/', '$S/')
        if srcdir.startswith('${CURDIR}/') or srcdir.startswith('$CURDIR/'):
            srcdir = srcdir.replace('${CURDIR}/', os.path.join('$S', unit.get('MODDIR')))
            srcdir = srcdir.replace('$CURDIR/', os.path.join('$S', unit.get('MODDIR')))
        srcdir = unit.resolve_arc_path(srcdir)
        if not srcdir.startswith('$'):
            srcdir = os.path.join('$S', unit.get('MODDIR'), srcdir)
        if srcdir.startswith('$S'):
            extra_data.append(srcdir.replace('$S', 'arcadia'))
    return serialize_list(extra_data)
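
# Net effect of java_srcdirs_to_data(): source directories that resolve under the source root
# are reported with the 'arcadia' prefix, e.g. a srcdir resolved to '$S/foo/bar' is emitted as
# 'arcadia/foo/bar' ('foo/bar' is a made-up path here); '.' is first expanded to the module dir.

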
def onadd_check(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    flat_args, spec_args = _common.sort_by_keywords(
        {"DEPENDS": -1, "TIMEOUT": 1, "DATA": -1, "TAG": -1, "REQUIREMENTS": -1, "FORK_MODE": 1,
         "SPLIT_FACTOR": 1, "FORK_SUBTESTS": 0, "FORK_TESTS": 0, "SIZE": 1}, args)
    check_type = flat_args[0]
    test_dir = get_norm_unit_path(unit)

    test_timeout = ''
    fork_mode = ''
    extra_test_data = ''
    extra_test_dart_data = {}
    ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'

    if check_type in ["flake8.py2", "flake8.py3"]:
        script_rel_path = check_type
        fork_mode = unit.get('TEST_FORK_MODE') or ''
    elif check_type == "JAVA_STYLE":
        if ymake_java_test and not unit.get('ALL_SRCDIRS'):
            return
        if len(flat_args) < 2:
            raise Exception("Not enough arguments for JAVA_STYLE check")
        check_level = flat_args[1]
        allowed_levels = {
            'base': '/yandex_checks.xml',
            'strict': '/yandex_checks_strict.xml',
            'extended': '/yandex_checks_extended.xml',
            'library': '/yandex_checks_library.xml',
        }
        if check_level not in allowed_levels:
            raise Exception('{} is not allowed in LINT(), use one of {}'.format(check_level, allowed_levels.keys()))
        flat_args[1] = allowed_levels[check_level]
        if check_level == 'none':
            return
        script_rel_path = "java.style"
        test_timeout = '120'
        fork_mode = unit.get('TEST_FORK_MODE') or ''
        if ymake_java_test:
            extra_test_data = java_srcdirs_to_data(unit, 'ALL_SRCDIRS')
        extra_test_dart_data['JDK_RESOURCE'] = 'JDK' + (unit.get('JDK_VERSION') or '_DEFAULT')
    elif check_type == "gofmt":
        script_rel_path = check_type
        go_files = flat_args[1:]
        if go_files:
            test_dir = os.path.dirname(go_files[0]).lstrip("$S/")
    else:
        script_rel_path = check_type

    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    uid_ext = ''
    if check_type in ("check.data", "check.resource"):
        if unit.get("VALIDATE_DATA") == "no":
            return
    if check_type == "check.data":
        uid_ext = unit.get("SBR_UID_EXT").split(" ", 1)[-1]  # strip variable name
        data_re = re.compile(r"sbr:/?/?(\d+)=?.*")
        data = flat_args[1:]
        resources = []
        for f in data:
            matched = re.match(data_re, f)
            if matched:
                resources.append(matched.group(1))
        if resources:
            test_files = serialize_list(resources)
        else:
            return
    else:
        test_files = serialize_list(flat_args[1:])

    test_record = {
        'TEST-NAME': check_type.lower(),
        'TEST-TIMEOUT': test_timeout,
        'SCRIPT-REL-PATH': script_rel_path,
        'TESTED-PROJECT-NAME': os.path.basename(test_dir),
        'SOURCE-FOLDER-PATH': test_dir,
        'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
        'TEST-DATA': extra_test_data,
        "SBR-UID-EXT": uid_ext,
        'SPLIT-FACTOR': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': fork_mode,
        'FORK-TEST-FILES': '',
        'SIZE': 'SMALL',
        'TAG': '',
        'REQUIREMENTS': '',
        'USE_ARCADIA_PYTHON': use_arcadia_python or '',
        'OLD_PYTEST': 'no',
        'PYTHON-PATHS': '',
        # TODO remove FILES, see DEVTOOLS-7052
        'FILES': test_files,
        'TEST-FILES': test_files,
        'NO_JBUILD': 'yes' if ymake_java_test else 'no',
    }
    test_record.update(extra_test_dart_data)

    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)


def on_register_no_check_imports(unit):
    s = unit.get('NO_CHECK_IMPORTS_FOR_VALUE')
    if s not in ('', 'None'):
        unit.onresource(['-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(s), s)])


def onadd_check_py_imports(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
        return
    unit.onpeerdir(['library/python/testing/import_test'])
    check_type = "py.imports"
    test_dir = get_norm_unit_path(unit)

    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    test_files = serialize_list([get_norm_unit_path(unit, unit.filename())])
    test_record = {
        'TEST-NAME': "pyimports",
        'TEST-TIMEOUT': '',
        'SCRIPT-REL-PATH': check_type,
        'TESTED-PROJECT-NAME': os.path.basename(test_dir),
        'SOURCE-FOLDER-PATH': test_dir,
        'CUSTOM-DEPENDENCIES': '',
        'TEST-DATA': '',
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        'SPLIT-FACTOR': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': '',
        'FORK-TEST-FILES': '',
        'SIZE': 'SMALL',
        'TAG': '',
        'USE_ARCADIA_PYTHON': use_arcadia_python or '',
        'OLD_PYTEST': 'no',
        'PYTHON-PATHS': '',
        # TODO remove FILES, see DEVTOOLS-7052
        'FILES': test_files,
        'TEST-FILES': test_files,
    }
    if unit.get('NO_CHECK_IMPORTS_FOR_VALUE') != "None":
        test_record["NO-CHECK"] = serialize_list(get_values_list(unit, 'NO_CHECK_IMPORTS_FOR_VALUE') or ["*"])
    else:
        test_record["NO-CHECK"] = ''
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)


def onadd_pytest_script(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    unit.set(["PYTEST_BIN", "no"])
    custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
    timeout = filter(None, [unit.get(["TEST_TIMEOUT"])])
    if timeout:
        timeout = timeout[0]
    else:
        timeout = '0'
    test_type = args[0]
    fork_mode = unit.get('TEST_FORK_MODE').split() or ''
    split_factor = unit.get('TEST_SPLIT_FACTOR') or ''
    test_size = unit.get('TEST_SIZE_NAME') or ''

    test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
    tags = _get_test_tags(unit)
    requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
    test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
    data, data_files = get_canonical_test_resources(unit)
    test_data += data
    python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
    binary_path = None
    test_cwd = unit.get('TEST_CWD_VALUE') or ''
    _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, data_files=data_files)


def onadd_pytest_bin(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    flat, kws = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
    if flat:
        ymake.report_configure_error(
            'Unknown arguments found while processing add_pytest_bin macro: {!r}'
            .format(flat)
        )

    runner_bin = kws.get('RUNNER_BIN', [None])[0]
    test_type = 'py3test.bin' if (unit.get("PYTHON3") == 'yes') else "pytest.bin"

    add_test_to_dart(unit, test_type, runner_bin=runner_bin)


def add_test_to_dart(unit, test_type, binary_path=None, runner_bin=None):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
    timeout = filter(None, [unit.get(["TEST_TIMEOUT"])])
    if timeout:
        timeout = timeout[0]
    else:
        timeout = '0'
    fork_mode = unit.get('TEST_FORK_MODE').split() or ''
    split_factor = unit.get('TEST_SPLIT_FACTOR') or ''
    test_size = unit.get('TEST_SIZE_NAME') or ''
    test_cwd = unit.get('TEST_CWD_VALUE') or ''

    unit_path = unit.path()
    test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
    tags = _get_test_tags(unit)
    requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
    test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
    data, data_files = get_canonical_test_resources(unit)
    test_data += data
    python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
    yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
    if not binary_path:
        binary_path = os.path.join(unit_path, unit.filename())
    _dump_test(unit, test_type, test_files, timeout, get_norm_unit_path(unit), custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, runner_bin=runner_bin, yt_spec=yt_spec, data_files=data_files)


def extract_java_system_properties(unit, args):
    if len(args) % 2:
        return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())

    props = []
    for x, y in zip(args[::2], args[1::2]):
        if x == 'FILE':
            if y.startswith('${BINDIR}') or y.startswith('${ARCADIA_BUILD_ROOT}') or y.startswith('/'):
                return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), y)

            y = _common.rootrel_arc_src(y, unit)
            if not os.path.exists(unit.resolve('$S/' + y)):
                return [], 'Wrong use of SYSTEM_PROPERTIES in {}: can\'t resolve {}'.format(unit.path(), y)

            y = '${ARCADIA_ROOT}/' + y
            props.append({'type': 'file', 'path': y})
        else:
            props.append({'type': 'inline', 'key': x, 'value': y})

    return props, None


def onjava_test(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    assert unit.get('MODULE_TYPE') is not None

    if unit.get('MODULE_TYPE') == 'JTEST_FOR':
        if not unit.get('UNITTEST_DIR'):
            ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
            return

    java_cp_arg_type = unit.get('JAVA_CLASSPATH_CMD_TYPE_VALUE') or 'MANIFEST'
    if java_cp_arg_type not in ('MANIFEST', 'COMMAND_FILE', 'LIST'):
        ymake.report_configure_error('{}: TEST_JAVA_CLASSPATH_CMD_TYPE({}) is invalid. Choose one of MANIFEST, COMMAND_FILE or LIST'.format(unit.path(), java_cp_arg_type))
        return

    unit_path = unit.path()
    path = _common.strip_roots(unit_path)

    test_data = get_norm_paths(unit, 'TEST_DATA_VALUE')
    test_data.append('arcadia/build/scripts/run_junit.py')
    test_data.append('arcadia/build/scripts/unpacking_jtest_runner.py')

    data, data_files = get_canonical_test_resources(unit)
    test_data += data

    props, error_msg = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
    if error_msg:
        ymake.report_configure_error(error_msg)
        return

    for prop in props:
        if prop['type'] == 'file':
            test_data.append(prop['path'].replace('${ARCADIA_ROOT}', 'arcadia'))

    props = base64.b64encode(json.dumps(props, encoding='utf-8'))

    test_cwd = unit.get('TEST_CWD_VALUE') or ''  # TODO: validate test_cwd value

    if unit.get('MODULE_TYPE') == 'JUNIT5':
        script_rel_path = 'junit5.test'
    else:
        script_rel_path = 'junit.test'

    ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'
    test_record = {
        'SOURCE-FOLDER-PATH': path,
        'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path)]),
        'SCRIPT-REL-PATH': script_rel_path,
        'TEST-TIMEOUT': unit.get('TEST_TIMEOUT') or '',
        'TESTED-PROJECT-NAME': path,
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        # 'TEST-PRESERVE-ENV': 'da',
        'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
        'FORK-MODE': unit.get('TEST_FORK_MODE') or '',
        'SPLIT-FACTOR': unit.get('TEST_SPLIT_FACTOR') or '',
        'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TAG': serialize_list(_get_test_tags(unit)),
        'SIZE': unit.get('TEST_SIZE_NAME') or '',
        'REQUIREMENTS': serialize_list(get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')),
        'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),

        # JTEST/JTEST_FOR only
        'MODULE_TYPE': unit.get('MODULE_TYPE'),
        'UNITTEST_DIR': unit.get('UNITTEST_DIR') or '',
        'JVM_ARGS': serialize_list(get_values_list(unit, 'JVM_ARGS_VALUE')),
        'SYSTEM_PROPERTIES': props,
        'TEST-CWD': test_cwd,
        'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
        'JAVA_CLASSPATH_CMD_TYPE': java_cp_arg_type,
        'NO_JBUILD': 'yes' if ymake_java_test else 'no',
        'JDK_RESOURCE': 'JDK' + (unit.get('JDK_VERSION') or '_DEFAULT'),
        'JDK_FOR_TESTS': 'JDK' + (unit.get('JDK_VERSION') or '_DEFAULT') + '_FOR_TESTS',
    }
    test_classpath_origins = unit.get('TEST_CLASSPATH_VALUE')
    if test_classpath_origins:
        test_record['TEST_CLASSPATH_ORIGINS'] = test_classpath_origins
        test_record['TEST_CLASSPATH'] = '${TEST_CLASSPATH_MANAGED}'
    elif ymake_java_test:
        test_record['TEST_CLASSPATH'] = '${DART_CLASSPATH}'
        test_record['TEST_CLASSPATH_DEPS'] = '${DART_CLASSPATH_DEPS}'
        if unit.get('UNITTEST_DIR'):
            test_record['TEST_JAR'] = '${UNITTEST_MOD}'
        else:
            test_record['TEST_JAR'] = '{}/{}.jar'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))

    data = dump_test(unit, test_record)
    if data:
        unit.set_property(['DART_DATA', data])


def onjava_test_deps(unit, *args):
    if unit.get("TIDY") == "yes":
        # graph changed for clang_tidy tests
        return
    assert unit.get('MODULE_TYPE') is not None
    assert len(args) == 1
    mode = args[0]

    path = get_norm_unit_path(unit)
    ymake_java_test = unit.get('YMAKE_JAVA_TEST') == 'yes'

    test_record = {
        'SOURCE-FOLDER-PATH': path,
        'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path), 'dependencies']).strip('-'),
        'SCRIPT-REL-PATH': 'java.dependency.test',
        'TEST-TIMEOUT': '',
        'TESTED-PROJECT-NAME': path,
        'TEST-DATA': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': '',
        'SPLIT-FACTOR': '',
        'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TAG': '',
        'SIZE': 'SMALL',
        'IGNORE_CLASSPATH_CLASH': ' '.join(get_values_list(unit, 'JAVA_IGNORE_CLASSPATH_CLASH_VALUE')),
        'NO_JBUILD': 'yes' if ymake_java_test else 'no',

        # JTEST/JTEST_FOR only
        'MODULE_TYPE': unit.get('MODULE_TYPE'),
        'UNITTEST_DIR': '',
        'SYSTEM_PROPERTIES': '',
        'TEST-CWD': '',
    }
    if mode == 'strict':
        test_record['STRICT_CLASSPATH_CLASH'] = 'yes'

    if ymake_java_test:
        test_record['CLASSPATH'] = '$B/{}/{}.jar ${{DART_CLASSPATH}}'.format(unit.get('MODDIR'), unit.get('REALPRJNAME'))

    data = dump_test(unit, test_record)
    unit.set_property(['DART_DATA', data])


def _get_test_tags(unit, spec_args=None):
    if spec_args is None:
        spec_args = {}
    tags = spec_args.get('TAG', []) + get_values_list(unit, 'TEST_TAGS_VALUE')
    # DEVTOOLS-7571
    if unit.get('SKIP_TEST_VALUE') and 'ya:fat' in tags and "ya:not_autocheck" not in tags:
        tags.append("ya:not_autocheck")

    return tags


def _dump_test(
        unit,
        test_type,
        test_files,
        timeout,
        test_dir,
        custom_deps,
        test_data,
        python_paths,
        split_factor,
        fork_mode,
        test_size,
        tags,
        requirements,
        binary_path='',
        old_pytest=False,
        test_cwd=None,
        runner_bin=None,
        yt_spec=None,
        data_files=None
):
    if test_type == "PY_TEST":
        script_rel_path = "py.test"
    else:
        script_rel_path = test_type

    unit_path = unit.path()
    fork_test_files = unit.get('FORK_TEST_FILES_MODE')
    fork_mode = ' '.join(fork_mode) if fork_mode else ''
    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    if test_cwd:
        test_cwd = test_cwd.replace("$TEST_CWD_VALUE", "").replace('"MACRO_CALLS_DELIM"', "").strip()
    test_name = os.path.basename(binary_path)
    test_record = {
        'TEST-NAME': os.path.splitext(test_name)[0],
        'TEST-TIMEOUT': timeout,
        'SCRIPT-REL-PATH': script_rel_path,
        'TESTED-PROJECT-NAME': test_name,
        'SOURCE-FOLDER-PATH': test_dir,
        'CUSTOM-DEPENDENCIES': " ".join(custom_deps),
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        # 'TEST-PRESERVE-ENV': 'da',
        'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
        'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
        'SPLIT-FACTOR': split_factor,
        'TEST_PARTITION': unit.get('TEST_PARTITION') or 'SEQUENTIAL',
        'FORK-MODE': fork_mode,
        'FORK-TEST-FILES': fork_test_files,
        'TEST-FILES': serialize_list(test_files),
        'SIZE': test_size,
        'TAG': serialize_list(tags),
        'REQUIREMENTS': serialize_list(requirements),
        'USE_ARCADIA_PYTHON': use_arcadia_python or '',
        'OLD_PYTEST': 'yes' if old_pytest else 'no',
        'PYTHON-PATHS': serialize_list(python_paths),
        'TEST-CWD': test_cwd or '',
        'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
        'BUILD-FOLDER-PATH': _common.strip_roots(unit_path),
        'BLOB': unit.get('TEST_BLOB_DATA') or '',
        'CANONIZE_SUB_PATH': unit.get('CANONIZE_SUB_PATH') or '',
    }
    if binary_path:
        test_record['BINARY-PATH'] = _common.strip_roots(binary_path)
    if runner_bin:
        test_record['TEST-RUNNER-BIN'] = runner_bin
    if yt_spec:
        test_record['YT-SPEC'] = serialize_list(yt_spec)
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)


def onsetup_pytest_bin(unit, *args):
    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON') == "yes"
    if use_arcadia_python:
        unit.onresource(['-', 'PY_MAIN={}'.format("library.python.pytest.main:main")])  # XXX
        unit.onadd_pytest_bin(list(args))
    else:
        unit.onno_platform()
        unit.onadd_pytest_script(["PY_TEST"])


def onrun(unit, *args):
    exectest_cmd = unit.get(["EXECTEST_COMMAND_VALUE"]) or ''
    exectest_cmd += "\n" + subprocess.list2cmdline(args)
    unit.set(["EXECTEST_COMMAND_VALUE", exectest_cmd])
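
# Rough illustration of how RUN arguments accumulate (the command here is invented):
#   onrun(unit, 'echo', 'hello world') appends the line produced by
#   subprocess.list2cmdline(['echo', 'hello world']) == 'echo "hello world"'
#   to EXECTEST_COMMAND_VALUE; onsetup_exectest() below later folds the collected
#   commands into TEST_BLOB_DATA for the exectest dart record.

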
def onsetup_exectest(unit, *args):
    command = unit.get(["EXECTEST_COMMAND_VALUE"])
    if command is None:
        ymake.report_configure_error("EXECTEST must have at least one RUN macro")
        return
    command = command.replace("$EXECTEST_COMMAND_VALUE", "")
    if "PYTHON_BIN" in command:
        unit.ondepends('contrib/tools/python')
    unit.set(["TEST_BLOB_DATA", base64.b64encode(command)])
    add_test_to_dart(unit, "exectest", binary_path=os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))


def onsetup_run_python(unit):
    if unit.get("USE_ARCADIA_PYTHON") == "yes":
        unit.ondepends('contrib/tools/python')


def get_canonical_test_resources(unit):
    unit_path = unit.path()
    canon_data_dir = os.path.join(unit.resolve(unit_path), CANON_DATA_DIR_NAME, unit.get('CANONIZE_SUB_PATH') or '')

    try:
        _, dirs, files = next(os.walk(canon_data_dir))
    except StopIteration:
        # path doesn't exist
        return [], []

    if CANON_RESULT_FILE_NAME in files:
        return _get_canonical_data_resources_v2(os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME), unit_path)
    return [], []


def _load_canonical_file(filename, unit_path):
    try:
        with open(filename) as results_file:
            return json.load(results_file)
    except Exception as e:
        print >>sys.stderr, "malformed canonical data in {}: {} ({})".format(unit_path, e, filename)
        return {}


def _get_resource_from_uri(uri):
    m = CANON_MDS_RESOURCE_REGEX.match(uri)
    if m:
        res_id = m.group(1)
        return "{}:{}".format(MDS_SHEME, res_id)

    m = CANON_SBR_RESOURCE_REGEX.match(uri)
    if m:
        # Resources might conflict, because all Sandbox resources share the 'resource.tar.gz' name.
        # That's why the '=' notation is used to give each resource its own dedicated path.
        uri = m.group(1)
        res_id = m.group(2)
        return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
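
# Examples of the URI -> resource mapping above (resource ids are made up):
#   'https://storage.yandex-team.ru/get-devtools/123/file.tar.gz' -> 'mds:123/file.tar.gz'
#   'sbr://456'                                                   -> 'sbr://456=canondata_storage/456'

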
def _get_external_resources_from_canon_data(data):
    # The method should work with all canonization formats:
    #   result.json: {'uri': X, 'checksum': Y}
    #   result.json: {'testname': {'uri': X, 'checksum': Y}}
    #   result.json: {'testname': [{'uri': X, 'checksum': Y}]}
    # There is also a bug: if a test returns {'uri': 1}, the machinery will fail.
    # That's why the presence of both the 'uri' and 'checksum' fields is checked here.
    # (It's still a bug: a test can return {'uri': X, 'checksum': Y}; the canonization format needs to be unified.)
    res = set()

    if isinstance(data, dict):
        if 'uri' in data and 'checksum' in data:
            resource = _get_resource_from_uri(data['uri'])
            if resource:
                res.add(resource)
        else:
            for k, v in data.iteritems():
                res.update(_get_external_resources_from_canon_data(v))
    elif isinstance(data, list):
        for e in data:
            res.update(_get_external_resources_from_canon_data(e))

    return res
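
# Sketch of the shapes handled above (test name and ids are made up):
#   {'uri': 'sbr://1', 'checksum': 'x'}               -> {'sbr://1=canondata_storage/1'}
#   {'test_a': {'uri': 'sbr://1', 'checksum': 'x'}}   -> {'sbr://1=canondata_storage/1'}
#   {'test_a': [{'uri': 'sbr://1', 'checksum': 'x'}]} -> {'sbr://1=canondata_storage/1'}

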
def _get_canonical_data_resources_v2(filename, unit_path):
    return (_get_external_resources_from_canon_data(_load_canonical_file(filename, unit_path)), [filename])