#!/usr/bin/env python3
# type: ignore

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
import json
import os
import sys
import unittest
from argparse import ArgumentParser

import gbench
from gbench import report, util

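# Typical invocations, as a quick orientation. The three modes below are the
# subcommands defined in create_parser(); the placeholder arguments are
# illustrative, not literal flags:
#
#   compare.py benchmarks <baseline> <contender> [benchmark options]...
#   compare.py filters <benchmark> <filter_baseline> <filter_contender>
#   compare.py benchmarksfiltered <baseline> <filter_baseline> <contender> <filter_contender>
#
# where <baseline>, <contender> and <benchmark> are benchmark executables or
# JSON output files.
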
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = util.classify_input_file(in1)
    in2_kind, in2_err = util.classify_input_file(in2)
    output_file = util.find_benchmark_flag("--benchmark_out=", flags)
    output_type = util.find_benchmark_flag("--benchmark_out_format=", flags)
    if (
        in1_kind == util.IT_Executable
        and in2_kind == util.IT_Executable
        and output_file
    ):
        print(
            (
                "WARNING: '--benchmark_out=%s' will be passed to both "
                "benchmarks causing it to be overwritten"
            )
            % output_file
        )
    if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON:
        # When both sides are JSON the only supported flag is
        # --benchmark_filter=
        for flag in util.remove_benchmark_flags("--benchmark_filter=", flags):
            print(
                "WARNING: passing %s has no effect since both "
                "inputs are JSON" % flag
            )
    if output_type is not None and output_type != "json":
        print(
            (
                "ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
                " is not supported."
            )
            % output_type
        )
        sys.exit(1)

def create_parser():
    parser = ArgumentParser(
        description="versatile benchmark output compare tool"
    )

    parser.add_argument(
        "-a",
        "--display_aggregates_only",
        dest="display_aggregates_only",
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
        " actual runs, and the aggregates computed. Sometimes, it is "
        "desirable to only view the aggregates. E.g. when there are a lot "
        "of repetitions. Do note that only the display is affected. "
        "Internally, all the actual runs are still used, e.g. for U test.",
    )

    parser.add_argument(
        "--no-color",
        dest="color",
        default=True,
        action="store_false",
        help="Do not use colors in the terminal output",
    )

    parser.add_argument(
        "-d",
        "--dump_to_json",
        dest="dump_to_json",
        help="Additionally, dump benchmark comparison output to this file in JSON format.",
    )

    utest = parser.add_argument_group()
    utest.add_argument(
        "--no-utest",
        dest="utest",
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(
            report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS
        ),
    )
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest="utest_alpha",
        default=alpha_default,
        type=float,
        help=(
            "significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)"
        )
        % alpha_default,
    )

    subparsers = parser.add_subparsers(
        help="This tool has multiple modes of operation:", dest="mode"
    )

    parser_a = subparsers.add_parser(
        "benchmarks",
        help="The most simple use-case, compare all the output of these two benchmarks",
    )
    baseline = parser_a.add_argument_group("baseline", "The benchmark baseline")
    baseline.add_argument(
        "test_baseline",
        metavar="test_baseline",
        type=argparse.FileType("r"),
        nargs=1,
        help="A benchmark executable or JSON output file",
    )
    contender = parser_a.add_argument_group(
        "contender", "The benchmark that will be compared against the baseline"
    )
    contender.add_argument(
        "test_contender",
        metavar="test_contender",
        type=argparse.FileType("r"),
        nargs=1,
        help="A benchmark executable or JSON output file",
    )
    parser_a.add_argument(
        "benchmark_options",
        metavar="benchmark_options",
        nargs=argparse.REMAINDER,
        help="Arguments to pass when running benchmark executables",
    )

    parser_b = subparsers.add_parser(
        "filters", help="Compare filter one with the filter two of benchmark"
    )
    baseline = parser_b.add_argument_group("baseline", "The benchmark baseline")
    baseline.add_argument(
        "test",
        metavar="test",
        type=argparse.FileType("r"),
        nargs=1,
        help="A benchmark executable or JSON output file",
    )
    baseline.add_argument(
        "filter_baseline",
        metavar="filter_baseline",
        type=str,
        nargs=1,
        help="The first filter, that will be used as baseline",
    )
    contender = parser_b.add_argument_group(
        "contender", "The benchmark that will be compared against the baseline"
    )
    contender.add_argument(
        "filter_contender",
        metavar="filter_contender",
        type=str,
        nargs=1,
        help="The second filter, that will be compared against the baseline",
    )
    parser_b.add_argument(
        "benchmark_options",
        metavar="benchmark_options",
        nargs=argparse.REMAINDER,
        help="Arguments to pass when running benchmark executables",
    )

    parser_c = subparsers.add_parser(
        "benchmarksfiltered",
        help="Compare filter one of first benchmark with filter two of the second benchmark",
    )
    baseline = parser_c.add_argument_group("baseline", "The benchmark baseline")
    baseline.add_argument(
        "test_baseline",
        metavar="test_baseline",
        type=argparse.FileType("r"),
        nargs=1,
        help="A benchmark executable or JSON output file",
    )
    baseline.add_argument(
        "filter_baseline",
        metavar="filter_baseline",
        type=str,
        nargs=1,
        help="The first filter, that will be used as baseline",
    )
    contender = parser_c.add_argument_group(
        "contender", "The benchmark that will be compared against the baseline"
    )
    contender.add_argument(
        "test_contender",
        metavar="test_contender",
        type=argparse.FileType("r"),
        nargs=1,
        help="The second benchmark executable or JSON output file, that will be compared against the baseline",
    )
    contender.add_argument(
        "filter_contender",
        metavar="filter_contender",
        type=str,
        nargs=1,
        help="The second filter, that will be compared against the baseline",
    )
    parser_c.add_argument(
        "benchmark_options",
        metavar="benchmark_options",
        nargs=argparse.REMAINDER,
        help="Arguments to pass when running benchmark executables",
    )

    return parser

def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == "benchmarks":
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ""
        filter_contender = ""

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = "Comparing %s to %s" % (test_baseline, test_contender)
    elif args.mode == "filters":
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = "Comparing %s to %s (from %s)" % (
            filter_baseline,
            filter_contender,
            args.test[0].name,
        )
    elif args.mode == "benchmarksfiltered":
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = "Comparing %s (from %s) to %s (from %s)" % (
            filter_baseline,
            test_baseline,
            filter_contender,
            test_contender,
        )
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ["--benchmark_display_aggregates_only=true"]

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ["--benchmark_filter=%s" % filter_baseline]
        options_contender = ["--benchmark_filter=%s" % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.sort_benchmark_results(
        gbench.util.run_or_load_benchmark(
            test_baseline, benchmark_options + options_baseline
        )
    )
    json2 = json2_orig = gbench.util.sort_benchmark_results(
        gbench.util.run_or_load_benchmark(
            test_contender, benchmark_options + options_contender
        )
    )

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = "[%s vs. %s]" % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement
        )
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement
        )

    diff_report = gbench.report.get_difference_report(json1, json2, args.utest)
    output_lines = gbench.report.print_difference_report(
        diff_report,
        args.display_aggregates_only,
        args.utest,
        args.utest_alpha,
        args.color,
    )
    print(description)
    for ln in output_lines:
        print(ln)

    # Optionally, diff and output to JSON
    if args.dump_to_json is not None:
        with open(args.dump_to_json, "w") as f_json:
            json.dump(diff_report, f_json, indent=1)

class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gbench", "Inputs"
        )
        self.testInput0 = os.path.join(testInputs, "test1_run1.json")
        self.testInput1 = os.path.join(testInputs, "test1_run2.json")

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ["benchmarks", self.testInput0, self.testInput1]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ["--no-utest", "benchmarks", self.testInput0, self.testInput1]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ["-a", "benchmarks", self.testInput0, self.testInput1]
        )
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ["--alpha=0.314", "benchmarks", self.testInput0, self.testInput1]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            [
                "--no-utest",
                "--alpha=0.314",
                "benchmarks",
                self.testInput0,
                self.testInput1,
            ]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ["benchmarks", self.testInput0, self.testInput1, "d"]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ["d"])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ["benchmarks", self.testInput0, self.testInput1, "--", "e"]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarks")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ["e"])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d"])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "filters")
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.filter_contender[0], "d")
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ["filters", self.testInput0, "c", "d", "e"]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "filters")
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.filter_contender[0], "d")
        self.assertEqual(parsed.benchmark_options, ["e"])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ["filters", self.testInput0, "c", "d", "--", "f"]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "filters")
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.filter_contender[0], "d")
        self.assertEqual(parsed.benchmark_options, ["f"])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e"]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarksfiltered")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], "e")
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            [
                "benchmarksfiltered",
                self.testInput0,
                "c",
                self.testInput1,
                "e",
                "f",
            ]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarksfiltered")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], "e")
        self.assertEqual(parsed.benchmark_options[0], "f")

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            [
                "benchmarksfiltered",
                self.testInput0,
                "c",
                self.testInput1,
                "e",
                "--",
                "g",
            ]
        )
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, "benchmarksfiltered")
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], "c")
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], "e")
        self.assertEqual(parsed.benchmark_options[0], "g")

if __name__ == "__main__":
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;