diff options
author | Pablo Galindo <Pablogsal@gmail.com> | 2020-04-22 23:29:27 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2020-04-22 23:29:27 +0100 |
commit | c5fc15685202cda73f7c3f5c6f299b0945f58508 (patch) | |
tree | 7624ae45b95f2812e801c5879bdbdcb796f570a6 /Tools/peg_generator/scripts/benchmark.py | |
parent | bpo-39939: Add str.removeprefix and str.removesuffix (GH-18939) (diff) | |
download | cpython-c5fc15685202cda73f7c3f5c6f299b0945f58508.tar.gz cpython-c5fc15685202cda73f7c3f5c6f299b0945f58508.tar.bz2 cpython-c5fc15685202cda73f7c3f5c6f299b0945f58508.zip |
bpo-40334: PEP 617 implementation: New PEG parser for CPython (GH-19503)
Co-authored-by: Guido van Rossum <guido@python.org>
Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Diffstat (limited to 'Tools/peg_generator/scripts/benchmark.py')
-rw-r--r-- | Tools/peg_generator/scripts/benchmark.py | 140 |
1 file changed, 140 insertions, 0 deletions
#!/usr/bin/env python3.9
"""Reproduce the various pegen benchmarks.

Times (and memory-profiles) parsing/compiling either a synthetic "xxl"
file or the whole stdlib, with either the pegen extension parser or the
builtin CPython parser.  Must be run from the directory containing the
``data/`` and ``peg_extension/`` folders (it inserts the CWD on sys.path).
"""

import argparse
import ast
import functools
import os
import resource
import sys
from time import time

import memory_profiler

sys.path.insert(0, os.getcwd())
from peg_extension import parse
from pegen.build import build_parser_and_generator
from scripts.test_parse_directory import parse_directory

# How many timed repetitions each benchmark performs.  Keep this single
# constant in sync with the "average of 3 runs" wording in the reports.
N_RUNS = 3

argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--parser",
    action="store",
    choices=["pegen", "cpython"],
    default="pegen",
    help="Which parser to benchmark (default is pegen)",
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)

subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser("parse", help="Benchmark parsing and generating an ast.AST")
command_check = subcommands.add_parser(
    "check", help="Benchmark parsing and throwing the tree away"
)


def benchmark(func):
    """Decorator: time *func* over N_RUNS runs and report time and memory.

    Prints the mean wall-clock time of N_RUNS calls and the peak memory
    (as sampled by memory_profiler over one additional call), then
    returns the last result so callers can still use the value.
    """

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped benchmark
    def wrapper(*args):
        times = []
        for _ in range(N_RUNS):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        # memory_usage runs func(*args) once more and samples RSS during it.
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/N_RUNS:.3f} seconds on an average of 3 runs")
        print(f"\tMemory: {max(memory)} MiB on an average of 3 runs")
        return result

    return wrapper


@benchmark
def time_compile(source, parser):
    """Parse *source* and compile it to bytecode with the chosen parser."""
    if parser == "cpython":
        return compile(source, os.path.join("data", "xxl.py"), "exec")
    else:
        return parse.parse_string(source, mode=2)


@benchmark
def time_parse(source, parser):
    """Parse *source* into an ast.AST with the chosen parser."""
    if parser == "cpython":
        return ast.parse(source, os.path.join("data", "xxl.py"), "exec")
    else:
        return parse.parse_string(source, mode=1)


@benchmark
def time_check(source):
    """Parse *source* with pegen and throw the tree away (mode=0)."""
    return parse.parse_string(source, mode=0)


def run_benchmark_xxl(subcommand, parser, source):
    """Dispatch one xxl-target benchmark for *subcommand*."""
    if subcommand == "compile":
        time_compile(source, parser)
    elif subcommand == "parse":
        time_parse(source, parser)
    elif subcommand == "check":
        time_check(source)


def run_benchmark_stdlib(subcommand, parser):
    """Parse the whole stdlib N_RUNS times with the chosen parser/mode."""
    modes = {"compile": 2, "parse": 1, "check": 0}
    extension = None
    if parser == "pegen":
        # Build the C extension once up front; parse_directory reuses it.
        extension = build_parser_and_generator(
            "../../Grammar/python.gram",
            "peg_extension/parse.c",
            compile_extension=True,
            skip_actions=False,
        )
    for _ in range(N_RUNS):
        parse_directory(
            "../../Lib",
            "../../Grammar/python.gram",
            verbose=False,
            excluded_files=[
                "*/bad*",
                "*/lib2to3/tests/data/*",
            ],
            skip_actions=False,
            tree_arg=0,
            short=True,
            extension=extension,
            mode=modes[subcommand],
            parser=parser,
        )


def main():
    """Parse CLI arguments, validate the combination, and run the benchmark."""
    args = argparser.parse_args()
    subcommand = args.subcommand
    parser = args.parser
    target = args.target

    if subcommand is None:
        argparser.error("A benchmark to run is required")
    if subcommand == "check" and parser == "cpython":
        # The CPython parser has no "parse and discard" entry point.
        argparser.error("Cannot use check target with the CPython parser")

    if target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            source = f.read()
        run_benchmark_xxl(subcommand, parser, source)
    elif target == "stdlib":
        run_benchmark_stdlib(subcommand, parser)


if __name__ == "__main__":
    main()