Change update_xxx_checks to continue on error when processing multiple inputs (#137728)

Change llvm/utils/update_xxx_checks.py scripts to:

Catch exceptions individually for each test. On exception print which
test triggered the exception and continue with the remaining test
updates.
This commit is contained in:
Matthias Braun 2025-04-30 10:23:00 -07:00 committed by GitHub
parent ff8fc5bc45
commit 88b03aafdb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 1220 additions and 1175 deletions

View File

@ -31,6 +31,8 @@ designed to be authoratitive about what constitutes a good test!
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import sys
@ -39,6 +41,158 @@ import re
from UpdateTestChecks import common
def update_test(opt_basename: str, ti: common.TestInfo):
    """Regenerate the FileCheck lines of a single analysis test file in place.

    Args:
        opt_basename: Basename of the opt-like tool RUN lines must start with.
        ti: TestInfo describing the test file (path, RUN lines, CLI args).

    Exceptions are deliberately allowed to propagate: the caller (main) wraps
    each call in try/except so one failing test does not abort the rest.
    """
    # NOTE(review): triple_in_ir is extracted but never read below; kept for
    # parity with the original code — confirm before removing.
    triple_in_ir = None
    for l in ti.input_lines:
        m = common.TRIPLE_IR_RE.match(l)
        if m:
            triple_in_ir = m.groups()[0]
            break

    # Collect (check_prefixes, tool_args) pairs from the test's RUN lines,
    # skipping anything that is not "<opt_basename> ... | FileCheck ...".
    prefix_list = []
    for l in ti.run_lines:
        if "|" not in l:
            common.warn("Skipping unparsable RUN line: " + l)
            continue

        (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split("|", 1)])
        common.verify_filecheck_prefixes(filecheck_cmd)

        if not tool_cmd.startswith(opt_basename + " "):
            # Fixed message typo: was "WSkipping".
            common.warn("Skipping non-%s RUN line: %s" % (opt_basename, l))
            continue

        if not filecheck_cmd.startswith("FileCheck "):
            common.warn("Skipping non-FileChecked RUN line: " + l)
            continue

        # Drop the tool name and the input-file substitutions; what remains
        # are the flags to re-run the tool with.
        tool_cmd_args = tool_cmd[len(opt_basename) :].strip()
        tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()

        check_prefixes = common.get_check_prefixes(filecheck_cmd)

        # FIXME: We should use multiple check prefixes to common check lines. For
        # now, we just ignore all but the last.
        prefix_list.append((check_prefixes, tool_cmd_args))

    ginfo = common.make_analyze_generalizer(version=1)
    builder = common.FunctionTestBuilder(
        run_list=prefix_list,
        # Anonymous namespace object standing in for an argparse namespace.
        flags=type(
            "",
            (object,),
            {
                "verbose": ti.args.verbose,
                "filters": ti.args.filters,
                "function_signature": False,
                "check_attributes": False,
                "replace_value_regex": [],
            },
        ),
        scrubber_args=[],
        path=ti.path,
        ginfo=ginfo,
    )

    # Re-run the tool for every RUN line and feed the scrubbed output into the
    # builder, split per analysis block.
    for prefixes, opt_args in prefix_list:
        common.debug("Extracted opt cmd:", opt_basename, opt_args, file=sys.stderr)
        common.debug("Extracted FileCheck prefixes:", str(prefixes), file=sys.stderr)

        raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
        if re.search(r"Printing analysis ", raw_tool_outputs) is not None:
            # Split analysis outputs by "Printing analysis " declarations.
            for raw_tool_output in re.split(r"Printing analysis ", raw_tool_outputs):
                builder.process_run_line(
                    common.ANALYZE_FUNCTION_RE,
                    common.scrub_body,
                    raw_tool_output,
                    prefixes,
                )
        elif (
            re.search(r"(LV|LDist): Checking a loop in ", raw_tool_outputs) is not None
        ):
            # Loop-pass debug output: split per checked loop.
            for raw_tool_output in re.split(
                r"(LV|LDist): Checking a loop in ", raw_tool_outputs
            ):
                builder.process_run_line(
                    common.LOOP_PASS_DEBUG_RE,
                    common.scrub_body,
                    raw_tool_output,
                    prefixes,
                )
        else:
            common.warn("Don't know how to deal with this output")
            continue

        builder.processed_prefixes(prefixes)

    func_dict = builder.finish_and_get_func_dict()
    is_in_function = False
    is_in_function_start = False
    prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
    common.debug("Rewriting FileCheck prefixes:", str(prefix_set), file=sys.stderr)
    output_lines = []
    generated_prefixes = []

    # Walk the test file, copying lines through and splicing fresh CHECK lines
    # in right after each function header.
    for input_info in ti.iterlines(output_lines):
        input_line = input_info.line
        args = input_info.args
        if is_in_function_start:
            if input_line == "":
                continue
            if input_line.lstrip().startswith(";"):
                m = common.CHECK_RE.match(input_line)
                if not m or m.group(1) not in prefix_set:
                    # Comment that is not one of our CHECK prefixes: keep it.
                    output_lines.append(input_line)
                    continue

            # Print out the various check lines here.
            generated_prefixes.extend(
                common.add_analyze_checks(
                    output_lines,
                    ";",
                    prefix_list,
                    func_dict,
                    func_name,
                    ginfo,
                    is_filtered=builder.is_filtered(),
                )
            )
            is_in_function_start = False

        if is_in_function:
            if common.should_add_line_to_output(input_line, prefix_set):
                # This input line of the function body will go as-is into the output.
                output_lines.append(input_line)
            else:
                continue
            if input_line.strip() == "}":
                is_in_function = False
            continue

        # If it's outside a function, it just gets copied to the output.
        output_lines.append(input_line)

        m = common.IR_FUNCTION_RE.match(input_line)
        if not m:
            continue
        func_name = m.group(1)
        if ti.args.function is not None and func_name != ti.args.function:
            # When filtering on a specific function, skip all others.
            continue
        is_in_function = is_in_function_start = True

    if ti.args.gen_unused_prefix_body:
        output_lines.extend(
            ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
        )

    common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))

    # Write bytes to keep line endings exactly as formatted above.
    with open(ti.path, "wb") as f:
        f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
def main():
from argparse import RawTextHelpFormatter
@ -61,164 +215,18 @@ def main():
common.error("Unexpected opt name: " + opt_basename)
sys.exit(1)
returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
triple_in_ir = None
for l in ti.input_lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
prefix_list = []
for l in ti.run_lines:
if "|" not in l:
common.warn("Skipping unparsable RUN line: " + l)
continue
(tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split("|", 1)])
common.verify_filecheck_prefixes(filecheck_cmd)
if not tool_cmd.startswith(opt_basename + " "):
common.warn("WSkipping non-%s RUN line: %s" % (opt_basename, l))
continue
if not filecheck_cmd.startswith("FileCheck "):
common.warn("Skipping non-FileChecked RUN line: " + l)
continue
tool_cmd_args = tool_cmd[len(opt_basename) :].strip()
tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
check_prefixes = common.get_check_prefixes(filecheck_cmd)
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
ginfo = common.make_analyze_generalizer(version=1)
builder = common.FunctionTestBuilder(
run_list=prefix_list,
flags=type(
"",
(object,),
{
"verbose": ti.args.verbose,
"filters": ti.args.filters,
"function_signature": False,
"check_attributes": False,
"replace_value_regex": [],
},
),
scrubber_args=[],
path=ti.path,
ginfo=ginfo,
)
for prefixes, opt_args in prefix_list:
common.debug("Extracted opt cmd:", opt_basename, opt_args, file=sys.stderr)
common.debug(
"Extracted FileCheck prefixes:", str(prefixes), file=sys.stderr
)
raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
if re.search(r"Printing analysis ", raw_tool_outputs) is not None:
# Split analysis outputs by "Printing analysis " declarations.
for raw_tool_output in re.split(
r"Printing analysis ", raw_tool_outputs
):
builder.process_run_line(
common.ANALYZE_FUNCTION_RE,
common.scrub_body,
raw_tool_output,
prefixes,
)
elif (
re.search(r"(LV|LDist): Checking a loop in ", raw_tool_outputs)
is not None
):
for raw_tool_output in re.split(
r"(LV|LDist): Checking a loop in ", raw_tool_outputs
):
builder.process_run_line(
common.LOOP_PASS_DEBUG_RE,
common.scrub_body,
raw_tool_output,
prefixes,
)
else:
common.warn("Don't know how to deal with this output")
continue
builder.processed_prefixes(prefixes)
func_dict = builder.finish_and_get_func_dict()
is_in_function = False
is_in_function_start = False
prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
common.debug("Rewriting FileCheck prefixes:", str(prefix_set), file=sys.stderr)
output_lines = []
generated_prefixes = []
for input_info in ti.iterlines(output_lines):
input_line = input_info.line
args = input_info.args
if is_in_function_start:
if input_line == "":
continue
if input_line.lstrip().startswith(";"):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
generated_prefixes.extend(
common.add_analyze_checks(
output_lines,
";",
prefix_list,
func_dict,
func_name,
ginfo,
is_filtered=builder.is_filtered(),
)
)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
output_lines.append(input_line)
else:
continue
if input_line.strip() == "}":
is_in_function = False
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if ti.args.function is not None and func_name != ti.args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
try:
update_test(opt_basename, ti)
except Exception:
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":
main()
sys.exit(main())

View File

@ -14,6 +14,8 @@ Usage:
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import collections
import json
@ -302,257 +304,142 @@ def exec_run_line(exe):
sys.exit(3)
def main():
initial_args, parser = config()
script_name = os.path.basename(__file__)
def update_test(ti: common.TestInfo):
# Build a list of filechecked and non-filechecked RUN lines.
run_list = []
line2func_list = collections.defaultdict(list)
for ti in common.itertests(
initial_args.tests,
parser,
"utils/" + script_name,
comment_prefix="//",
argparse_callback=infer_dependent_args,
):
# Build a list of filechecked and non-filechecked RUN lines.
run_list = []
line2func_list = collections.defaultdict(list)
subs = {
"%s": ti.path,
"%t": tempfile.NamedTemporaryFile().name,
"%S": os.path.dirname(ti.path),
}
subs = {
"%s": ti.path,
"%t": tempfile.NamedTemporaryFile().name,
"%S": os.path.dirname(ti.path),
}
for l in ti.run_lines:
commands = [cmd.strip() for cmd in l.split("|")]
for l in ti.run_lines:
commands = [cmd.strip() for cmd in l.split("|")]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(commands[0])
if m:
triple_in_cmd = m.groups()[0]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(commands[0])
if m:
triple_in_cmd = m.groups()[0]
# Parse executable args.
exec_args = shlex.split(commands[0])
# Execute non-clang runline.
if exec_args[0] not in SUBST:
# Do lit-like substitutions.
for s in subs:
exec_args = [
i.replace(s, subs[s]) if s in i else i for i in exec_args
]
run_list.append((None, exec_args, None, None))
continue
# This is a clang runline, apply %clang substitution rule, do lit-like substitutions,
# and append args.clang_args
clang_args = exec_args
clang_args[0:1] = SUBST[clang_args[0]]
# Parse executable args.
exec_args = shlex.split(commands[0])
# Execute non-clang runline.
if exec_args[0] not in SUBST:
# Do lit-like substitutions.
for s in subs:
clang_args = [
i.replace(s, subs[s]) if s in i else i for i in clang_args
]
clang_args += ti.args.clang_args
exec_args = [i.replace(s, subs[s]) if s in i else i for i in exec_args]
run_list.append((None, exec_args, None, None))
continue
# This is a clang runline, apply %clang substitution rule, do lit-like substitutions,
# and append args.clang_args
clang_args = exec_args
clang_args[0:1] = SUBST[clang_args[0]]
for s in subs:
clang_args = [i.replace(s, subs[s]) if s in i else i for i in clang_args]
clang_args += ti.args.clang_args
# Extract -check-prefix in FileCheck args
filecheck_cmd = commands[-1]
common.verify_filecheck_prefixes(filecheck_cmd)
if not filecheck_cmd.startswith("FileCheck "):
# Execute non-filechecked clang runline.
exe = [ti.args.clang] + clang_args
run_list.append((None, exe, None, None))
continue
# Extract -check-prefix in FileCheck args
filecheck_cmd = commands[-1]
common.verify_filecheck_prefixes(filecheck_cmd)
if not filecheck_cmd.startswith("FileCheck "):
# Execute non-filechecked clang runline.
exe = [ti.args.clang] + clang_args
run_list.append((None, exe, None, None))
continue
check_prefixes = common.get_check_prefixes(filecheck_cmd)
run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
check_prefixes = common.get_check_prefixes(filecheck_cmd)
run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
# Execute clang, generate LLVM IR, and extract functions.
# Execute clang, generate LLVM IR, and extract functions.
# Store only filechecked runlines.
filecheck_run_list = [i for i in run_list if i[0]]
ginfo = common.make_ir_generalizer(
ti.args.version, ti.args.check_globals == "none"
)
builder = common.FunctionTestBuilder(
run_list=filecheck_run_list,
flags=ti.args,
scrubber_args=[],
path=ti.path,
ginfo=ginfo,
# Store only filechecked runlines.
filecheck_run_list = [i for i in run_list if i[0]]
ginfo = common.make_ir_generalizer(ti.args.version, ti.args.check_globals == "none")
builder = common.FunctionTestBuilder(
run_list=filecheck_run_list,
flags=ti.args,
scrubber_args=[],
path=ti.path,
ginfo=ginfo,
)
for prefixes, args, extra_commands, triple_in_cmd in run_list:
# Execute non-filechecked runline.
if not prefixes:
print(
"NOTE: Executing non-FileChecked RUN line: " + " ".join(args),
file=sys.stderr,
)
exec_run_line(args)
continue
clang_args = args
common.debug("Extracted clang cmd: clang {}".format(clang_args))
common.debug("Extracted FileCheck prefixes: {}".format(prefixes))
# Invoke external tool and extract function bodies.
raw_tool_output = common.invoke_tool(ti.args.clang, clang_args, ti.path)
get_function_body(
builder,
ti.args,
ti.path,
clang_args,
extra_commands,
prefixes,
raw_tool_output,
)
for prefixes, args, extra_commands, triple_in_cmd in run_list:
# Execute non-filechecked runline.
if not prefixes:
print(
"NOTE: Executing non-FileChecked RUN line: " + " ".join(args),
file=sys.stderr,
)
exec_run_line(args)
continue
# Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
# mangled names. Forward all clang args for now.
for k, v in get_line2func_list(
ti.args, clang_args, common.get_globals_name_prefix(raw_tool_output)
).items():
line2func_list[k].extend(v)
clang_args = args
common.debug("Extracted clang cmd: clang {}".format(clang_args))
common.debug("Extracted FileCheck prefixes: {}".format(prefixes))
func_dict = builder.finish_and_get_func_dict()
global_vars_seen_dict = {}
prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
output_lines = []
has_checked_pre_function_globals = False
# Invoke external tool and extract function bodies.
raw_tool_output = common.invoke_tool(ti.args.clang, clang_args, ti.path)
get_function_body(
builder,
ti.args,
ti.path,
clang_args,
extra_commands,
include_generated_funcs = common.find_arg_in_test(
ti,
lambda args: ti.args.include_generated_funcs,
"--include-generated-funcs",
True,
)
generated_prefixes = []
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# It turns out that when clang generates functions (for example, with
# -fopenmp), it can sometimes cause functions to be re-ordered in the
# output, even functions that exist in the source file. Therefore we
# can't insert check lines before each source function and instead have to
# put them at the end. So the first thing to do is dump out the source
# lines.
common.dump_input_lines(output_lines, ti, prefix_set, "//")
# Now generate all the checks.
def check_generator(my_output_lines, prefixes, func):
return common.add_ir_checks(
my_output_lines,
"//",
prefixes,
raw_tool_output,
func_dict,
func,
False,
ti.args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
# Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
# mangled names. Forward all clang args for now.
for k, v in get_line2func_list(
ti.args, clang_args, common.get_globals_name_prefix(raw_tool_output)
).items():
line2func_list[k].extend(v)
func_dict = builder.finish_and_get_func_dict()
global_vars_seen_dict = {}
prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
output_lines = []
has_checked_pre_function_globals = False
include_generated_funcs = common.find_arg_in_test(
ti,
lambda args: ti.args.include_generated_funcs,
"--include-generated-funcs",
True,
)
generated_prefixes = []
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# It turns out that when clang generates functions (for example, with
# -fopenmp), it can sometimes cause functions to be re-ordered in the
# output, even functions that exist in the source file. Therefore we
# can't insert check lines before each source function and instead have to
# put them at the end. So the first thing to do is dump out the source
# lines.
common.dump_input_lines(output_lines, ti, prefix_set, "//")
# Now generate all the checks.
def check_generator(my_output_lines, prefixes, func):
return common.add_ir_checks(
my_output_lines,
"//",
prefixes,
func_dict,
func,
False,
ti.args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
if ti.args.check_globals != 'none':
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
"//",
run_list,
output_lines,
ginfo,
global_vars_seen_dict,
False,
True,
ti.args.check_globals,
)
)
generated_prefixes.extend(
common.add_checks_at_end(
output_lines,
filecheck_run_list,
builder.func_order(),
"//",
lambda my_output_lines, prefixes, func: check_generator(
my_output_lines, prefixes, func
),
)
)
else:
# Normal mode. Put checks before each source function.
for line_info in ti.iterlines(output_lines):
idx = line_info.line_number
line = line_info.line
args = line_info.args
include_line = True
m = common.CHECK_RE.match(line)
if m and m.group(1) in prefix_set:
continue # Don't append the existing CHECK lines
# Skip special separator comments added by commmon.add_global_checks.
if line.strip() == "//" + common.SEPARATOR:
continue
if idx in line2func_list:
added = set()
for spell, mangled, search in line2func_list[idx]:
# One line may contain multiple function declarations.
# Skip if the mangled name has been added before.
# The line number may come from an included file, we simply require
# the search string (normally the function's spelling name, but is
# the class's spelling name for class specializations) to appear on
# the line to exclude functions from other files.
if mangled in added or search not in line:
continue
if args.functions is None or any(
re.search(regex, spell) for regex in args.functions
):
last_line = output_lines[-1].strip()
while last_line == "//":
# Remove the comment line since we will generate a new comment
# line as part of common.add_ir_checks()
output_lines.pop()
last_line = output_lines[-1].strip()
if (
ti.args.check_globals != 'none'
and not has_checked_pre_function_globals
):
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
"//",
run_list,
output_lines,
ginfo,
global_vars_seen_dict,
False,
True,
ti.args.check_globals,
)
)
has_checked_pre_function_globals = True
if added:
output_lines.append("//")
added.add(mangled)
generated_prefixes.extend(
common.add_ir_checks(
output_lines,
"//",
filecheck_run_list,
func_dict,
mangled,
False,
args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
)
if line.rstrip("\n") == "//":
include_line = False
if include_line:
output_lines.append(line.rstrip("\n"))
if ti.args.check_globals != 'none':
if ti.args.check_globals != "none":
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
@ -562,19 +449,138 @@ def main():
ginfo,
global_vars_seen_dict,
False,
False,
True,
ti.args.check_globals,
)
)
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
generated_prefixes.extend(
common.add_checks_at_end(
output_lines,
filecheck_run_list,
builder.func_order(),
"//",
lambda my_output_lines, prefixes, func: check_generator(
my_output_lines, prefixes, func
),
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
)
else:
# Normal mode. Put checks before each source function.
for line_info in ti.iterlines(output_lines):
idx = line_info.line_number
line = line_info.line
args = line_info.args
include_line = True
m = common.CHECK_RE.match(line)
if m and m.group(1) in prefix_set:
continue # Don't append the existing CHECK lines
# Skip special separator comments added by commmon.add_global_checks.
if line.strip() == "//" + common.SEPARATOR:
continue
if idx in line2func_list:
added = set()
for spell, mangled, search in line2func_list[idx]:
# One line may contain multiple function declarations.
# Skip if the mangled name has been added before.
# The line number may come from an included file, we simply require
# the search string (normally the function's spelling name, but is
# the class's spelling name for class specializations) to appear on
# the line to exclude functions from other files.
if mangled in added or search not in line:
continue
if args.functions is None or any(
re.search(regex, spell) for regex in args.functions
):
last_line = output_lines[-1].strip()
while last_line == "//":
# Remove the comment line since we will generate a new comment
# line as part of common.add_ir_checks()
output_lines.pop()
last_line = output_lines[-1].strip()
if (
ti.args.check_globals != "none"
and not has_checked_pre_function_globals
):
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
"//",
run_list,
output_lines,
ginfo,
global_vars_seen_dict,
False,
True,
ti.args.check_globals,
)
)
has_checked_pre_function_globals = True
if added:
output_lines.append("//")
added.add(mangled)
generated_prefixes.extend(
common.add_ir_checks(
output_lines,
"//",
filecheck_run_list,
func_dict,
mangled,
False,
args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
)
if line.rstrip("\n") == "//":
include_line = False
return 0
if include_line:
output_lines.append(line.rstrip("\n"))
if ti.args.check_globals != "none":
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
"//",
run_list,
output_lines,
ginfo,
global_vars_seen_dict,
False,
False,
ti.args.check_globals,
)
)
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
def main():
initial_args, parser = config()
script_name = os.path.basename(__file__)
returncode = 0
for ti in common.itertests(
initial_args.tests,
parser,
"utils/" + script_name,
comment_prefix="//",
argparse_callback=infer_dependent_args,
):
try:
update_test(ti)
except Exception:
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":

View File

@ -9,8 +9,11 @@ a single test function.
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import sys
from UpdateTestChecks import common
@ -21,6 +24,232 @@ LLC_LIKE_TOOLS = [
]
def update_test(ti: common.TestInfo):
    """Regenerate the FileCheck lines of a single llc-style test file in place.

    Called once per test by main(), which catches exceptions per test so a
    failure here does not abort the remaining updates.
    """
    # Triple declared inside the IR, if any; used as a fallback when the RUN
    # line does not carry its own -mtriple.
    triple_in_ir = None
    for l in ti.input_lines:
        m = common.TRIPLE_IR_RE.match(l)
        if m:
            triple_in_ir = m.groups()[0]
            break

    # Parse RUN lines into
    # (prefixes, tool, args, preprocess_cmd, triple, march) tuples.
    run_list = []
    for l in ti.run_lines:
        if "|" not in l:
            common.warn("Skipping unparsable RUN line: " + l)
            continue

        commands = [cmd.strip() for cmd in l.split("|")]
        assert len(commands) >= 2
        preprocess_cmd = None
        if len(commands) > 2:
            # Everything before the llc invocation is a preprocessing pipe.
            preprocess_cmd = " | ".join(commands[:-2])
        llc_cmd = commands[-2]
        filecheck_cmd = commands[-1]
        llc_tool = llc_cmd.split(" ")[0]

        triple_in_cmd = None
        m = common.TRIPLE_ARG_RE.search(llc_cmd)
        if m:
            triple_in_cmd = m.groups()[0]

        march_in_cmd = ti.args.default_march
        m = common.MARCH_ARG_RE.search(llc_cmd)
        if m:
            march_in_cmd = m.groups()[0]

        # -debug-only=isel selects the isel-output scrubber; otherwise asm.
        m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
        if m and m.groups()[0] == "isel":
            from UpdateTestChecks import isel as output_type
        else:
            from UpdateTestChecks import asm as output_type

        common.verify_filecheck_prefixes(filecheck_cmd)

        llc_like_tools = LLC_LIKE_TOOLS[:]
        if ti.args.tool:
            llc_like_tools.append(ti.args.tool)
        if llc_tool not in llc_like_tools:
            common.warn("Skipping non-llc RUN line: " + l)
            continue

        if not filecheck_cmd.startswith("FileCheck "):
            common.warn("Skipping non-FileChecked RUN line: " + l)
            continue

        # Strip tool name and input-file substitutions to get the raw flags.
        llc_cmd_args = llc_cmd[len(llc_tool) :].strip()
        llc_cmd_args = llc_cmd_args.replace("< %s", "").replace("%s", "").strip()
        if ti.path.endswith(".mir"):
            llc_cmd_args += " -x mir"
        check_prefixes = common.get_check_prefixes(filecheck_cmd)

        # FIXME: We should use multiple check prefixes to common check lines. For
        # now, we just ignore all but the last.
        run_list.append(
            (
                check_prefixes,
                llc_tool,
                llc_cmd_args,
                preprocess_cmd,
                triple_in_cmd,
                march_in_cmd,
            )
        )

    # MIR check lines are indented inside the body.
    # NOTE(review): indent string width collapsed in rendering — two spaces
    # assumed per MIR convention; confirm against upstream.
    if ti.path.endswith(".mir"):
        check_indent = "  "
    else:
        check_indent = ""

    ginfo = common.make_asm_generalizer(version=1)
    builder = common.FunctionTestBuilder(
        run_list=run_list,
        # Anonymous namespace object standing in for an argparse namespace.
        flags=type(
            "",
            (object,),
            {
                "verbose": ti.args.verbose,
                "filters": ti.args.filters,
                "function_signature": False,
                "check_attributes": False,
                "replace_value_regex": [],
            },
        ),
        scrubber_args=[ti.args],
        path=ti.path,
        ginfo=ginfo,
    )

    # Re-run the tool for every RUN line and feed the scrubbed output into
    # the builder.
    for (
        prefixes,
        llc_tool,
        llc_args,
        preprocess_cmd,
        triple_in_cmd,
        march_in_cmd,
    ) in run_list:
        common.debug("Extracted LLC cmd:", llc_tool, llc_args)
        common.debug("Extracted FileCheck prefixes:", str(prefixes))

        raw_tool_output = common.invoke_tool(
            ti.args.llc_binary or llc_tool,
            llc_args,
            ti.path,
            preprocess_cmd,
            verbose=ti.args.verbose,
        )
        # Triple precedence: RUN-line -mtriple, then IR triple, then -march.
        triple = triple_in_cmd or triple_in_ir
        if not triple:
            triple = common.get_triple_from_march(march_in_cmd)

        scrubber, function_re = output_type.get_run_handler(triple)
        builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
        builder.processed_prefixes(prefixes)

    func_dict = builder.finish_and_get_func_dict()
    global_vars_seen_dict = {}

    is_in_function = False
    is_in_function_start = False
    func_name = None
    prefix_set = set([prefix for p in run_list for prefix in p[0]])
    common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
    output_lines = []

    include_generated_funcs = common.find_arg_in_test(
        ti,
        lambda args: ti.args.include_generated_funcs,
        "--include-generated-funcs",
        True,
    )

    generated_prefixes = []
    if include_generated_funcs:
        # Generate the appropriate checks for each function. We need to emit
        # these in the order according to the generated output so that CHECK-LABEL
        # works properly. func_order provides that.

        # We can't predict where various passes might insert functions so we can't
        # be sure the input function order is maintained. Therefore, first spit
        # out all the source lines.
        common.dump_input_lines(output_lines, ti, prefix_set, ";")

        # Now generate all the checks.
        generated_prefixes = common.add_checks_at_end(
            output_lines,
            run_list,
            builder.func_order(),
            check_indent + ";",
            lambda my_output_lines, prefixes, func: output_type.add_checks(
                my_output_lines,
                check_indent + ";",
                prefixes,
                func_dict,
                func,
                ginfo,
                global_vars_seen_dict,
                is_filtered=builder.is_filtered(),
            ),
        )
    else:
        # Normal mode: walk the file, copying lines through and splicing fresh
        # CHECK lines in right after each function header.
        for input_info in ti.iterlines(output_lines):
            input_line = input_info.line
            args = input_info.args
            if is_in_function_start:
                if input_line == "":
                    continue
                if input_line.lstrip().startswith(";"):
                    m = common.CHECK_RE.match(input_line)
                    if not m or m.group(1) not in prefix_set:
                        # Comment that is not one of our CHECK prefixes: keep it.
                        output_lines.append(input_line)
                        continue

                # Print out the various check lines here.
                generated_prefixes.extend(
                    output_type.add_checks(
                        output_lines,
                        check_indent + ";",
                        run_list,
                        func_dict,
                        func_name,
                        ginfo,
                        global_vars_seen_dict,
                        is_filtered=builder.is_filtered(),
                    )
                )
                is_in_function_start = False

            if is_in_function:
                if common.should_add_line_to_output(input_line, prefix_set):
                    # This input line of the function body will go as-is into the output.
                    output_lines.append(input_line)
                else:
                    continue
                if input_line.strip() == "}":
                    is_in_function = False
                continue

            # If it's outside a function, it just gets copied to the output.
            output_lines.append(input_line)

            m = common.IR_FUNCTION_RE.match(input_line)
            if not m:
                continue
            func_name = m.group(1)
            if args.function is not None and func_name != args.function:
                # When filtering on a specific function, skip all others.
                continue
            is_in_function = is_in_function_start = True

    if ti.args.gen_unused_prefix_body:
        output_lines.extend(
            ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
        )

    common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))

    # Write bytes to keep line endings exactly as formatted above.
    with open(ti.path, "wb") as f:
        f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
@ -71,233 +300,18 @@ def main():
script_name = os.path.basename(__file__)
returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
triple_in_ir = None
for l in ti.input_lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
run_list = []
for l in ti.run_lines:
if "|" not in l:
common.warn("Skipping unparsable RUN line: " + l)
continue
commands = [cmd.strip() for cmd in l.split("|")]
assert len(commands) >= 2
preprocess_cmd = None
if len(commands) > 2:
preprocess_cmd = " | ".join(commands[:-2])
llc_cmd = commands[-2]
filecheck_cmd = commands[-1]
llc_tool = llc_cmd.split(" ")[0]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple_in_cmd = m.groups()[0]
march_in_cmd = ti.args.default_march
m = common.MARCH_ARG_RE.search(llc_cmd)
if m:
march_in_cmd = m.groups()[0]
m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
if m and m.groups()[0] == "isel":
from UpdateTestChecks import isel as output_type
else:
from UpdateTestChecks import asm as output_type
common.verify_filecheck_prefixes(filecheck_cmd)
llc_like_tools = LLC_LIKE_TOOLS[:]
if ti.args.tool:
llc_like_tools.append(ti.args.tool)
if llc_tool not in llc_like_tools:
common.warn("Skipping non-llc RUN line: " + l)
continue
if not filecheck_cmd.startswith("FileCheck "):
common.warn("Skipping non-FileChecked RUN line: " + l)
continue
llc_cmd_args = llc_cmd[len(llc_tool) :].strip()
llc_cmd_args = llc_cmd_args.replace("< %s", "").replace("%s", "").strip()
if ti.path.endswith(".mir"):
llc_cmd_args += " -x mir"
check_prefixes = common.get_check_prefixes(filecheck_cmd)
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
run_list.append(
(
check_prefixes,
llc_tool,
llc_cmd_args,
preprocess_cmd,
triple_in_cmd,
march_in_cmd,
)
)
if ti.path.endswith(".mir"):
check_indent = " "
else:
check_indent = ""
ginfo = common.make_asm_generalizer(version=1)
builder = common.FunctionTestBuilder(
run_list=run_list,
flags=type(
"",
(object,),
{
"verbose": ti.args.verbose,
"filters": ti.args.filters,
"function_signature": False,
"check_attributes": False,
"replace_value_regex": [],
},
),
scrubber_args=[ti.args],
path=ti.path,
ginfo=ginfo,
)
for (
prefixes,
llc_tool,
llc_args,
preprocess_cmd,
triple_in_cmd,
march_in_cmd,
) in run_list:
common.debug("Extracted LLC cmd:", llc_tool, llc_args)
common.debug("Extracted FileCheck prefixes:", str(prefixes))
raw_tool_output = common.invoke_tool(
ti.args.llc_binary or llc_tool,
llc_args,
ti.path,
preprocess_cmd,
verbose=ti.args.verbose,
)
triple = triple_in_cmd or triple_in_ir
if not triple:
triple = common.get_triple_from_march(march_in_cmd)
scrubber, function_re = output_type.get_run_handler(triple)
builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
builder.processed_prefixes(prefixes)
func_dict = builder.finish_and_get_func_dict()
global_vars_seen_dict = {}
is_in_function = False
is_in_function_start = False
func_name = None
prefix_set = set([prefix for p in run_list for prefix in p[0]])
common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
output_lines = []
include_generated_funcs = common.find_arg_in_test(
ti,
lambda args: ti.args.include_generated_funcs,
"--include-generated-funcs",
True,
)
generated_prefixes = []
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# We can't predict where various passes might insert functions so we can't
# be sure the input function order is maintained. Therefore, first spit
# out all the source lines.
common.dump_input_lines(output_lines, ti, prefix_set, ";")
# Now generate all the checks.
generated_prefixes = common.add_checks_at_end(
output_lines,
run_list,
builder.func_order(),
check_indent + ";",
lambda my_output_lines, prefixes, func: output_type.add_checks(
my_output_lines,
check_indent + ";",
prefixes,
func_dict,
func,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
),
)
else:
for input_info in ti.iterlines(output_lines):
input_line = input_info.line
args = input_info.args
if is_in_function_start:
if input_line == "":
continue
if input_line.lstrip().startswith(";"):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
generated_prefixes.extend(
output_type.add_checks(
output_lines,
check_indent + ";",
run_list,
func_dict,
func_name,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
)
is_in_function_start = False
if is_in_function:
if common.should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
output_lines.append(input_line)
else:
continue
if input_line.strip() == "}":
is_in_function = False
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
try:
update_test(ti)
except Exception as e:
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":
main()
sys.exit(main())

View File

@ -5,14 +5,16 @@ A test update script. This script is a utility to update LLVM 'llvm-mc' based t
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import functools
import os # Used to advertise this file's name ("autogenerated_note").
from UpdateTestChecks import common
import subprocess
import re
import sys
from UpdateTestChecks import common
mc_LIKE_TOOLS = [
"llvm-mc",
@ -121,6 +123,276 @@ def getErrCheckLine(prefix, output, mc_mode, line_offset=1):
)
# Update a single llvm-mc style test file in place: parse its RUN lines,
# re-run the mc tool over every test line, and regenerate the FileCheck
# check lines.  Raises if --sort is requested for a dasm (.txt) input.
# NOTE(review): isTestLine/isRunLine/invoke_tool/hasErr/getErrString/
# getOutputString/getStdCheckLine/getErrCheckLine/should_add_line_to_output
# and mc_LIKE_TOOLS are presumably module-level helpers defined earlier in
# this file — confirm against the full source.
def update_test(ti: common.TestInfo):
# Pick the test mode from the file extension: .s is assembler input,
# .txt is disassembler (dasm) input; anything else is skipped.
if ti.path.endswith(".s"):
mc_mode = "asm"
elif ti.path.endswith(".txt"):
mc_mode = "dasm"
if ti.args.sort:
raise Exception("sorting with dasm(.txt) file is not supported!")
else:
common.warn("Expected .s and .txt, Skipping file : ", ti.path)
return
# An explicit triple inside the test body overrides march-derived triples.
triple_in_ir = None
for l in ti.input_lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
# Parse each RUN line into (prefixes, tool, check_rc, args, triple, march).
run_list = []
for l in ti.run_lines:
if "|" not in l:
common.warn("Skipping unparsable RUN line: " + l)
continue
commands = [cmd.strip() for cmd in l.split("|")]
assert len(commands) >= 2
mc_cmd = " | ".join(commands[:-1])
filecheck_cmd = commands[-1]
# Special handling for a negated exit status: if "not" is used in the
# RUN line, disable the return-code check, since the command may or may
# not return a non-zero code on a single-line run.
check_rc = True
mc_cmd_args = mc_cmd.strip().split()
if mc_cmd_args[0] == "not":
check_rc = False
mc_tool = mc_cmd_args[1]
mc_cmd = mc_cmd[len(mc_cmd_args[0]) :].strip()
else:
mc_tool = mc_cmd_args[0]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(mc_cmd)
if m:
triple_in_cmd = m.groups()[0]
march_in_cmd = ti.args.default_march
m = common.MARCH_ARG_RE.search(mc_cmd)
if m:
march_in_cmd = m.groups()[0]
common.verify_filecheck_prefixes(filecheck_cmd)
# Accept the known mc-like tools plus an optional user-supplied tool.
mc_like_tools = mc_LIKE_TOOLS[:]
if ti.args.tool:
mc_like_tools.append(ti.args.tool)
if mc_tool not in mc_like_tools:
common.warn("Skipping non-mc RUN line: " + l)
continue
if not filecheck_cmd.startswith("FileCheck "):
common.warn("Skipping non-FileChecked RUN line: " + l)
continue
# Drop the tool name and the %s input placeholders from the args.
mc_cmd_args = mc_cmd[len(mc_tool) :].strip()
mc_cmd_args = mc_cmd_args.replace("< %s", "").replace("%s", "").strip()
check_prefixes = common.get_check_prefixes(filecheck_cmd)
run_list.append(
(
check_prefixes,
mc_tool,
check_rc,
mc_cmd_args,
triple_in_cmd,
march_in_cmd,
)
)
# Find every test line in the input.
testlines = [l for l in ti.input_lines if isTestLine(l, mc_mode)]
# Remove duplicated lines to save running time.
testlines = list(dict.fromkeys(testlines))
common.debug("Valid test line found: ", len(testlines))
run_list_size = len(run_list)
testnum = len(testlines)
# raw_output[run_id][test_id] holds the tool output for each
# (RUN line, test line) pair; raw_prefixes parallels run_list.
raw_output = []
raw_prefixes = []
for (
prefixes,
mc_tool,
check_rc,
mc_args,
triple_in_cmd,
march_in_cmd,
) in run_list:
common.debug("Extracted mc cmd:", mc_tool, mc_args)
common.debug("Extracted FileCheck prefixes:", str(prefixes))
common.debug("Extracted triple :", str(triple_in_cmd))
common.debug("Extracted march:", str(march_in_cmd))
triple = triple_in_cmd or triple_in_ir
if not triple:
triple = common.get_triple_from_march(march_in_cmd)
raw_output.append([])
for line in testlines:
# Get the tool output for each test line individually.
out = invoke_tool(
ti.args.llvm_mc_binary or mc_tool,
check_rc,
mc_args,
line,
verbose=ti.args.verbose,
)
raw_output[-1].append(out)
common.debug("Collect raw tool lines:", str(len(raw_output[-1])))
raw_prefixes.append(prefixes)
output_lines = []
generated_prefixes = {}
used_prefixes = set()
prefix_set = set([prefix for p in run_list for prefix in p[0]])
common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
for test_id in range(testnum):
input_line = testlines[test_id]
# p_dict maps prefix -> (output, [run_id, ...]).
# Insert each run's output into the prefix-keyed dict, then sort by
# descending run count to select the most-used prefix among those that
# share the same output string.
p_dict = {}
for run_id in range(run_list_size):
out = raw_output[run_id][test_id]
if hasErr(out):
o = getErrString(out)
else:
o = getOutputString(out)
prefixes = raw_prefixes[run_id]
for p in prefixes:
if p not in p_dict:
p_dict[p] = o, [run_id]
else:
# (None, []) marks a prefix already discarded for conflicts.
if p_dict[p] == (None, []):
continue
prev_o, run_ids = p_dict[p]
if o == prev_o:
run_ids.append(run_id)
p_dict[p] = o, run_ids
else:
# Conflicting outputs under the same prefix: discard it.
p_dict[p] = None, []
p_dict_sorted = dict(sorted(p_dict.items(), key=lambda item: -len(item[1][1])))
# The prefix sharing the most output lines is selected and generated;
# each run_id may be consumed by only one prefix.
gen_prefix = ""
used_runid = set()
# Line-number difference between the generated prefix and the test line.
line_offset = 1
for prefix, tup in p_dict_sorted.items():
o, run_ids = tup
if len(run_ids) == 0:
continue
skip = False
for i in run_ids:
if i in used_runid:
skip = True
else:
used_runid.add(i)
if not skip:
used_prefixes.add(prefix)
if hasErr(o):
newline = getErrCheckLine(prefix, o, mc_mode, line_offset)
else:
newline = getStdCheckLine(prefix, o, mc_mode)
if newline:
gen_prefix += newline
line_offset += 1
generated_prefixes[input_line] = gen_prefix.rstrip("\n")
# Write the output: copy each surviving input line, appending the freshly
# generated check lines right after each test line.
for input_info in ti.iterlines(output_lines):
input_line = input_info.line
if input_line in testlines:
output_lines.append(input_line)
output_lines.append(generated_prefixes[input_line])
elif should_add_line_to_output(input_line, prefix_set, mc_mode):
output_lines.append(input_line)
if ti.args.unique or ti.args.sort:
# Split into test units at double newlines.
test_units = "\n".join(output_lines).split("\n\n")
# Select the key line for each test unit.
test_dic = {}
for unit in test_units:
lines = unit.split("\n")
for l in lines:
# If the unit contains multiple lines, use the first test line
# or RUN line as its key.
if isTestLine(l, mc_mode):
test_dic[unit] = l
break
if isRunLine(l):
test_dic[unit] = l
break
# --unique: drop units whose key line was already emitted.
if ti.args.unique:
new_test_units = []
written_lines = set()
for unit in test_units:
# Units without a test/RUN-line key are always kept.
if unit not in test_dic:
new_test_units.append(unit)
else:
if test_dic[unit] in written_lines:
common.debug("Duplicated test skipped: ", unit)
continue
written_lines.add(test_dic[unit])
new_test_units.append(unit)
test_units = new_test_units
# --sort: order units by key line, RUN lines first.
if ti.args.sort:
def getkey(l):
# Find the key of the test unit; otherwise use its first line.
if l in test_dic:
line = test_dic[l]
else:
line = l.split("\n")[0]
# RUN lines are placed on top.
return (not isRunLine(line), line)
test_units = sorted(test_units, key=getkey)
# Join back into the output line list.
output_lines = "\n\n".join(test_units).split("\n")
# Emit bodies for prefixes that were declared but never used, if requested.
if ti.args.gen_unused_prefix_body:
output_lines.extend(ti.get_checks_for_unused_prefixes(run_list, used_prefixes))
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
@ -156,283 +428,18 @@ def main():
script_name = os.path.basename(__file__)
returncode = 0
for ti in common.itertests(
initial_args.tests, parser, script_name="utils/" + script_name
):
if ti.path.endswith(".s"):
mc_mode = "asm"
elif ti.path.endswith(".txt"):
mc_mode = "dasm"
if ti.args.sort:
print("sorting with dasm(.txt) file is not supported!")
return -1
else:
common.warn("Expected .s and .txt, Skipping file : ", ti.path)
continue
triple_in_ir = None
for l in ti.input_lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
run_list = []
for l in ti.run_lines:
if "|" not in l:
common.warn("Skipping unparsable RUN line: " + l)
continue
commands = [cmd.strip() for cmd in l.split("|")]
assert len(commands) >= 2
mc_cmd = " | ".join(commands[:-1])
filecheck_cmd = commands[-1]
# special handling for negating exit status
# if not is used in runline, disable rc check, since
# the command might or might not
# return non-zero code on a single line run
check_rc = True
mc_cmd_args = mc_cmd.strip().split()
if mc_cmd_args[0] == "not":
check_rc = False
mc_tool = mc_cmd_args[1]
mc_cmd = mc_cmd[len(mc_cmd_args[0]) :].strip()
else:
mc_tool = mc_cmd_args[0]
triple_in_cmd = None
m = common.TRIPLE_ARG_RE.search(mc_cmd)
if m:
triple_in_cmd = m.groups()[0]
march_in_cmd = ti.args.default_march
m = common.MARCH_ARG_RE.search(mc_cmd)
if m:
march_in_cmd = m.groups()[0]
common.verify_filecheck_prefixes(filecheck_cmd)
mc_like_tools = mc_LIKE_TOOLS[:]
if ti.args.tool:
mc_like_tools.append(ti.args.tool)
if mc_tool not in mc_like_tools:
common.warn("Skipping non-mc RUN line: " + l)
continue
if not filecheck_cmd.startswith("FileCheck "):
common.warn("Skipping non-FileChecked RUN line: " + l)
continue
mc_cmd_args = mc_cmd[len(mc_tool) :].strip()
mc_cmd_args = mc_cmd_args.replace("< %s", "").replace("%s", "").strip()
check_prefixes = common.get_check_prefixes(filecheck_cmd)
run_list.append(
(
check_prefixes,
mc_tool,
check_rc,
mc_cmd_args,
triple_in_cmd,
march_in_cmd,
)
)
# find all test line from input
testlines = [l for l in ti.input_lines if isTestLine(l, mc_mode)]
# remove duplicated lines to save running time
testlines = list(dict.fromkeys(testlines))
common.debug("Valid test line found: ", len(testlines))
run_list_size = len(run_list)
testnum = len(testlines)
raw_output = []
raw_prefixes = []
for (
prefixes,
mc_tool,
check_rc,
mc_args,
triple_in_cmd,
march_in_cmd,
) in run_list:
common.debug("Extracted mc cmd:", mc_tool, mc_args)
common.debug("Extracted FileCheck prefixes:", str(prefixes))
common.debug("Extracted triple :", str(triple_in_cmd))
common.debug("Extracted march:", str(march_in_cmd))
triple = triple_in_cmd or triple_in_ir
if not triple:
triple = common.get_triple_from_march(march_in_cmd)
raw_output.append([])
for line in testlines:
# get output for each testline
out = invoke_tool(
ti.args.llvm_mc_binary or mc_tool,
check_rc,
mc_args,
line,
verbose=ti.args.verbose,
)
raw_output[-1].append(out)
common.debug("Collect raw tool lines:", str(len(raw_output[-1])))
raw_prefixes.append(prefixes)
output_lines = []
generated_prefixes = {}
used_prefixes = set()
prefix_set = set([prefix for p in run_list for prefix in p[0]])
common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
for test_id in range(testnum):
input_line = testlines[test_id]
# a {prefix : output, [runid] } dict
# insert output to a prefix-key dict, and do a max sorting
# to select the most-used prefix which share the same output string
p_dict = {}
for run_id in range(run_list_size):
out = raw_output[run_id][test_id]
if hasErr(out):
o = getErrString(out)
else:
o = getOutputString(out)
prefixes = raw_prefixes[run_id]
for p in prefixes:
if p not in p_dict:
p_dict[p] = o, [run_id]
else:
if p_dict[p] == (None, []):
continue
prev_o, run_ids = p_dict[p]
if o == prev_o:
run_ids.append(run_id)
p_dict[p] = o, run_ids
else:
# conflict, discard
p_dict[p] = None, []
p_dict_sorted = dict(
sorted(p_dict.items(), key=lambda item: -len(item[1][1]))
)
# prefix is selected and generated with most shared output lines
# each run_id can only be used once
gen_prefix = ""
used_runid = set()
# line number diff between generated prefix and testline
line_offset = 1
for prefix, tup in p_dict_sorted.items():
o, run_ids = tup
if len(run_ids) == 0:
continue
skip = False
for i in run_ids:
if i in used_runid:
skip = True
else:
used_runid.add(i)
if not skip:
used_prefixes.add(prefix)
if hasErr(o):
newline = getErrCheckLine(prefix, o, mc_mode, line_offset)
else:
newline = getStdCheckLine(prefix, o, mc_mode)
if newline:
gen_prefix += newline
line_offset += 1
generated_prefixes[input_line] = gen_prefix.rstrip("\n")
# write output
for input_info in ti.iterlines(output_lines):
input_line = input_info.line
if input_line in testlines:
output_lines.append(input_line)
output_lines.append(generated_prefixes[input_line])
elif should_add_line_to_output(input_line, prefix_set, mc_mode):
output_lines.append(input_line)
if ti.args.unique or ti.args.sort:
# split with double newlines
test_units = "\n".join(output_lines).split("\n\n")
# select the key line for each test unit
test_dic = {}
for unit in test_units:
lines = unit.split("\n")
for l in lines:
# if contains multiple lines, use
# the first testline or runline as key
if isTestLine(l, mc_mode):
test_dic[unit] = l
break
if isRunLine(l):
test_dic[unit] = l
break
# unique
if ti.args.unique:
new_test_units = []
written_lines = set()
for unit in test_units:
# if not testline/runline, we just add it
if unit not in test_dic:
new_test_units.append(unit)
else:
if test_dic[unit] in written_lines:
common.debug("Duplicated test skipped: ", unit)
continue
written_lines.add(test_dic[unit])
new_test_units.append(unit)
test_units = new_test_units
# sort
if ti.args.sort:
def getkey(l):
# find key of test unit, otherwise use first line
if l in test_dic:
line = test_dic[l]
else:
line = l.split("\n")[0]
# runline placed on the top
return (not isRunLine(line), line)
test_units = sorted(test_units, key=getkey)
# join back to be output string
output_lines = "\n\n".join(test_units).split("\n")
# output
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(run_list, used_prefixes)
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
try:
update_test(ti)
except Exception:
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":
main()
sys.exit(main())

View File

@ -6,6 +6,8 @@ This script is a utility to update LLVM 'llvm-mca' based test cases with new
FileCheck patterns.
"""
from sys import stderr
from traceback import print_exc
import argparse
from collections import defaultdict
import glob
@ -561,27 +563,27 @@ def update_test_file(args, test_path, autogenerated_note):
)
def main():
warnings.showwarning = _showwarning
script_name = "utils/" + os.path.basename(__file__)
parser = _get_parser()
args = common.parse_commandline_args(parser)
if not args.llvm_mca_binary:
raise Error("--llvm-mca-binary value cannot be empty string")
stderr.write("Error: --llvm-mca-binary value cannot be empty string\n")
return 1
if "llvm-mca" not in os.path.basename(args.llvm_mca_binary):
_warn("unexpected binary name: {}".format(args.llvm_mca_binary))
returncode = 0
for ti in common.itertests(args.tests, parser, script_name=script_name):
try:
update_test_file(ti.args, ti.path, ti.test_autogenerated_note)
except Exception:
common.warn("Error processing file", test_file=ti.path)
raise
return 0
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":
try:
warnings.showwarning = _showwarning
sys.exit(main())
except Error as e:
sys.stdout.write("error: {}\n".format(e))
sys.exit(1)
sys.exit(main())

View File

@ -20,6 +20,8 @@ default, or removed if the --remove-common-prefixes flag is provided.
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import collections
import glob
@ -495,13 +497,16 @@ def main():
args = common.parse_commandline_args(parser)
script_name = os.path.basename(__file__)
returncode = 0
for ti in common.itertests(args.tests, parser, script_name="utils/" + script_name):
try:
update_test_file(ti.args, ti.path, ti.test_autogenerated_note)
except Exception:
common.warn("Error processing file", test_file=ti.path)
raise
except Exception as e:
stderr.write(f"Error: Failed to update test {ti.path}\n")
print_exc()
returncode = 1
return returncode
if __name__ == "__main__":
main()
sys.exit(main())

View File

@ -34,6 +34,8 @@ Workflow:
from __future__ import print_function
from sys import stderr
from traceback import print_exc
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import re
@ -42,6 +44,260 @@ import sys
from UpdateTestChecks import common
# Update a single opt/IR test file in place: parse its RUN lines, run the
# configured tool over the whole file for each RUN configuration, and
# regenerate the FileCheck check lines (function bodies and, when requested
# via --check-globals, global-value checks).
def update_test(ti: common.TestInfo):
# If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
if ti.args.scrub_attributes:
common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
)
else:
common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE
tool_basename = ti.args.tool
# Parse each RUN line into (check_prefixes, tool_args, preprocess_cmd).
prefix_list = []
for l in ti.run_lines:
if "|" not in l:
common.warn("Skipping unparsable RUN line: " + l)
continue
# For %if-guarded RUN lines, only parse the command inside %{ ... %}.
cropped_content = l
if "%if" in l:
match = re.search(r"%{\s*(.*?)\s*%}", l)
if match:
cropped_content = match.group(1)
commands = [cmd.strip() for cmd in cropped_content.split("|")]
assert len(commands) >= 2
# Everything before the last two pipeline stages is a preprocess step.
preprocess_cmd = None
if len(commands) > 2:
preprocess_cmd = " | ".join(commands[:-2])
tool_cmd = commands[-2]
filecheck_cmd = commands[-1]
common.verify_filecheck_prefixes(filecheck_cmd)
if not tool_cmd.startswith(tool_basename + " "):
common.warn("Skipping non-%s RUN line: %s" % (tool_basename, l))
continue
if not filecheck_cmd.startswith("FileCheck "):
common.warn("Skipping non-FileChecked RUN line: " + l)
continue
# Drop the tool name and the %s input placeholders from the args.
tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
check_prefixes = common.get_check_prefixes(filecheck_cmd)
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
ginfo = common.make_ir_generalizer(ti.args.version, ti.args.check_globals == "none")
global_vars_seen_dict = {}
builder = common.FunctionTestBuilder(
run_list=prefix_list,
flags=ti.args,
scrubber_args=[],
path=ti.path,
ginfo=ginfo,
)
tool_binary = ti.args.tool_binary
if not tool_binary:
tool_binary = tool_basename
# Run the tool once per RUN configuration and feed the raw output into the
# function-test builder.
for prefixes, tool_args, preprocess_cmd in prefix_list:
common.debug("Extracted tool cmd: " + tool_basename + " " + tool_args)
common.debug("Extracted FileCheck prefixes: " + str(prefixes))
raw_tool_output = common.invoke_tool(
tool_binary,
tool_args,
ti.path,
preprocess_cmd=preprocess_cmd,
verbose=ti.args.verbose,
)
builder.process_run_line(
common.OPT_FUNCTION_RE,
common.scrub_body,
raw_tool_output,
prefixes,
)
builder.processed_prefixes(prefixes)
prefix_set = set([prefix for prefixes, _, _ in prefix_list for prefix in prefixes])
# Unless variable names are being reset, collect the existing check lines
# so that stable value names can be preserved across regenerations.
if not ti.args.reset_variable_names:
original_check_lines = common.collect_original_check_lines(ti, prefix_set)
else:
original_check_lines = {}
func_dict = builder.finish_and_get_func_dict()
is_in_function = False
is_in_function_start = False
has_checked_pre_function_globals = False
common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
output_lines = []
include_generated_funcs = common.find_arg_in_test(
ti,
lambda args: ti.args.include_generated_funcs,
"--include-generated-funcs",
True,
)
generated_prefixes = []
if include_generated_funcs:
# Generate the appropriate checks for each function. We need to emit
# these in the order according to the generated output so that CHECK-LABEL
# works properly. func_order provides that.
# We can't predict where various passes might insert functions so we can't
# be sure the input function order is maintained. Therefore, first spit
# out all the source lines.
common.dump_input_lines(output_lines, ti, prefix_set, ";")
args = ti.args
if args.check_globals != "none":
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
";",
prefix_list,
output_lines,
ginfo,
global_vars_seen_dict,
args.preserve_names,
True,
args.check_globals,
)
)
# Now generate all the checks.
generated_prefixes.extend(
common.add_checks_at_end(
output_lines,
prefix_list,
builder.func_order(),
";",
lambda my_output_lines, prefixes, func: common.add_ir_checks(
my_output_lines,
";",
prefixes,
func_dict,
func,
False,
args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
original_check_lines=original_check_lines.get(func, {}),
),
)
)
else:
# "Normal" mode: interleave regenerated checks with the original input.
dropped_previous_line = False
for input_line_info in ti.iterlines(output_lines):
input_line = input_line_info.line
args = input_line_info.args
if is_in_function_start:
# Skip blank lines and pre-existing check comments at the top of a
# function, then emit the fresh check lines in their place.
if input_line == "":
continue
if input_line.lstrip().startswith(";"):
m = common.CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
generated_prefixes.extend(
common.add_ir_checks(
output_lines,
";",
prefix_list,
func_dict,
func_name,
args.preserve_names,
args.function_signature,
ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
original_check_lines=original_check_lines.get(func_name, {}),
)
)
is_in_function_start = False
# Emit the pre-function global checks once, just before the first
# function definition.
m = common.IR_FUNCTION_RE.match(input_line)
if m and not has_checked_pre_function_globals:
if args.check_globals:
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
";",
prefix_list,
output_lines,
ginfo,
global_vars_seen_dict,
args.preserve_names,
True,
args.check_globals,
)
)
has_checked_pre_function_globals = True
if common.should_add_line_to_output(
input_line,
prefix_set,
skip_global_checks=not is_in_function,
skip_same_checks=dropped_previous_line,
):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r" ", input_line)
output_lines.append(input_line)
dropped_previous_line = False
if input_line.strip() == "}":
is_in_function = False
continue
else:
# If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
dropped_previous_line = True
if is_in_function:
continue
m = common.IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
# After the body, append the trailing (post-function) global checks.
if args.check_globals != "none":
generated_prefixes.extend(
common.add_global_checks(
builder.global_var_dict(),
";",
prefix_list,
output_lines,
ginfo,
global_vars_seen_dict,
args.preserve_names,
False,
args.check_globals,
)
)
# Emit bodies for prefixes that were declared but never used, if requested.
if ti.args.gen_unused_prefix_body:
output_lines.extend(
ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
)
common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
with open(ti.path, "wb") as f:
f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
def main():
    """Regenerate FileCheck assertions for every test file named on the command line.

    Returns a process exit status: 0 when every test updated cleanly, 1 when
    at least one test failed to update.

    NOTE(review): this block contains unified-diff residue.  The raw hunk
    marker line below ("@ -102,271 ...") is not Python — the argument-parser
    setup that belongs there was elided by the diff view — and the body of
    the per-test loop appears twice: once as the old inline implementation
    and once as the new ``try: update_test(ti)`` form.  Only the latter
    should survive; as written each test would be processed twice.  Confirm
    against the upstream commit before cleaning up.
    """
    from argparse import RawTextHelpFormatter
    # NOTE(review): raw diff hunk marker; the elided lines (argparse setup and
    # the tool-name dispatch) are not recoverable from this view.
    @ -102,271 +358,18 @@ def main():
        # RUN line named a tool this script does not handle: report and abort.
        common.error("Unexpected tool name: " + tool_basename)
        sys.exit(1)
    # Non-zero once any single test fails to update; returned to the caller.
    returncode = 0
    for ti in common.itertests(
        initial_args.tests, parser, script_name="utils/" + script_name
    ):
        # ------------------------------------------------------------------
        # OLD inline implementation (superseded by the update_test() call at
        # the bottom of this loop — diff residue, see docstring).
        # ------------------------------------------------------------------
        # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
        if ti.args.scrub_attributes:
            common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
                common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
            )
        else:
            common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
                common.SCRUB_TRAILING_WHITESPACE_RE
            )
        tool_basename = ti.args.tool
        # Parse every RUN line into (FileCheck prefixes, tool args, preprocess cmd).
        prefix_list = []
        for l in ti.run_lines:
            if "|" not in l:
                common.warn("Skipping unparsable RUN line: " + l)
                continue
            # For %if-guarded RUN lines, only the guarded command inside
            # "%{ ... %}" is parsed.
            cropped_content = l
            if "%if" in l:
                match = re.search(r"%{\s*(.*?)\s*%}", l)
                if match:
                    cropped_content = match.group(1)
            commands = [cmd.strip() for cmd in cropped_content.split("|")]
            assert len(commands) >= 2
            # Everything before the final "tool | FileCheck" pair is treated
            # as a preprocessing pipeline for the test input.
            preprocess_cmd = None
            if len(commands) > 2:
                preprocess_cmd = " | ".join(commands[:-2])
            tool_cmd = commands[-2]
            filecheck_cmd = commands[-1]
            common.verify_filecheck_prefixes(filecheck_cmd)
            if not tool_cmd.startswith(tool_basename + " "):
                common.warn("Skipping non-%s RUN line: %s" % (tool_basename, l))
                continue
            if not filecheck_cmd.startswith("FileCheck "):
                common.warn("Skipping non-FileChecked RUN line: " + l)
                continue
            # Strip the tool name and the input-file placeholders; what is
            # left are the flags to re-invoke the tool with.
            tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
            tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
            check_prefixes = common.get_check_prefixes(filecheck_cmd)
            # FIXME: We should use multiple check prefixes to common check lines. For
            # now, we just ignore all but the last.
            prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
        ginfo = common.make_ir_generalizer(
            ti.args.version, ti.args.check_globals == "none"
        )
        global_vars_seen_dict = {}
        builder = common.FunctionTestBuilder(
            run_list=prefix_list,
            flags=ti.args,
            scrubber_args=[],
            path=ti.path,
            ginfo=ginfo,
        )
        # Fall back to looking the tool up on PATH by name when no explicit
        # binary was given.
        tool_binary = ti.args.tool_binary
        if not tool_binary:
            tool_binary = tool_basename
        # Run the tool once per RUN line and collect the scrubbed function
        # bodies for each prefix set.
        for prefixes, tool_args, preprocess_cmd in prefix_list:
            common.debug("Extracted tool cmd: " + tool_basename + " " + tool_args)
            common.debug("Extracted FileCheck prefixes: " + str(prefixes))
            raw_tool_output = common.invoke_tool(
                tool_binary,
                tool_args,
                ti.path,
                preprocess_cmd=preprocess_cmd,
                verbose=ti.args.verbose,
            )
            builder.process_run_line(
                common.OPT_FUNCTION_RE,
                common.scrub_body,
                raw_tool_output,
                prefixes,
            )
            builder.processed_prefixes(prefixes)
        prefix_set = set(
            [prefix for prefixes, _, _ in prefix_list for prefix in prefixes]
        )
        # Keep existing value names stable across regeneration unless the
        # user asked for a reset.
        if not ti.args.reset_variable_names:
            original_check_lines = common.collect_original_check_lines(ti, prefix_set)
        else:
            original_check_lines = {}
        func_dict = builder.finish_and_get_func_dict()
        is_in_function = False
        is_in_function_start = False
        has_checked_pre_function_globals = False
        common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
        output_lines = []
        include_generated_funcs = common.find_arg_in_test(
            ti,
            lambda args: ti.args.include_generated_funcs,
            "--include-generated-funcs",
            True,
        )
        generated_prefixes = []
        if include_generated_funcs:
            # Generate the appropriate checks for each function. We need to emit
            # these in the order according to the generated output so that CHECK-LABEL
            # works properly. func_order provides that.
            # We can't predict where various passes might insert functions so we can't
            # be sure the input function order is maintained. Therefore, first spit
            # out all the source lines.
            common.dump_input_lines(output_lines, ti, prefix_set, ";")
            args = ti.args
            if args.check_globals != 'none':
                generated_prefixes.extend(
                    common.add_global_checks(
                        builder.global_var_dict(),
                        ";",
                        prefix_list,
                        output_lines,
                        ginfo,
                        global_vars_seen_dict,
                        args.preserve_names,
                        True,
                        args.check_globals,
                    )
                )
            # Now generate all the checks.
            generated_prefixes.extend(
                common.add_checks_at_end(
                    output_lines,
                    prefix_list,
                    builder.func_order(),
                    ";",
                    lambda my_output_lines, prefixes, func: common.add_ir_checks(
                        my_output_lines,
                        ";",
                        prefixes,
                        func_dict,
                        func,
                        False,
                        args.function_signature,
                        ginfo,
                        global_vars_seen_dict,
                        is_filtered=builder.is_filtered(),
                        original_check_lines=original_check_lines.get(func, {}),
                    ),
                )
            )
        else:
            # "Normal" mode.
            dropped_previous_line = False
            for input_line_info in ti.iterlines(output_lines):
                input_line = input_line_info.line
                args = input_line_info.args
                if is_in_function_start:
                    if input_line == "":
                        continue
                    if input_line.lstrip().startswith(";"):
                        m = common.CHECK_RE.match(input_line)
                        if not m or m.group(1) not in prefix_set:
                            output_lines.append(input_line)
                            continue
                    # Print out the various check lines here.
                    generated_prefixes.extend(
                        common.add_ir_checks(
                            output_lines,
                            ";",
                            prefix_list,
                            func_dict,
                            func_name,
                            args.preserve_names,
                            args.function_signature,
                            ginfo,
                            global_vars_seen_dict,
                            is_filtered=builder.is_filtered(),
                            original_check_lines=original_check_lines.get(
                                func_name, {}
                            ),
                        )
                    )
                    is_in_function_start = False
                m = common.IR_FUNCTION_RE.match(input_line)
                if m and not has_checked_pre_function_globals:
                    if args.check_globals:
                        generated_prefixes.extend(
                            common.add_global_checks(
                                builder.global_var_dict(),
                                ";",
                                prefix_list,
                                output_lines,
                                ginfo,
                                global_vars_seen_dict,
                                args.preserve_names,
                                True,
                                args.check_globals,
                            )
                        )
                    has_checked_pre_function_globals = True
                if common.should_add_line_to_output(
                    input_line,
                    prefix_set,
                    skip_global_checks=not is_in_function,
                    skip_same_checks=dropped_previous_line,
                ):
                    # This input line of the function body will go as-is into the output.
                    # Except make leading whitespace uniform: 2 spaces.
                    input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
                        r" ", input_line
                    )
                    output_lines.append(input_line)
                    dropped_previous_line = False
                    if input_line.strip() == "}":
                        is_in_function = False
                        continue
                else:
                    # If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
                    dropped_previous_line = True
                if is_in_function:
                    continue
                m = common.IR_FUNCTION_RE.match(input_line)
                if not m:
                    continue
                func_name = m.group(1)
                if args.function is not None and func_name != args.function:
                    # When filtering on a specific function, skip all others.
                    continue
                is_in_function = is_in_function_start = True
        # Trailing (post-function) global checks.
        if args.check_globals != 'none':
            generated_prefixes.extend(
                common.add_global_checks(
                    builder.global_var_dict(),
                    ";",
                    prefix_list,
                    output_lines,
                    ginfo,
                    global_vars_seen_dict,
                    args.preserve_names,
                    False,
                    args.check_globals,
                )
            )
        if ti.args.gen_unused_prefix_body:
            output_lines.extend(
                ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
            )
        common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
        # Write bytes to avoid platform newline translation.
        with open(ti.path, "wb") as f:
            f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
        # ------------------------------------------------------------------
        # NEW implementation: delegate to update_test() and keep going on
        # error so one broken test does not abort the remaining updates.
        # ------------------------------------------------------------------
        try:
            update_test(ti)
        except Exception as e:
            # NOTE(review): `e` is bound but unused — print_exc() reports the
            # active exception from sys.exc_info() by itself.
            stderr.write(f"Error: Failed to update test {ti.path}\n")
            print_exc()
            returncode = 1
    return returncode
# Script entry point.  Propagate main()'s return value as the process exit
# status so callers (and CI) can detect that some test updates failed.
# Fix: the old bare `main()` call had been left in place next to the new
# `sys.exit(main())` (diff-merge residue), which would have run the whole
# update twice and discarded the first run's status; call main() exactly once.
if __name__ == "__main__":
    sys.exit(main())