[NFC][Py Reformat] Reformat python files in libcxx/libcxxabi

This is an ongoing series of commits that are reformatting our
Python code.

Reformatting is done with `black`.

If you end up having problems merging this commit because you
have made changes to a Python file, the best way to handle that
is to run `git checkout --ours <yourfile>` and then reformat it
with black.

If you run into any problems, post about it on Discourse and
we will try to help.

RFC Thread below:

https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Reviewed By: #libc, kwk, Mordante

Differential Revision: https://reviews.llvm.org/D150763
This commit is contained in:
Tobias Hieta 2023-05-17 11:09:29 +02:00
parent 2ba14283cd
commit 7bfaa0f09d
No known key found for this signature in database
GPG Key ID: 44F2485E45D59042
65 changed files with 4126 additions and 3025 deletions

View File

@ -3,21 +3,21 @@
import os
import site
site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils"))
from libcxx.test.googlebenchmark import GoogleBenchmark
# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
if "PYLINT_IMPORT" in os.environ:
config = object()
lit_config = object()
# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.name = "libc++ benchmarks"
config.suffixes = []
config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_exec_root = os.path.join(config.libcxx_obj_root, "benchmarks")
config.test_source_root = config.test_exec_root
config.test_format = GoogleBenchmark(test_sub_dirs='.',
test_suffix='.libcxx.out',
benchmark_args=config.benchmark_args)
config.test_format = GoogleBenchmark(
test_sub_dirs=".", test_suffix=".libcxx.out", benchmark_args=config.benchmark_args
)

View File

@ -16,106 +16,106 @@ from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'libc++'
copyright = u'2011-%d, LLVM Project' % date.today().year
project = "libc++"
copyright = "2011-%d, LLVM Project" % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '17.0'
version = "17.0"
# The full version, including alpha/beta/rc tags.
release = '17.0'
release = "17.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
today_fmt = "%Y-%m-%d"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Helpers']
exclude_patterns = ["_build", "Helpers"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
pygments_style = "friendly"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
html_theme = "haiku"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@ -124,101 +124,95 @@ html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libcxxdoc'
htmlhelp_basename = "libcxxdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'libcxx.tex', u'libcxx Documentation',
u'LLVM project', 'manual'),
("contents", "libcxx.tex", "libcxx Documentation", "LLVM project", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'libc++', u'libc++ Documentation',
[u'LLVM project'], 1)
]
man_pages = [("contents", "libc++", "libc++ Documentation", ["LLVM project"], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@ -227,19 +221,25 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('contents', 'libc++', u'libc++ Documentation',
u'LLVM project', 'libc++', 'One line description of project.',
'Miscellaneous'),
(
"contents",
"libc++",
"libc++ Documentation",
"LLVM project",
"libc++",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.

View File

@ -1,3 +1,3 @@
# Disable all of the experimental tests if the correct feature is not available.
if 'c++experimental' not in config.available_features:
config.unsupported = True
if "c++experimental" not in config.available_features:
config.unsupported = True

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""Commands used to automate testing gdb pretty printers.
This script is part of a larger framework to test gdb pretty printers. It
@ -31,10 +31,8 @@ has_run_tests = False
class CheckResult(gdb.Command):
def __init__(self):
super(CheckResult, self).__init__(
"print_and_compare", gdb.COMMAND_DATA)
super(CheckResult, self).__init__("print_and_compare", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
global has_run_tests
@ -55,7 +53,7 @@ class CheckResult(gdb.Command):
value_str = self._get_value_string(compare_frame, testcase_frame)
# Ignore the convenience variable name and newline
value = value_str[value_str.find("= ") + 2:-1]
value = value_str[value_str.find("= ") + 2 : -1]
gdb.newest_frame().select()
expectation_val = compare_frame.read_var("expectation")
check_literal = expectation_val.string(encoding="utf-8")
@ -66,16 +64,14 @@ class CheckResult(gdb.Command):
if test_fails:
global test_failures
print("FAIL: " + test_loc.symtab.filename +
":" + str(test_loc.line))
print("FAIL: " + test_loc.symtab.filename + ":" + str(test_loc.line))
print("GDB printed:")
print(" " + repr(value))
print("Value should match:")
print(" " + repr(check_literal))
test_failures += 1
else:
print("PASS: " + test_loc.symtab.filename +
":" + str(test_loc.line))
print("PASS: " + test_loc.symtab.filename + ":" + str(test_loc.line))
except RuntimeError as e:
# At this point, lots of different things could be wrong, so don't try to

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,4 +1,7 @@
# Load the same local configuration as the filesystem tests in libcxx/test/std
import os
std_filesystem_tests = os.path.join(config.test_source_root, 'std', 'input.output', 'filesystems')
config.load_from_path(os.path.join(std_filesystem_tests, 'lit.local.cfg'), lit_config)
std_filesystem_tests = os.path.join(
config.test_source_root, "std", "input.output", "filesystems"
)
config.load_from_path(os.path.join(std_filesystem_tests, "lit.local.cfg"), lit_config)

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -6,25 +6,25 @@
import os
if __name__ == '__main__':
if __name__ == "__main__":
libcxx_test_libcxx_lint = os.path.dirname(os.path.abspath(__file__))
libcxx = os.path.abspath(os.path.join(libcxx_test_libcxx_lint, '../../..'))
cmakelists_name = os.path.join(libcxx, 'include/CMakeLists.txt')
libcxx = os.path.abspath(os.path.join(libcxx_test_libcxx_lint, "../../.."))
cmakelists_name = os.path.join(libcxx, "include/CMakeLists.txt")
assert os.path.isfile(cmakelists_name)
with open(cmakelists_name, 'r') as f:
with open(cmakelists_name, "r") as f:
lines = f.readlines()
assert lines[0] == 'set(files\n'
assert lines[0] == "set(files\n"
okay = True
prevline = lines[1]
for line in lines[2:]:
if (line == ' )\n'):
if line == " )\n":
break
if (line < prevline):
if line < prevline:
okay = False
print('LINES OUT OF ORDER in libcxx/include/CMakeLists.txt!')
print("LINES OUT OF ORDER in libcxx/include/CMakeLists.txt!")
print(prevline)
print(line)
prevline = line

View File

@ -10,45 +10,52 @@ import re
def exclude_from_consideration(path):
return (
path.endswith('.txt') or
path.endswith('.modulemap.in') or
os.path.basename(path) == '__config' or
os.path.basename(path) == '__config_site.in' or
os.path.basename(path) == 'libcxx.imp' or
os.path.basename(path).startswith('__pstl') or # TODO: Remove once PSTL integration is finished
not os.path.isfile(path)
path.endswith(".txt")
or path.endswith(".modulemap.in")
or os.path.basename(path) == "__config"
or os.path.basename(path) == "__config_site.in"
or os.path.basename(path) == "libcxx.imp"
or os.path.basename(path).startswith("__pstl")
or not os.path.isfile(path) # TODO: Remove once PSTL integration is finished
)
def check_for_pragma_GCC_system_header(pretty_fname, lines):
if pretty_fname not in ['__undef_macros']:
if pretty_fname not in ["__undef_macros"]:
for line in lines:
if re.match('# *pragma GCC system_header\n', line):
if re.match("# *pragma GCC system_header\n", line):
return True
print('FAILED TO FIND # pragma GCC system_header in libcxx/include/%s' % pretty_fname)
print(
"FAILED TO FIND # pragma GCC system_header in libcxx/include/%s"
% pretty_fname
)
return False
return True
if __name__ == '__main__':
if __name__ == "__main__":
libcxx_test_libcxx_lint = os.path.dirname(os.path.abspath(__file__))
libcxx_include = os.path.abspath(os.path.join(libcxx_test_libcxx_lint, '../../../include'))
libcxx_include = os.path.abspath(
os.path.join(libcxx_test_libcxx_lint, "../../../include")
)
assert os.path.isdir(libcxx_include)
def pretty(path):
return path[len(libcxx_include) + 1:]
return path[len(libcxx_include) + 1 :]
all_headers = [
p for p in (
glob.glob(os.path.join(libcxx_include, '*')) +
glob.glob(os.path.join(libcxx_include, '__*/*.h'))
) if not exclude_from_consideration(p)
p
for p in (
glob.glob(os.path.join(libcxx_include, "*"))
+ glob.glob(os.path.join(libcxx_include, "__*/*.h"))
)
if not exclude_from_consideration(p)
]
okay = True
for fname in all_headers:
pretty_fname = pretty(fname)
with open(fname, 'r') as f:
with open(fname, "r") as f:
lines = f.readlines()
okay = check_for_pragma_GCC_system_header(pretty_fname, lines) and okay

View File

@ -7,39 +7,53 @@ import os
import re
if __name__ == '__main__':
if __name__ == "__main__":
libcxx_test_libcxx_lint = os.path.dirname(os.path.abspath(__file__))
libcxx = os.path.abspath(os.path.join(libcxx_test_libcxx_lint, '../../..'))
modulemap_name = os.path.join(libcxx, 'include/module.modulemap.in')
libcxx = os.path.abspath(os.path.join(libcxx_test_libcxx_lint, "../../.."))
modulemap_name = os.path.join(libcxx, "include/module.modulemap.in")
assert os.path.isfile(modulemap_name)
okay = True
prevline = None
with open(modulemap_name, 'r') as f:
with open(modulemap_name, "r") as f:
for line in f.readlines():
if re.match(r'^\s*module.*[{]\s*private', line):
if re.match(r"^\s*module.*[{]\s*private", line):
# Check that these lines are all of the expected format.
# This incidentally checks for typos in the module name.
if re.match(r'^\s*module (\w+)\s+[{] private header "\1(.h)?"\s+export [*] [}]', line):
if re.match(
r'^\s*module (\w+)\s+[{] private header "\1(.h)?"\s+export [*] [}]',
line,
):
# It's a top-level private header, such as <__bit_reference>.
pass
elif re.match(r'^\s*module (\w+)\s+[{] private (textual )?header "__(\w+/)*\1[.]h" [}]', line):
elif re.match(
r'^\s*module (\w+)\s+[{] private (textual )?header "__(\w+/)*\1[.]h" [}]',
line,
):
# It's a private submodule, such as <__utility/swap.h>.
pass
elif re.match(r'^\s*module (\w+)_fwd\s+[{] private header "__fwd/\1[.]h" [}]', line):
elif re.match(
r'^\s*module (\w+)_fwd\s+[{] private header "__fwd/\1[.]h" [}]',
line,
):
# It's a private submodule with forward declarations, such as <__fwd/span.h>.
pass
elif re.match(r'^\s*module (?:\w+_)*(\w+)\s+[{] private (textual )?header "__(\w+/)*\1[.]h" [}]', line):
elif re.match(
r'^\s*module (?:\w+_)*(\w+)\s+[{] private (textual )?header "__(\w+/)*\1[.]h" [}]',
line,
):
# It's a private pstl submodule, such as <__algorithm/pstl_backends/cpu_backend.h>
pass
else:
okay = False
print("LINE DOESN'T MATCH REGEX in libcxx/include/module.modulemap.in!")
print(
"LINE DOESN'T MATCH REGEX in libcxx/include/module.modulemap.in!"
)
print(line)
# Check that these lines are alphabetized.
if (prevline is not None) and (line < prevline):
okay = False
print('LINES OUT OF ORDER in libcxx/include/module.modulemap.in!')
print("LINES OUT OF ORDER in libcxx/include/module.modulemap.in!")
print(prevline)
print(line)
prevline = line

View File

@ -1,3 +1,4 @@
# The tests in this directory need to run Python
import pipes, sys
config.substitutions.append(('%{python}', pipes.quote(sys.executable)))
config.substitutions.append(("%{python}", pipes.quote(sys.executable)))

View File

@ -1,6 +1,7 @@
# Load the same local configuration as the corresponding one in libcxx/test/std
import os
inLibcxx = os.path.join('libcxx', 'test', 'libcxx')
inStd = os.path.join('libcxx', 'test', 'std')
inLibcxx = os.path.join("libcxx", "test", "libcxx")
inStd = os.path.join("libcxx", "test", "std")
localConfig = os.path.normpath(os.path.realpath(__file__)).replace(inLibcxx, inStd)
config.load_from_path(localConfig, lit_config)

View File

@ -1,2 +1,2 @@
# Add a Lit feature so we can test conditional addition of compile flags.
config.available_features.add('some-defined-feature')
config.available_features.add("some-defined-feature")

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
# Note: We prepend arguments with 'x' to avoid thinking there are too few
# arguments in case an argument is an empty string.
@ -23,15 +23,17 @@ from os.path import dirname
# Allow importing 'lit' and the 'libcxx' module. Make sure we put the lit
# path first so we don't find any system-installed version.
monorepoRoot = dirname(dirname(dirname(dirname(dirname(dirname(__file__))))))
sys.path = [os.path.join(monorepoRoot, 'libcxx', 'utils'),
os.path.join(monorepoRoot, 'llvm', 'utils', 'lit')] + sys.path
sys.path = [
os.path.join(monorepoRoot, "libcxx", "utils"),
os.path.join(monorepoRoot, "llvm", "utils", "lit"),
] + sys.path
import libcxx.test.dsl as dsl
import lit.LitConfig
import lit.util
# Steal some parameters from the config running this test so that we can
# bootstrap our own TestingConfig.
args = list(map(lambda s: s[1:], sys.argv[1:8])) # Remove the leading 'x'
args = list(map(lambda s: s[1:], sys.argv[1:8])) # Remove the leading 'x'
SOURCE_ROOT, EXEC_PATH, SUBSTITUTIONS = args
sys.argv[1:8] = []
@ -40,10 +42,12 @@ SUBSTITUTIONS = pickle.loads(base64.b64decode(SUBSTITUTIONS))
for s, sub in SUBSTITUTIONS:
print("Substitution '{}' is '{}'".format(s, sub))
class SetupConfigs(unittest.TestCase):
"""
Base class for the tests below -- it creates a fake TestingConfig.
"""
def setUp(self):
"""
Create a fake TestingConfig that can be populated however we wish for
@ -51,7 +55,7 @@ class SetupConfigs(unittest.TestCase):
minimum required substitutions.
"""
self.litConfig = lit.LitConfig.LitConfig(
progname='lit',
progname="lit",
path=[],
quiet=False,
useValgrind=False,
@ -59,9 +63,10 @@ class SetupConfigs(unittest.TestCase):
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=platform.system() == 'Windows',
order='smart',
params={})
isWindows=platform.system() == "Windows",
order="smart",
params={},
)
self.config = lit.TestingConfig.TestingConfig.fromdefaults(self.litConfig)
self.config.environment = dict(os.environ)
@ -82,7 +87,7 @@ class SetupConfigs(unittest.TestCase):
def findIndex(list, pred):
"""Finds the index of the first element satisfying 'pred' in a list, or
'len(list)' if there is no such element."""
'len(list)' if there is no such element."""
index = 0
for x in list:
if pred(x):
@ -96,23 +101,27 @@ class TestHasCompileFlag(SetupConfigs):
"""
Tests for libcxx.test.dsl.hasCompileFlag
"""
def test_no_flag_should_work(self):
self.assertTrue(dsl.hasCompileFlag(self.config, ''))
self.assertTrue(dsl.hasCompileFlag(self.config, ""))
def test_flag_exists(self):
self.assertTrue(dsl.hasCompileFlag(self.config, '-O1'))
self.assertTrue(dsl.hasCompileFlag(self.config, "-O1"))
def test_nonexistent_flag(self):
self.assertFalse(dsl.hasCompileFlag(self.config, '-this_is_not_a_flag_any_compiler_has'))
self.assertFalse(
dsl.hasCompileFlag(self.config, "-this_is_not_a_flag_any_compiler_has")
)
def test_multiple_flags(self):
self.assertTrue(dsl.hasCompileFlag(self.config, '-O1 -Dhello'))
self.assertTrue(dsl.hasCompileFlag(self.config, "-O1 -Dhello"))
class TestSourceBuilds(SetupConfigs):
"""
Tests for libcxx.test.dsl.sourceBuilds
"""
def test_valid_program_builds(self):
source = """int main(int, char**) { return 0; }"""
self.assertTrue(dsl.sourceBuilds(self.config, source))
@ -131,6 +140,7 @@ class TestProgramOutput(SetupConfigs):
"""
Tests for libcxx.test.dsl.programOutput
"""
def test_valid_program_returns_output(self):
source = """
#include <cstdio>
@ -156,14 +166,20 @@ class TestProgramOutput(SetupConfigs):
source = """
int main(int, char**) { return 1; }
"""
self.assertRaises(dsl.ConfigurationRuntimeError, lambda: dsl.programOutput(self.config, source))
self.assertRaises(
dsl.ConfigurationRuntimeError,
lambda: dsl.programOutput(self.config, source),
)
def test_program_that_fails_to_compile_raises_compilation_error(self):
# The program doesn't compile
source = """
int main(int, char**) { this doesnt compile }
"""
self.assertRaises(dsl.ConfigurationCompilationError, lambda: dsl.programOutput(self.config, source))
self.assertRaises(
dsl.ConfigurationCompilationError,
lambda: dsl.programOutput(self.config, source),
)
def test_pass_arguments_to_program(self):
source = """
@ -191,14 +207,22 @@ class TestProgramOutput(SetupConfigs):
return 0;
}
"""
compileFlagsIndex = findIndex(self.config.substitutions, lambda x: x[0] == '%{compile_flags}')
compileFlagsIndex = findIndex(
self.config.substitutions, lambda x: x[0] == "%{compile_flags}"
)
compileFlags = self.config.substitutions[compileFlagsIndex][1]
self.config.substitutions[compileFlagsIndex] = ('%{compile_flags}', compileFlags + ' -DMACRO=1')
self.config.substitutions[compileFlagsIndex] = (
"%{compile_flags}",
compileFlags + " -DMACRO=1",
)
output1 = dsl.programOutput(self.config, source)
self.assertEqual(output1, "MACRO=1\n")
self.config.substitutions[compileFlagsIndex] = ('%{compile_flags}', compileFlags + ' -DMACRO=2')
self.config.substitutions[compileFlagsIndex] = (
"%{compile_flags}",
compileFlags + " -DMACRO=2",
)
output2 = dsl.programOutput(self.config, source)
self.assertEqual(output2, "MACRO=2\n")
@ -220,6 +244,7 @@ class TestProgramSucceeds(SetupConfigs):
"""
Tests for libcxx.test.dsl.programSucceeds
"""
def test_success(self):
source = """
int main(int, char**) { return 0; }
@ -236,33 +261,47 @@ class TestProgramSucceeds(SetupConfigs):
source = """
this does not compile
"""
self.assertRaises(dsl.ConfigurationCompilationError, lambda: dsl.programSucceeds(self.config, source))
self.assertRaises(
dsl.ConfigurationCompilationError,
lambda: dsl.programSucceeds(self.config, source),
)
class TestHasLocale(SetupConfigs):
"""
Tests for libcxx.test.dsl.hasLocale
"""
def test_doesnt_explode(self):
# It's really hard to test that a system has a given locale, so at least
# make sure we don't explode when we try to check it.
try:
dsl.hasAnyLocale(self.config, ['en_US.UTF-8'])
dsl.hasAnyLocale(self.config, ["en_US.UTF-8"])
except subprocess.CalledProcessError:
self.fail("checking for hasLocale should not explode")
def test_nonexistent_locale(self):
self.assertFalse(dsl.hasAnyLocale(self.config, ['forsurethisisnotanexistinglocale']))
self.assertFalse(
dsl.hasAnyLocale(self.config, ["forsurethisisnotanexistinglocale"])
)
def test_localization_program_doesnt_compile(self):
compilerIndex = findIndex(self.config.substitutions, lambda x: x[0] == '%{cxx}')
self.config.substitutions[compilerIndex] = ('%{cxx}', 'this-is-certainly-not-a-valid-compiler!!')
self.assertRaises(dsl.ConfigurationCompilationError, lambda: dsl.hasAnyLocale(self.config, ['en_US.UTF-8']))
compilerIndex = findIndex(self.config.substitutions, lambda x: x[0] == "%{cxx}")
self.config.substitutions[compilerIndex] = (
"%{cxx}",
"this-is-certainly-not-a-valid-compiler!!",
)
self.assertRaises(
dsl.ConfigurationCompilationError,
lambda: dsl.hasAnyLocale(self.config, ["en_US.UTF-8"]),
)
class TestCompilerMacros(SetupConfigs):
"""
Tests for libcxx.test.dsl.compilerMacros
"""
def test_basic(self):
macros = dsl.compilerMacros(self.config)
self.assertIsInstance(macros, dict)
@ -273,28 +312,29 @@ class TestCompilerMacros(SetupConfigs):
def test_no_flag(self):
macros = dsl.compilerMacros(self.config)
self.assertIn('__cplusplus', macros.keys())
self.assertIn("__cplusplus", macros.keys())
def test_empty_flag(self):
macros = dsl.compilerMacros(self.config, '')
self.assertIn('__cplusplus', macros.keys())
macros = dsl.compilerMacros(self.config, "")
self.assertIn("__cplusplus", macros.keys())
def test_with_flag(self):
macros = dsl.compilerMacros(self.config, '-DFOO=3')
self.assertIn('__cplusplus', macros.keys())
self.assertEqual(macros['FOO'], '3')
macros = dsl.compilerMacros(self.config, "-DFOO=3")
self.assertIn("__cplusplus", macros.keys())
self.assertEqual(macros["FOO"], "3")
def test_with_flags(self):
macros = dsl.compilerMacros(self.config, '-DFOO=3 -DBAR=hello')
self.assertIn('__cplusplus', macros.keys())
self.assertEqual(macros['FOO'], '3')
self.assertEqual(macros['BAR'], 'hello')
macros = dsl.compilerMacros(self.config, "-DFOO=3 -DBAR=hello")
self.assertIn("__cplusplus", macros.keys())
self.assertEqual(macros["FOO"], "3")
self.assertEqual(macros["BAR"], "hello")
class TestFeatureTestMacros(SetupConfigs):
"""
Tests for libcxx.test.dsl.featureTestMacros
"""
def test_basic(self):
macros = dsl.featureTestMacros(self.config)
self.assertIsInstance(macros, dict)
@ -308,21 +348,22 @@ class TestFeature(SetupConfigs):
"""
Tests for libcxx.test.dsl.Feature
"""
def test_trivial(self):
feature = dsl.Feature(name='name')
feature = dsl.Feature(name="name")
origSubstitutions = copy.deepcopy(self.config.substitutions)
actions = feature.getActions(self.config)
self.assertTrue(len(actions) == 1)
for a in actions:
a.applyTo(self.config)
self.assertEqual(origSubstitutions, self.config.substitutions)
self.assertIn('name', self.config.available_features)
self.assertIn("name", self.config.available_features)
def test_name_can_be_a_callable(self):
feature = dsl.Feature(name=lambda cfg: 'name')
feature = dsl.Feature(name=lambda cfg: "name")
for a in feature.getActions(self.config):
a.applyTo(self.config)
self.assertIn('name', self.config.available_features)
self.assertIn("name", self.config.available_features)
def test_name_is_not_a_string_1(self):
feature = dsl.Feature(name=None)
@ -335,159 +376,250 @@ class TestFeature(SetupConfigs):
self.assertRaises(ValueError, lambda: feature.pretty(self.config))
def test_adding_action(self):
feature = dsl.Feature(name='name', actions=[dsl.AddCompileFlag('-std=c++03')])
origLinkFlags = copy.deepcopy(self.getSubstitution('%{link_flags}'))
feature = dsl.Feature(name="name", actions=[dsl.AddCompileFlag("-std=c++03")])
origLinkFlags = copy.deepcopy(self.getSubstitution("%{link_flags}"))
for a in feature.getActions(self.config):
a.applyTo(self.config)
self.assertIn('name', self.config.available_features)
self.assertIn('-std=c++03', self.getSubstitution('%{compile_flags}'))
self.assertEqual(origLinkFlags, self.getSubstitution('%{link_flags}'))
self.assertIn("name", self.config.available_features)
self.assertIn("-std=c++03", self.getSubstitution("%{compile_flags}"))
self.assertEqual(origLinkFlags, self.getSubstitution("%{link_flags}"))
def test_actions_can_be_a_callable(self):
feature = dsl.Feature(name='name',
actions=lambda cfg: (
self.assertIs(self.config, cfg),
[dsl.AddCompileFlag('-std=c++03')]
)[1])
feature = dsl.Feature(
name="name",
actions=lambda cfg: (
self.assertIs(self.config, cfg),
[dsl.AddCompileFlag("-std=c++03")],
)[1],
)
for a in feature.getActions(self.config):
a.applyTo(self.config)
self.assertIn('-std=c++03', self.getSubstitution('%{compile_flags}'))
self.assertIn("-std=c++03", self.getSubstitution("%{compile_flags}"))
def test_unsupported_feature(self):
feature = dsl.Feature(name='name', when=lambda _: False)
feature = dsl.Feature(name="name", when=lambda _: False)
self.assertEqual(feature.getActions(self.config), [])
def test_is_supported_gets_passed_the_config(self):
feature = dsl.Feature(name='name', when=lambda cfg: (self.assertIs(self.config, cfg), True)[1])
feature = dsl.Feature(
name="name", when=lambda cfg: (self.assertIs(self.config, cfg), True)[1]
)
self.assertEqual(len(feature.getActions(self.config)), 1)
def _throw():
raise ValueError()
class TestParameter(SetupConfigs):
"""
Tests for libcxx.test.dsl.Parameter
"""
def test_empty_name_should_blow_up(self):
self.assertRaises(ValueError, lambda: dsl.Parameter(name='', choices=['c++03'], type=str, help='', actions=lambda _: []))
self.assertRaises(
ValueError,
lambda: dsl.Parameter(
name="", choices=["c++03"], type=str, help="", actions=lambda _: []
),
)
def test_empty_choices_should_blow_up(self):
self.assertRaises(ValueError, lambda: dsl.Parameter(name='std', choices=[], type=str, help='', actions=lambda _: []))
self.assertRaises(
ValueError,
lambda: dsl.Parameter(
name="std", choices=[], type=str, help="", actions=lambda _: []
),
)
def test_no_choices_is_ok(self):
param = dsl.Parameter(name='triple', type=str, help='', actions=lambda _: [])
self.assertEqual(param.name, 'triple')
param = dsl.Parameter(name="triple", type=str, help="", actions=lambda _: [])
self.assertEqual(param.name, "triple")
def test_name_is_set_correctly(self):
param = dsl.Parameter(name='std', choices=['c++03'], type=str, help='', actions=lambda _: [])
self.assertEqual(param.name, 'std')
param = dsl.Parameter(
name="std", choices=["c++03"], type=str, help="", actions=lambda _: []
)
self.assertEqual(param.name, "std")
def test_no_value_provided_and_no_default_value(self):
param = dsl.Parameter(name='std', choices=['c++03'], type=str, help='', actions=lambda _: [])
self.assertRaises(ValueError, lambda: param.getActions(self.config, self.litConfig.params))
param = dsl.Parameter(
name="std", choices=["c++03"], type=str, help="", actions=lambda _: []
)
self.assertRaises(
ValueError, lambda: param.getActions(self.config, self.litConfig.params)
)
def test_no_value_provided_and_default_value(self):
param = dsl.Parameter(name='std', choices=['c++03'], type=str, help='', default='c++03',
actions=lambda std: [dsl.AddFeature(std)])
param = dsl.Parameter(
name="std",
choices=["c++03"],
type=str,
help="",
default="c++03",
actions=lambda std: [dsl.AddFeature(std)],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('c++03', self.config.available_features)
self.assertIn("c++03", self.config.available_features)
def test_value_provided_on_command_line_and_no_default_value(self):
self.litConfig.params['std'] = 'c++03'
param = dsl.Parameter(name='std', choices=['c++03'], type=str, help='',
actions=lambda std: [dsl.AddFeature(std)])
self.litConfig.params["std"] = "c++03"
param = dsl.Parameter(
name="std",
choices=["c++03"],
type=str,
help="",
actions=lambda std: [dsl.AddFeature(std)],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('c++03', self.config.available_features)
self.assertIn("c++03", self.config.available_features)
def test_value_provided_on_command_line_and_default_value(self):
"""The value provided on the command line should override the default value"""
self.litConfig.params['std'] = 'c++11'
param = dsl.Parameter(name='std', choices=['c++03', 'c++11'], type=str, default='c++03', help='',
actions=lambda std: [dsl.AddFeature(std)])
self.litConfig.params["std"] = "c++11"
param = dsl.Parameter(
name="std",
choices=["c++03", "c++11"],
type=str,
default="c++03",
help="",
actions=lambda std: [dsl.AddFeature(std)],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('c++11', self.config.available_features)
self.assertNotIn('c++03', self.config.available_features)
self.assertIn("c++11", self.config.available_features)
self.assertNotIn("c++03", self.config.available_features)
def test_value_provided_in_config_and_default_value(self):
"""The value provided in the config should override the default value"""
self.config.std ='c++11'
param = dsl.Parameter(name='std', choices=['c++03', 'c++11'], type=str, default='c++03', help='',
actions=lambda std: [dsl.AddFeature(std)])
self.config.std = "c++11"
param = dsl.Parameter(
name="std",
choices=["c++03", "c++11"],
type=str,
default="c++03",
help="",
actions=lambda std: [dsl.AddFeature(std)],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('c++11', self.config.available_features)
self.assertNotIn('c++03', self.config.available_features)
self.assertIn("c++11", self.config.available_features)
self.assertNotIn("c++03", self.config.available_features)
def test_value_provided_in_config_and_on_command_line(self):
"""The value on the command line should override the one in the config"""
self.config.std = 'c++11'
self.litConfig.params['std'] = 'c++03'
param = dsl.Parameter(name='std', choices=['c++03', 'c++11'], type=str, help='',
actions=lambda std: [dsl.AddFeature(std)])
self.config.std = "c++11"
self.litConfig.params["std"] = "c++03"
param = dsl.Parameter(
name="std",
choices=["c++03", "c++11"],
type=str,
help="",
actions=lambda std: [dsl.AddFeature(std)],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('c++03', self.config.available_features)
self.assertNotIn('c++11', self.config.available_features)
self.assertIn("c++03", self.config.available_features)
self.assertNotIn("c++11", self.config.available_features)
def test_no_actions(self):
self.litConfig.params['std'] = 'c++03'
param = dsl.Parameter(name='std', choices=['c++03'], type=str, help='',
actions=lambda _: [])
self.litConfig.params["std"] = "c++03"
param = dsl.Parameter(
name="std", choices=["c++03"], type=str, help="", actions=lambda _: []
)
actions = param.getActions(self.config, self.litConfig.params)
self.assertEqual(actions, [])
def test_boolean_value_parsed_from_trueish_string_parameter(self):
self.litConfig.params['enable_exceptions'] = "True"
param = dsl.Parameter(name='enable_exceptions', choices=[True, False], type=bool, help='',
actions=lambda exceptions: [] if exceptions else _throw())
self.litConfig.params["enable_exceptions"] = "True"
param = dsl.Parameter(
name="enable_exceptions",
choices=[True, False],
type=bool,
help="",
actions=lambda exceptions: [] if exceptions else _throw(),
)
self.assertEqual(param.getActions(self.config, self.litConfig.params), [])
def test_boolean_value_from_true_boolean_parameter(self):
self.litConfig.params['enable_exceptions'] = True
param = dsl.Parameter(name='enable_exceptions', choices=[True, False], type=bool, help='',
actions=lambda exceptions: [] if exceptions else _throw())
self.litConfig.params["enable_exceptions"] = True
param = dsl.Parameter(
name="enable_exceptions",
choices=[True, False],
type=bool,
help="",
actions=lambda exceptions: [] if exceptions else _throw(),
)
self.assertEqual(param.getActions(self.config, self.litConfig.params), [])
def test_boolean_value_parsed_from_falseish_string_parameter(self):
self.litConfig.params['enable_exceptions'] = "False"
param = dsl.Parameter(name='enable_exceptions', choices=[True, False], type=bool, help='',
actions=lambda exceptions: [] if exceptions else [dsl.AddFeature("-fno-exceptions")])
self.litConfig.params["enable_exceptions"] = "False"
param = dsl.Parameter(
name="enable_exceptions",
choices=[True, False],
type=bool,
help="",
actions=lambda exceptions: []
if exceptions
else [dsl.AddFeature("-fno-exceptions")],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('-fno-exceptions', self.config.available_features)
self.assertIn("-fno-exceptions", self.config.available_features)
def test_boolean_value_from_false_boolean_parameter(self):
self.litConfig.params['enable_exceptions'] = False
param = dsl.Parameter(name='enable_exceptions', choices=[True, False], type=bool, help='',
actions=lambda exceptions: [] if exceptions else [dsl.AddFeature("-fno-exceptions")])
self.litConfig.params["enable_exceptions"] = False
param = dsl.Parameter(
name="enable_exceptions",
choices=[True, False],
type=bool,
help="",
actions=lambda exceptions: []
if exceptions
else [dsl.AddFeature("-fno-exceptions")],
)
for a in param.getActions(self.config, self.litConfig.params):
a.applyTo(self.config)
self.assertIn('-fno-exceptions', self.config.available_features)
self.assertIn("-fno-exceptions", self.config.available_features)
def test_list_parsed_from_comma_delimited_string_empty(self):
self.litConfig.params['additional_features'] = ""
param = dsl.Parameter(name='additional_features', type=list, help='', actions=lambda f: f)
self.litConfig.params["additional_features"] = ""
param = dsl.Parameter(
name="additional_features", type=list, help="", actions=lambda f: f
)
self.assertEqual(param.getActions(self.config, self.litConfig.params), [])
def test_list_parsed_from_comma_delimited_string_1(self):
self.litConfig.params['additional_features'] = "feature1"
param = dsl.Parameter(name='additional_features', type=list, help='', actions=lambda f: f)
self.assertEqual(param.getActions(self.config, self.litConfig.params), ['feature1'])
self.litConfig.params["additional_features"] = "feature1"
param = dsl.Parameter(
name="additional_features", type=list, help="", actions=lambda f: f
)
self.assertEqual(
param.getActions(self.config, self.litConfig.params), ["feature1"]
)
def test_list_parsed_from_comma_delimited_string_2(self):
self.litConfig.params['additional_features'] = "feature1,feature2"
param = dsl.Parameter(name='additional_features', type=list, help='', actions=lambda f: f)
self.assertEqual(param.getActions(self.config, self.litConfig.params), ['feature1', 'feature2'])
self.litConfig.params["additional_features"] = "feature1,feature2"
param = dsl.Parameter(
name="additional_features", type=list, help="", actions=lambda f: f
)
self.assertEqual(
param.getActions(self.config, self.litConfig.params),
["feature1", "feature2"],
)
def test_list_parsed_from_comma_delimited_string_3(self):
self.litConfig.params['additional_features'] = "feature1,feature2, feature3"
param = dsl.Parameter(name='additional_features', type=list, help='', actions=lambda f: f)
self.assertEqual(param.getActions(self.config, self.litConfig.params), ['feature1', 'feature2', 'feature3'])
self.litConfig.params["additional_features"] = "feature1,feature2, feature3"
param = dsl.Parameter(
name="additional_features", type=list, help="", actions=lambda f: f
)
self.assertEqual(
param.getActions(self.config, self.litConfig.params),
["feature1", "feature2", "feature3"],
)
if __name__ == '__main__':
if __name__ == "__main__":
unittest.main(verbosity=2)

View File

@ -9,6 +9,7 @@
# to pass it to the test, and we decode and unpickle the substitutions from
# within the test.
import base64, lit.util, pickle
base64Encode = lambda s: lit.util.to_string(base64.b64encode(lit.util.to_bytes(s)))
escapedSubstitutions = base64Encode(pickle.dumps(config.substitutions))
config.substitutions.append(('%{substitutions}', escapedSubstitutions))
config.substitutions.append(("%{substitutions}", escapedSubstitutions))

View File

@ -41,7 +41,8 @@ def parse_line(line: str) -> header:
# literal level.)
LIBCXX_HEADER_REGEX = r".*c\+\+(?:/|\\\\)v[0-9]+(?:/|\\\\)(.+)"
def is_libcxx_public_header(header : str) -> bool:
def is_libcxx_public_header(header: str) -> bool:
"""
Returns whether a header is a C++ public header file.
"""
@ -62,7 +63,7 @@ def is_libcxx_public_header(header : str) -> bool:
return True
def is_libcxx_header(header : str) -> bool:
def is_libcxx_header(header: str) -> bool:
"""
Returns whether a header is a libc++ header, excluding the C-compatibility headers.
"""
@ -73,7 +74,9 @@ def is_libcxx_header(header : str) -> bool:
# Skip C compatibility headers (in particular, make sure not to skip libc++ detail headers).
relative = match.group(1)
if relative.endswith(".h") and not (relative.startswith("__") or re.search(r"(/|\\\\)__", relative)):
if relative.endswith(".h") and not (
relative.startswith("__") or re.search(r"(/|\\\\)__", relative)
):
return False
return True
@ -130,7 +133,9 @@ def create_include_graph(path: pathlib.Path) -> List[str]:
# Get actual filenames relative to libc++'s installation directory instead of full paths
relative = lambda h: re.match(LIBCXX_HEADER_REGEX, h).group(1)
top_level = relative(next(h.name for h in headers if h.level == 1)) # There should be only one top-level header
top_level = relative(
next(h.name for h in headers if h.level == 1)
) # There should be only one top-level header
includes = [relative(h.name) for h in headers if h.level != 1]
# Remove duplicates in all includes.

View File

@ -7,4 +7,5 @@
lit_config.fatal(
"You seem to be running Lit directly -- you should be running Lit through "
"<build>/bin/llvm-lit, which will ensure that the right Lit configuration "
"file is used.")
"file is used."
)

View File

@ -1,2 +1,2 @@
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,2 +1,2 @@
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# Disable all of the experimental tests if the correct feature is not available.
if 'c++experimental' not in config.available_features:
config.unsupported = True
if "c++experimental" not in config.available_features:
config.unsupported = True

View File

@ -1,6 +1,6 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True
if 'no-fstream' in config.available_features:
config.unsupported = True
if "no-fstream" in config.available_features:
config.unsupported = True

View File

@ -1,5 +1,5 @@
if 'availability-filesystem-missing' in config.available_features:
config.unsupported = True
if 'no-filesystem' in config.available_features:
if "availability-filesystem-missing" in config.available_features:
config.unsupported = True
if "no-filesystem" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# All non-trivial uses of iostreams require localization support
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# stream iterators rely on the streams library, which requires localization
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# <locale> tests are obviously not supported when localization support is disabled
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# Unfortunately, <regex> uses locales in regex_traits
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,3 +1,3 @@
# These std::string functions require iostreams, which requires localization
if 'no-localization' in config.available_features:
config.unsupported = True
if "no-localization" in config.available_features:
config.unsupported = True

View File

@ -1,45 +1,51 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
from argparse import ArgumentParser
import sys
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
sys.stderr.write(msg + "\n")
sys.exit(1)
def main():
parser = ArgumentParser(
description="Concatenate two files into a single file")
parser = ArgumentParser(description="Concatenate two files into a single file")
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output file. stdout is used if not given',
type=str, action='store')
"-o",
"--output",
dest="output",
required=True,
help="The output file. stdout is used if not given",
type=str,
action="store",
)
parser.add_argument(
'files', metavar='files', nargs='+',
help='The files to concatenate')
"files", metavar="files", nargs="+", help="The files to concatenate"
)
args = parser.parse_args()
if len(args.files) < 2:
print_and_exit('fewer than 2 inputs provided')
data = ''
print_and_exit("fewer than 2 inputs provided")
data = ""
for filename in args.files:
with open(filename, 'r') as f:
with open(filename, "r") as f:
data += f.read()
if len(data) != 0 and data[-1] != '\n':
data += '\n'
if len(data) != 0 and data[-1] != "\n":
data += "\n"
assert len(data) > 0 and "cannot cat empty files"
with open(args.output, 'w') as f:
with open(args.output, "w") as f:
f.write(data)
if __name__ == '__main__':
if __name__ == "__main__":
main()
sys.exit(0)

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""GDB pretty-printers for libc++.
These should work for objects compiled with either the stable ABI or the unstable ABI.
@ -29,6 +29,7 @@ _long_int_type = gdb.lookup_type("unsigned long long")
_libcpp_big_endian = False
def addr_as_long(addr):
return int(addr.cast(_long_int_type))
@ -97,10 +98,11 @@ def _size_field(node):
# Some common substitutions on the types to reduce visual clutter (A user who
# wants to see the actual details can always use print/r).
_common_substitutions = [
("std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
"std::string"),
("std::basic_string_view<char, std::char_traits<char> >",
"std::string_view"),
(
"std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
"std::string",
),
("std::basic_string_view<char, std::char_traits<char> >", "std::string_view"),
]
@ -116,8 +118,11 @@ def _prettify_typename(gdb_type):
"""
type_without_typedefs = gdb_type.strip_typedefs()
typename = type_without_typedefs.name or type_without_typedefs.tag or \
str(type_without_typedefs)
typename = (
type_without_typedefs.name
or type_without_typedefs.tag
or str(type_without_typedefs)
)
result = _remove_cxx_namespace(typename)
for find_str, subst_str in _common_substitutions:
result = re.sub(find_str, subst_str, result)
@ -247,18 +252,18 @@ class StdStringViewPrinter(object):
"""Print a std::string_view."""
def __init__(self, val):
self.val = val
self.val = val
def display_hint(self):
return "string"
return "string"
def to_string(self): # pylint: disable=g-bad-name
"""GDB calls this to compute the pretty-printed form."""
"""GDB calls this to compute the pretty-printed form."""
ptr = _data_field(self.val)
ptr = ptr.cast(ptr.type.target().strip_typedefs().pointer())
size = _size_field(self.val)
return ptr.lazy_string(length=size)
ptr = _data_field(self.val)
ptr = ptr.cast(ptr.type.target().strip_typedefs().pointer())
size = _size_field(self.val)
return ptr.lazy_string(length=size)
class StdUniquePtrPrinter(object):
@ -273,9 +278,10 @@ class StdUniquePtrPrinter(object):
typename = _remove_generics(_prettify_typename(self.val.type))
if not self.addr:
return "%s is nullptr" % typename
return ("%s<%s> containing" %
(typename,
_remove_generics(_prettify_typename(self.pointee_type))))
return "%s<%s> containing" % (
typename,
_remove_generics(_prettify_typename(self.pointee_type)),
)
def __iter__(self):
if self.addr:
@ -296,7 +302,8 @@ class StdSharedPointerPrinter(object):
"""Returns self as a string."""
typename = _remove_generics(_prettify_typename(self.val.type))
pointee_type = _remove_generics(
_prettify_typename(self.val.type.template_argument(0)))
_prettify_typename(self.val.type.template_argument(0))
)
if not self.addr:
return "%s is nullptr" % typename
refcount = self.val["__cntrl_"]
@ -391,20 +398,24 @@ class StdVectorPrinter(object):
self.typename += "<bool>"
self.length = self.val["__size_"]
bits_per_word = self.val["__bits_per_word"]
self.capacity = _value_of_pair_first(
self.val["__cap_alloc_"]) * bits_per_word
self.iterator = self._VectorBoolIterator(
begin, self.length, bits_per_word)
self.capacity = (
_value_of_pair_first(self.val["__cap_alloc_"]) * bits_per_word
)
self.iterator = self._VectorBoolIterator(begin, self.length, bits_per_word)
else:
end = self.val["__end_"]
self.length = end - begin
self.capacity = _get_base_subobject(
self.val["__end_cap_"])["__value_"] - begin
self.capacity = (
_get_base_subobject(self.val["__end_cap_"])["__value_"] - begin
)
self.iterator = self._VectorIterator(begin, end)
def to_string(self):
return ("%s of length %d, capacity %d" %
(self.typename, self.length, self.capacity))
return "%s of length %d, capacity %d" % (
self.typename,
self.length,
self.capacity,
)
def children(self):
return self.iterator
@ -424,8 +435,9 @@ class StdBitsetPrinter(object):
if self.n_words == 1:
self.values = [int(self.val["__first_"])]
else:
self.values = [int(self.val["__first_"][index])
for index in range(self.n_words)]
self.values = [
int(self.val["__first_"][index]) for index in range(self.n_words)
]
def to_string(self):
typename = _prettify_typename(self.val.type)
@ -454,8 +466,7 @@ class StdDequePrinter(object):
self.start_ptr = self.val["__map_"]["__begin_"]
self.first_block_start_index = int(self.val["__start_"])
self.node_type = self.start_ptr.type
self.block_size = self._calculate_block_size(
val.type.template_argument(0))
self.block_size = self._calculate_block_size(val.type.template_argument(0))
def _calculate_block_size(self, element_type):
"""Calculates the number of elements in a full block."""
@ -473,13 +484,13 @@ class StdDequePrinter(object):
current_addr = self.start_ptr
start_index = self.first_block_start_index
while num_emitted < self.size:
end_index = min(start_index + self.size -
num_emitted, self.block_size)
end_index = min(start_index + self.size - num_emitted, self.block_size)
for _, elem in self._bucket_it(current_addr, start_index, end_index):
yield "", elem
num_emitted += end_index - start_index
current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \
.cast(self.node_type)
current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size).cast(
self.node_type
)
start_index = 0
def to_string(self):
@ -507,8 +518,10 @@ class StdListPrinter(object):
self.size = int(_value_of_pair_first(size_alloc_field))
dummy_node = self.val["__end_"]
self.nodetype = gdb.lookup_type(
re.sub("__list_node_base", "__list_node",
str(dummy_node.type.strip_typedefs()))).pointer()
re.sub(
"__list_node_base", "__list_node", str(dummy_node.type.strip_typedefs())
)
).pointer()
self.first_node = dummy_node["__next_"]
def to_string(self):
@ -685,7 +698,8 @@ class StdMapPrinter(AbstractRBTreePrinter):
def _init_cast_type(self, val_type):
map_it_type = gdb.lookup_type(
str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
str(val_type.strip_typedefs()) + "::iterator"
).strip_typedefs()
tree_it_type = map_it_type.template_argument(0)
node_ptr_type = tree_it_type.template_argument(1)
return node_ptr_type
@ -703,7 +717,8 @@ class StdSetPrinter(AbstractRBTreePrinter):
def _init_cast_type(self, val_type):
set_it_type = gdb.lookup_type(
str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
str(val_type.strip_typedefs()) + "::iterator"
).strip_typedefs()
node_ptr_type = set_it_type.template_argument(1)
return node_ptr_type
@ -730,8 +745,7 @@ class AbstractRBTreeIteratorPrinter(object):
def _is_valid_node(self):
if not self.util.parent(self.addr):
return False
return self.util.is_left_child(self.addr) or \
self.util.is_right_child(self.addr)
return self.util.is_left_child(self.addr) or self.util.is_right_child(self.addr)
def to_string(self):
if not self.addr:
@ -756,8 +770,7 @@ class MapIteratorPrinter(AbstractRBTreeIteratorPrinter):
"""Print a std::(multi)map iterator."""
def __init__(self, val):
self._initialize(val["__i_"],
_remove_generics(_prettify_typename(val.type)))
self._initialize(val["__i_"], _remove_generics(_prettify_typename(val.type)))
def _get_node_value(self, node):
return _cc_field(node)
@ -976,10 +989,12 @@ class LibcxxPrettyPrinter(object):
# Don't attempt types known to be inside libstdcxx.
typename = val.type.name or val.type.tag or str(val.type)
match = re.match("^std::(__.*?)::", typename)
if match is not None and match.group(1) in ["__cxx1998",
"__debug",
"__7",
"__g"]:
if match is not None and match.group(1) in [
"__cxx1998",
"__debug",
"__7",
"__g",
]:
return None
# Handle any using declarations or other typedefs.
@ -1005,13 +1020,13 @@ def _register_libcxx_printers(event):
# already generated as part of a larger data structure, and there is
# no python api to get the endianness. Mixed-endianness debugging
# rare enough that this workaround should be adequate.
_libcpp_big_endian = "big endian" in gdb.execute("show endian",
to_string=True)
_libcpp_big_endian = "big endian" in gdb.execute("show endian", to_string=True)
if not getattr(progspace, _libcxx_printer_name, False):
print("Loading libc++ pretty-printers.")
gdb.printing.register_pretty_printer(
progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
progspace, LibcxxPrettyPrinter(_libcxx_printer_name)
)
setattr(progspace, _libcxx_printer_name, True)

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
import argparse
import io
@ -14,27 +14,38 @@ import libcxx.sym_check.util
import pprint
import sys
def OutputFile(file):
if isinstance(file, io.IOBase):
return file
assert isinstance(file, str), "Got object {} which is not a str".format(file)
return open(file, 'w', newline='\n')
return open(file, "w", newline="\n")
def main(argv):
parser = argparse.ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument('library', metavar='LIB', type=str,
help='The library to extract symbols from.')
parser.add_argument('-o', '--output', dest='output', type=OutputFile, default=sys.stdout,
help='The output file to write the symbols to. It is overwritten if it already exists. '
'If no file is specified, the results are written to standard output.')
description="Extract a list of symbols from a shared library."
)
parser.add_argument(
"library", metavar="LIB", type=str, help="The library to extract symbols from."
)
parser.add_argument(
"-o",
"--output",
dest="output",
type=OutputFile,
default=sys.stdout,
help="The output file to write the symbols to. It is overwritten if it already exists. "
"If no file is specified, the results are written to standard output.",
)
args = parser.parse_args(argv)
symbols = libcxx.sym_check.extract.extract_symbols(args.library)
symbols, _ = libcxx.sym_check.util.filter_stdlib_symbols(symbols)
lines = [pprint.pformat(sym, width=99999) for sym in symbols]
args.output.writelines('\n'.join(sorted(lines)))
args.output.writelines("\n".join(sorted(lines)))
if __name__ == '__main__':
if __name__ == "__main__":
main(sys.argv[1:])

File diff suppressed because it is too large Load Diff

View File

@ -8,9 +8,11 @@ def get_libcxx_paths():
script_name = os.path.basename(__file__)
assert os.path.exists(utils_path)
src_root = os.path.dirname(utils_path)
test_path = os.path.join(src_root, 'test', 'libcxx', 'inclusions')
test_path = os.path.join(src_root, "test", "libcxx", "inclusions")
assert os.path.exists(test_path)
assert os.path.exists(os.path.join(test_path, 'algorithm.inclusions.compile.pass.cpp'))
assert os.path.exists(
os.path.join(test_path, "algorithm.inclusions.compile.pass.cpp")
)
return script_name, src_root, test_path
@ -93,46 +95,46 @@ assert all(v == sorted(v) for k, v in mandatory_inclusions.items())
# <thread> should be marked as UNSUPPORTED, because including <thread>
# is a hard error in that case.
lit_markup = {
"barrier": ["UNSUPPORTED: no-threads"],
"filesystem": ["UNSUPPORTED: no-filesystem"],
"iomanip": ["UNSUPPORTED: no-localization"],
"ios": ["UNSUPPORTED: no-localization"],
"iostream": ["UNSUPPORTED: no-localization"],
"istream": ["UNSUPPORTED: no-localization"],
"latch": ["UNSUPPORTED: no-threads"],
"locale": ["UNSUPPORTED: no-localization"],
"mutex": ["UNSUPPORTED: no-threads"],
"ostream": ["UNSUPPORTED: no-localization"],
"regex": ["UNSUPPORTED: no-localization"],
"semaphore": ["UNSUPPORTED: no-threads"],
"shared_mutex": ["UNSUPPORTED: no-threads"],
"thread": ["UNSUPPORTED: no-threads"]
"barrier": ["UNSUPPORTED: no-threads"],
"filesystem": ["UNSUPPORTED: no-filesystem"],
"iomanip": ["UNSUPPORTED: no-localization"],
"ios": ["UNSUPPORTED: no-localization"],
"iostream": ["UNSUPPORTED: no-localization"],
"istream": ["UNSUPPORTED: no-localization"],
"latch": ["UNSUPPORTED: no-threads"],
"locale": ["UNSUPPORTED: no-localization"],
"mutex": ["UNSUPPORTED: no-threads"],
"ostream": ["UNSUPPORTED: no-localization"],
"regex": ["UNSUPPORTED: no-localization"],
"semaphore": ["UNSUPPORTED: no-threads"],
"shared_mutex": ["UNSUPPORTED: no-threads"],
"thread": ["UNSUPPORTED: no-threads"],
}
def get_std_ver_test(includee):
v = new_in_version.get(includee, "03")
if v == "03":
return ''
return ""
versions = ["03", "11", "14", "17", "20"]
return 'TEST_STD_VER > {} && '.format(max(i for i in versions if i < v))
return "TEST_STD_VER > {} && ".format(max(i for i in versions if i < v))
def get_unsupported_line(includee):
v = new_in_version.get(includee, "03")
return {
"03": [],
"11": ['UNSUPPORTED: c++03'],
"14": ['UNSUPPORTED: c++03, c++11'],
"17": ['UNSUPPORTED: c++03, c++11, c++14'],
"20": ['UNSUPPORTED: c++03, c++11, c++14, c++17'],
"23": ['UNSUPPORTED: c++03, c++11, c++14, c++17, c++20'],
"26": ['UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23'],
"11": ["UNSUPPORTED: c++03"],
"14": ["UNSUPPORTED: c++03, c++11"],
"17": ["UNSUPPORTED: c++03, c++11, c++14"],
"20": ["UNSUPPORTED: c++03, c++11, c++14, c++17"],
"23": ["UNSUPPORTED: c++03, c++11, c++14, c++17, c++20"],
"26": ["UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23"],
}[v]
def get_libcpp_header_symbol(header_name):
return '_LIBCPP_' + header_name.upper().replace('.', '_')
return "_LIBCPP_" + header_name.upper().replace(".", "_")
def get_includer_symbol_test(includer):
@ -157,7 +159,9 @@ def get_ifdef(includer, includee):
""".strip().format(
includee_test=get_std_ver_test(includee),
symbol=symbol,
message="<{}> should include <{}> in C++{} and later".format(includer, includee, version)
message="<{}> should include <{}> in C++{} and later".format(
includer, includee, version
),
)
@ -193,15 +197,19 @@ def produce_tests():
test_body = test_body_template.format(
script_name=script_name,
header=includer,
markup=('\n' + '\n'.join('// ' + m for m in markup_tags) + '\n') if markup_tags else '',
markup=("\n" + "\n".join("// " + m for m in markup_tags) + "\n")
if markup_tags
else "",
test_includers_symbol=get_includer_symbol_test(includer),
test_per_includee='\n'.join(get_ifdef(includer, includee) for includee in includees),
test_per_includee="\n".join(
get_ifdef(includer, includee) for includee in includees
),
)
test_name = "{header}.inclusions.compile.pass.cpp".format(header=includer)
out_path = os.path.join(test_path, test_name)
with open(out_path, 'w', newline='\n') as f:
f.write(test_body + '\n')
with open(out_path, "w", newline="\n") as f:
f.write(test_body + "\n")
if __name__ == '__main__':
if __name__ == "__main__":
produce_tests()

View File

@ -16,12 +16,9 @@ header_restrictions = {
"shared_mutex": "!defined(_LIBCPP_HAS_NO_THREADS)",
"stdatomic.h": "__cplusplus > 202002L && !defined(_LIBCPP_HAS_NO_THREADS)",
"thread": "!defined(_LIBCPP_HAS_NO_THREADS)",
"filesystem": "!defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)",
# TODO(LLVM-17): simplify this to __cplusplus >= 202002L
"coroutine": "(defined(__cpp_impl_coroutine) && __cpp_impl_coroutine >= 201902L) || (defined(__cpp_coroutines) && __cpp_coroutines >= 201703L)",
"clocale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"codecvt": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"fstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_FSTREAM)",
@ -36,12 +33,10 @@ header_restrictions = {
"sstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"streambuf": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"strstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"wctype.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"cwctype": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"cwchar": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"wchar.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"experimental/algorithm": "__cplusplus >= 201103L",
"experimental/deque": "__cplusplus >= 201103L",
"experimental/forward_list": "__cplusplus >= 201103L",
@ -64,89 +59,133 @@ header_restrictions = {
}
private_headers_still_public_in_modules = [
'__assert', '__config',
'__config_site.in', '__debug', '__hash_table',
'__threading_support', '__tree', '__undef_macros', '__verbose_abort'
"__assert",
"__config",
"__config_site.in",
"__debug",
"__hash_table",
"__threading_support",
"__tree",
"__undef_macros",
"__verbose_abort",
]
def find_script(file):
"""Finds the script used to generate a file inside the file itself. The script is delimited by
BEGIN-SCRIPT and END-SCRIPT markers.
BEGIN-SCRIPT and END-SCRIPT markers.
"""
with open(file, 'r') as f:
with open(file, "r") as f:
content = f.read()
match = re.search(r'^BEGIN-SCRIPT$(.+)^END-SCRIPT$', content, flags=re.MULTILINE | re.DOTALL)
match = re.search(
r"^BEGIN-SCRIPT$(.+)^END-SCRIPT$", content, flags=re.MULTILINE | re.DOTALL
)
if not match:
raise RuntimeError("Was unable to find a script delimited with BEGIN-SCRIPT/END-SCRIPT markers in {}".format(test_file))
raise RuntimeError(
"Was unable to find a script delimited with BEGIN-SCRIPT/END-SCRIPT markers in {}".format(
test_file
)
)
return match.group(1)
def execute_script(script, variables):
"""Executes the provided Mako template with the given variables available during the
evaluation of the script, and returns the result.
evaluation of the script, and returns the result.
"""
code = compile(script, 'fake-filename', 'exec')
code = compile(script, "fake-filename", "exec")
output = io.StringIO()
with contextlib.redirect_stdout(output):
exec(code, variables)
output = output.getvalue()
return output
def generate_new_file(file, new_content):
"""Generates the new content of the file by inserting the new content in-between
two '// GENERATED-MARKER' markers located in the file.
two '// GENERATED-MARKER' markers located in the file.
"""
with open(file, 'r') as f:
with open(file, "r") as f:
old_content = f.read()
try:
before, begin_marker, _, end_marker, after = re.split(r'(// GENERATED-MARKER\n)', old_content, flags=re.MULTILINE | re.DOTALL)
before, begin_marker, _, end_marker, after = re.split(
r"(// GENERATED-MARKER\n)", old_content, flags=re.MULTILINE | re.DOTALL
)
except ValueError:
raise RuntimeError("Failed to split {} based on markers, please make sure the file has exactly two '// GENERATED-MARKER' occurrences".format(file))
raise RuntimeError(
"Failed to split {} based on markers, please make sure the file has exactly two '// GENERATED-MARKER' occurrences".format(
file
)
)
return before + begin_marker + new_content + end_marker + after
def produce(test_file, variables):
script = find_script(test_file)
result = execute_script(script, variables)
new_content = generate_new_file(test_file, result)
with open(test_file, 'w', newline='\n') as f:
with open(test_file, "w", newline="\n") as f:
f.write(new_content)
def is_header(file):
"""Returns whether the given file is a header (i.e. not a directory or the modulemap file)."""
return not file.is_dir() and not file.name == 'module.modulemap.in' and file.name != 'libcxx.imp'
return (
not file.is_dir()
and not file.name == "module.modulemap.in"
and file.name != "libcxx.imp"
)
def main():
monorepo_root = pathlib.Path(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
include = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'include'))
test = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'test'))
assert(monorepo_root.exists())
monorepo_root = pathlib.Path(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
include = pathlib.Path(os.path.join(monorepo_root, "libcxx", "include"))
test = pathlib.Path(os.path.join(monorepo_root, "libcxx", "test"))
assert monorepo_root.exists()
toplevel_headers = sorted(str(p.relative_to(include)) for p in include.glob('[a-z]*') if is_header(p))
experimental_headers = sorted(str(p.relative_to(include)) for p in include.glob('experimental/[a-z]*') if is_header(p))
public_headers = toplevel_headers + experimental_headers
private_headers = sorted(str(p.relative_to(include)) for p in include.rglob('*') if is_header(p) and str(p.relative_to(include)).startswith('__') and not p.name.startswith('pstl'))
toplevel_headers = sorted(
str(p.relative_to(include)) for p in include.glob("[a-z]*") if is_header(p)
)
experimental_headers = sorted(
str(p.relative_to(include))
for p in include.glob("experimental/[a-z]*")
if is_header(p)
)
public_headers = toplevel_headers + experimental_headers
private_headers = sorted(
str(p.relative_to(include))
for p in include.rglob("*")
if is_header(p)
and str(p.relative_to(include)).startswith("__")
and not p.name.startswith("pstl")
)
variables = {
'toplevel_headers': toplevel_headers,
'experimental_headers': experimental_headers,
'public_headers': public_headers,
'private_headers': private_headers,
'header_restrictions': header_restrictions,
'private_headers_still_public_in_modules': private_headers_still_public_in_modules
"toplevel_headers": toplevel_headers,
"experimental_headers": experimental_headers,
"public_headers": public_headers,
"private_headers": private_headers,
"header_restrictions": header_restrictions,
"private_headers_still_public_in_modules": private_headers_still_public_in_modules,
}
produce(test.joinpath('libcxx/assertions/headers_declare_verbose_abort.sh.cpp'), variables)
produce(test.joinpath('libcxx/clang_tidy.sh.cpp'), variables)
produce(test.joinpath('libcxx/double_include.sh.cpp'), variables)
produce(test.joinpath('libcxx/min_max_macros.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/modules_include.sh.cpp'), variables)
produce(test.joinpath('libcxx/nasty_macros.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/no_assert_include.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/private_headers.verify.cpp'), variables)
produce(test.joinpath('libcxx/transitive_includes.sh.cpp'), variables)
produce(
test.joinpath("libcxx/assertions/headers_declare_verbose_abort.sh.cpp"),
variables,
)
produce(test.joinpath("libcxx/clang_tidy.sh.cpp"), variables)
produce(test.joinpath("libcxx/double_include.sh.cpp"), variables)
produce(test.joinpath("libcxx/min_max_macros.compile.pass.cpp"), variables)
produce(test.joinpath("libcxx/modules_include.sh.cpp"), variables)
produce(test.joinpath("libcxx/nasty_macros.compile.pass.cpp"), variables)
produce(test.joinpath("libcxx/no_assert_include.compile.pass.cpp"), variables)
produce(test.joinpath("libcxx/private_headers.verify.cpp"), variables)
produce(test.joinpath("libcxx/transitive_includes.sh.cpp"), variables)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@ -2,13 +2,17 @@
import os, pathlib, sys
def generate(private, public):
return f'{{ include: [ "{private}", "private", "<{public}>", "public" ] }}'
def panic(file):
print(f'========== {__file__} error ==========', file=sys.stderr)
print(f'\tFile \'{file}\' is a top-level detail header without a mapping', file=sys.stderr)
print(f"========== {__file__} error ==========", file=sys.stderr)
print(
f"\tFile '{file}' is a top-level detail header without a mapping",
file=sys.stderr,
)
sys.exit(1)
@ -18,50 +22,75 @@ def generate_map(include):
c_headers = []
for i in include.iterdir():
if i.is_dir() and i.name.startswith('__'):
detail_directories.append(f'{i.name}')
if i.is_dir() and i.name.startswith("__"):
detail_directories.append(f"{i.name}")
continue
if i.name.startswith('__'):
if i.name.startswith("__"):
detail_files.append(i.name)
continue
if i.name.endswith('.h'):
if i.name.endswith(".h"):
c_headers.append(i.name)
result = []
temporary_mappings = {'__locale_dir' : 'locale'}
temporary_mappings = {"__locale_dir": "locale"}
for i in detail_directories:
public_header = temporary_mappings.get(i, i.lstrip('_'))
public_header = temporary_mappings.get(i, i.lstrip("_"))
result.append(f'{generate(f"@<{i}/.*>", public_header)},')
for i in detail_files:
public = []
if i == '__assert': continue
elif i == '__availability': continue
elif i == '__bit_reference': continue
elif i == '__bits': public = ['bits']
elif i == '__config_site.in': continue
elif i == '__config': continue
elif i == '__debug': continue
elif i == '__errc': continue
elif i == '__hash_table': public = ['unordered_map', 'unordered_set']
elif i == '__locale': public = ['locale']
elif i == '__mbstate_t.h': continue
elif i == '__mutex_base': continue
elif i == '__node_handle': public = ['map', 'set', 'unordered_map', 'unordered_set']
elif i == '__pstl_algorithm': continue
elif i == '__pstl_config_site.in': continue
elif i == '__pstl_execution': continue
elif i == '__pstl_memory': continue
elif i == '__pstl_numeric': continue
elif i == '__split_buffer': public = ['deque', 'vector']
elif i == '__std_mbstate_t.h': continue
elif i == '__threading_support': public = ['atomic', 'mutex', 'semaphore', 'thread']
elif i == '__tree': public = ['map', 'set']
elif i == '__undef_macros': continue
elif i == '__verbose_abort': continue
else: panic(i)
if i == "__assert":
continue
elif i == "__availability":
continue
elif i == "__bit_reference":
continue
elif i == "__bits":
public = ["bits"]
elif i == "__config_site.in":
continue
elif i == "__config":
continue
elif i == "__debug":
continue
elif i == "__errc":
continue
elif i == "__hash_table":
public = ["unordered_map", "unordered_set"]
elif i == "__locale":
public = ["locale"]
elif i == "__mbstate_t.h":
continue
elif i == "__mutex_base":
continue
elif i == "__node_handle":
public = ["map", "set", "unordered_map", "unordered_set"]
elif i == "__pstl_algorithm":
continue
elif i == "__pstl_config_site.in":
continue
elif i == "__pstl_execution":
continue
elif i == "__pstl_memory":
continue
elif i == "__pstl_numeric":
continue
elif i == "__split_buffer":
public = ["deque", "vector"]
elif i == "__std_mbstate_t.h":
continue
elif i == "__threading_support":
public = ["atomic", "mutex", "semaphore", "thread"]
elif i == "__tree":
public = ["map", "set"]
elif i == "__undef_macros":
continue
elif i == "__verbose_abort":
continue
else:
panic(i)
for p in public:
result.append(f'{generate(f"<{i}>", p)},')
@ -69,16 +98,19 @@ def generate_map(include):
result.sort()
return result
def main():
monorepo_root = pathlib.Path(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
assert(monorepo_root.exists())
include = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'include'))
monorepo_root = pathlib.Path(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
assert monorepo_root.exists()
include = pathlib.Path(os.path.join(monorepo_root, "libcxx", "include"))
mapping = generate_map(include)
data = '[\n ' + '\n '.join(mapping) + '\n]\n'
with open(f'{include}/libcxx.imp', 'w') as f:
data = "[\n " + "\n ".join(mapping) + "\n]\n"
with open(f"{include}/libcxx.imp", "w") as f:
f.write(data)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@ -326,10 +326,7 @@ def generate_data_tables() -> str:
Generate Unicode data for [format.string.std]/12
"""
east_asian_width_path = (
Path(__file__).absolute().parent
/ "data"
/ "unicode"
/ "EastAsianWidth.txt"
Path(__file__).absolute().parent / "data" / "unicode" / "EastAsianWidth.txt"
)
properties = list()

View File

@ -1,16 +1,16 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""libcxx python utilities"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__author__ = "Eric Fiselier"
__email__ = "eric@efcs.ca"
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__version__ = " ".join(str(v) for v in __versioninfo__) + "dev"
__all__ = []

View File

@ -1,16 +1,16 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""libcxx abi symbol checker"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__author__ = "Eric Fiselier"
__email__ = "eric@efcs.ca"
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__version__ = " ".join(str(v) for v in __versioninfo__) + "dev"
__all__ = ['diff', 'extract', 'util']
__all__ = ["diff", "extract", "util"]

View File

@ -1,11 +1,11 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""
diff - A set of functions for diff-ing two symbol lists.
"""
@ -14,15 +14,15 @@ from libcxx.sym_check import util
def _symbol_difference(lhs, rhs):
lhs_names = set(((n['name'], n['type']) for n in lhs))
rhs_names = set(((n['name'], n['type']) for n in rhs))
lhs_names = set(((n["name"], n["type"]) for n in lhs))
rhs_names = set(((n["name"], n["type"]) for n in rhs))
diff_names = lhs_names - rhs_names
return [n for n in lhs if (n['name'], n['type']) in diff_names]
return [n for n in lhs if (n["name"], n["type"]) in diff_names]
def _find_by_key(sym_list, k):
for sym in sym_list:
if sym['name'] == k:
if sym["name"] == k:
return sym
return None
@ -40,9 +40,8 @@ def changed_symbols(old, new):
for old_sym in old:
if old_sym in new:
continue
new_sym = _find_by_key(new, old_sym['name'])
if (new_sym is not None and not new_sym in old
and old_sym != new_sym):
new_sym = _find_by_key(new, old_sym["name"])
if new_sym is not None and not new_sym in old and old_sym != new_sym:
changed += [(old_sym, new_sym)]
return changed
@ -54,49 +53,51 @@ def diff(old, new):
return added, removed, changed
def report_diff(added_syms, removed_syms, changed_syms, names_only=False,
demangle=True):
def report_diff(
added_syms, removed_syms, changed_syms, names_only=False, demangle=True
):
def maybe_demangle(name):
return util.demangle_symbol(name) if demangle else name
report = ''
report = ""
for sym in added_syms:
report += 'Symbol added: %s\n' % maybe_demangle(sym['name'])
report += "Symbol added: %s\n" % maybe_demangle(sym["name"])
if not names_only:
report += ' %s\n\n' % sym
report += " %s\n\n" % sym
if added_syms and names_only:
report += '\n'
report += "\n"
for sym in removed_syms:
report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name'])
report += "SYMBOL REMOVED: %s\n" % maybe_demangle(sym["name"])
if not names_only:
report += ' %s\n\n' % sym
report += " %s\n\n" % sym
if removed_syms and names_only:
report += '\n'
report += "\n"
if not names_only:
for sym_pair in changed_syms:
old_sym, new_sym = sym_pair
old_str = '\n OLD SYMBOL: %s' % old_sym
new_str = '\n NEW SYMBOL: %s' % new_sym
report += ('SYMBOL CHANGED: %s%s%s\n\n' %
(maybe_demangle(old_sym['name']),
old_str, new_str))
old_str = "\n OLD SYMBOL: %s" % old_sym
new_str = "\n NEW SYMBOL: %s" % new_sym
report += "SYMBOL CHANGED: %s%s%s\n\n" % (
maybe_demangle(old_sym["name"]),
old_str,
new_str,
)
added = bool(len(added_syms) != 0)
abi_break = bool(len(removed_syms))
if not names_only:
abi_break = abi_break or len(changed_syms)
if added or abi_break:
report += 'Summary\n'
report += ' Added: %d\n' % len(added_syms)
report += ' Removed: %d\n' % len(removed_syms)
report += "Summary\n"
report += " Added: %d\n" % len(added_syms)
report += " Removed: %d\n" % len(removed_syms)
if not names_only:
report += ' Changed: %d\n' % len(changed_syms)
report += " Changed: %d\n" % len(changed_syms)
if not abi_break:
report += 'Symbols added.'
report += "Symbols added."
else:
report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!'
report += "ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!"
else:
report += 'Symbols match.'
is_different = abi_break or bool(len(added_syms)) \
or bool(len(changed_syms))
report += "Symbols match."
is_different = abi_break or bool(len(added_syms)) or bool(len(changed_syms))
return report, abi_break, is_different

View File

@ -1,11 +1,11 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""
extract - A set of function that extract symbol lists from shared libraries.
"""
@ -18,7 +18,8 @@ import sys
from libcxx.sym_check import util
extract_ignore_names = ['_init', '_fini']
extract_ignore_names = ["_init", "_fini"]
class NMExtractor(object):
"""
@ -30,7 +31,7 @@ class NMExtractor(object):
"""
Search for the nm executable and return the path.
"""
return shutil.which('nm')
return shutil.which("nm")
def __init__(self, static_lib):
"""
@ -43,11 +44,10 @@ class NMExtractor(object):
print("ERROR: Could not find nm")
sys.exit(1)
self.static_lib = static_lib
self.flags = ['-P', '-g']
if sys.platform.startswith('aix'):
self.flags = ["-P", "-g"]
if sys.platform.startswith("aix"):
# AIX nm demangles symbols by default, so suppress that.
self.flags.append('-C')
self.flags.append("-C")
def extract(self, lib):
"""
@ -56,8 +56,7 @@ class NMExtractor(object):
"""
cmd = [self.nm_exe] + self.flags + [lib]
out = subprocess.check_output(cmd).decode()
fmt_syms = (self._extract_sym(l)
for l in out.splitlines() if l.strip())
fmt_syms = (self._extract_sym(l) for l in out.splitlines() if l.strip())
# Cast symbol to string.
final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
# Make unique and sort strings.
@ -71,15 +70,15 @@ class NMExtractor(object):
if len(bits) < 2:
return None
new_sym = {
'name': bits[0],
'type': bits[1],
'is_defined': (bits[1].lower() != 'u')
"name": bits[0],
"type": bits[1],
"is_defined": (bits[1].lower() != "u"),
}
new_sym['name'] = new_sym['name'].replace('@@', '@')
new_sym["name"] = new_sym["name"].replace("@@", "@")
new_sym = self._transform_sym_type(new_sym)
# NM types which we want to save the size for.
if new_sym['type'] == 'OBJECT' and len(bits) > 3:
new_sym['size'] = int(bits[3], 16)
if new_sym["type"] == "OBJECT" and len(bits) > 3:
new_sym["size"] = int(bits[3], 16)
return new_sym
@staticmethod
@ -89,11 +88,14 @@ class NMExtractor(object):
"""
if sym is None or len(sym) < 2:
return False
if sym['name'] in extract_ignore_names:
if sym["name"] in extract_ignore_names:
return False
bad_types = ['t', 'b', 'r', 'd', 'w']
return (sym['type'] not in bad_types
and sym['name'] not in ['__bss_start', '_end', '_edata'])
bad_types = ["t", "b", "r", "d", "w"]
return sym["type"] not in bad_types and sym["name"] not in [
"__bss_start",
"_end",
"_edata",
]
@staticmethod
def _transform_sym_type(sym):
@ -101,14 +103,15 @@ class NMExtractor(object):
Map the nm single letter output for type to either FUNC or OBJECT.
If the type is not recognized it is left unchanged.
"""
func_types = ['T', 'W']
obj_types = ['B', 'D', 'R', 'V', 'S']
if sym['type'] in func_types:
sym['type'] = 'FUNC'
elif sym['type'] in obj_types:
sym['type'] = 'OBJECT'
func_types = ["T", "W"]
obj_types = ["B", "D", "R", "V", "S"]
if sym["type"] in func_types:
sym["type"] = "FUNC"
elif sym["type"] in obj_types:
sym["type"] = "OBJECT"
return sym
class ReadElfExtractor(object):
"""
ReadElfExtractor - Extract symbol lists from libraries using readelf.
@ -119,7 +122,7 @@ class ReadElfExtractor(object):
"""
Search for the readelf executable and return the path.
"""
return shutil.which('readelf')
return shutil.which("readelf")
def __init__(self, static_lib):
"""
@ -133,7 +136,7 @@ class ReadElfExtractor(object):
sys.exit(1)
# TODO: Support readelf for reading symbols from archives
assert not static_lib and "RealElf does not yet support static libs"
self.flags = ['--wide', '--symbols']
self.flags = ["--wide", "--symbols"]
def extract(self, lib):
"""
@ -155,18 +158,18 @@ class ReadElfExtractor(object):
if len(parts) == 7:
continue
new_sym = {
'name': parts[7],
'size': int(parts[2]),
'type': parts[3],
'is_defined': (parts[6] != 'UND')
"name": parts[7],
"size": int(parts[2]),
"type": parts[3],
"is_defined": (parts[6] != "UND"),
}
assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS']
if new_sym['name'] in extract_ignore_names:
assert new_sym["type"] in ["OBJECT", "FUNC", "NOTYPE", "TLS"]
if new_sym["name"] in extract_ignore_names:
continue
if new_sym['type'] == 'NOTYPE':
if new_sym["type"] == "NOTYPE":
continue
if new_sym['type'] == 'FUNC':
del new_sym['size']
if new_sym["type"] == "FUNC":
del new_sym["size"]
new_syms += [new_sym]
return new_syms
@ -190,98 +193,100 @@ class ReadElfExtractor(object):
end = len(lines)
return lines[start:end]
class AIXDumpExtractor(object):
"""
AIXDumpExtractor - Extract symbol lists from libraries using AIX dump.
"""
"""
AIXDumpExtractor - Extract symbol lists from libraries using AIX dump.
"""
@staticmethod
def find_tool():
"""
Search for the dump executable and return the path.
"""
return shutil.which('dump')
@staticmethod
def find_tool():
"""
Search for the dump executable and return the path.
"""
return shutil.which("dump")
def __init__(self, static_lib):
"""
Initialize the dump executable and flags that will be used to
extract symbols from shared libraries.
"""
# TODO: Support dump for reading symbols from static libraries
assert not static_lib and "static libs not yet supported with dump"
self.tool = self.find_tool()
if self.tool is None:
print("ERROR: Could not find dump")
sys.exit(1)
self.flags = ['-n', '-v']
object_mode = environ.get('OBJECT_MODE')
if object_mode == '32':
self.flags += ['-X32']
elif object_mode == '64':
self.flags += ['-X64']
else:
self.flags += ['-X32_64']
def __init__(self, static_lib):
"""
Initialize the dump executable and flags that will be used to
extract symbols from shared libraries.
"""
# TODO: Support dump for reading symbols from static libraries
assert not static_lib and "static libs not yet supported with dump"
self.tool = self.find_tool()
if self.tool is None:
print("ERROR: Could not find dump")
sys.exit(1)
self.flags = ["-n", "-v"]
object_mode = environ.get("OBJECT_MODE")
if object_mode == "32":
self.flags += ["-X32"]
elif object_mode == "64":
self.flags += ["-X64"]
else:
self.flags += ["-X32_64"]
def extract(self, lib):
"""
Extract symbols from a library and return the results as a dict of
parsed symbols.
"""
cmd = [self.tool] + self.flags + [lib]
out = subprocess.check_output(cmd).decode()
loader_syms = self.get_loader_symbol_table(out)
return self.process_syms(loader_syms)
def extract(self, lib):
"""
Extract symbols from a library and return the results as a dict of
parsed symbols.
"""
cmd = [self.tool] + self.flags + [lib]
out = subprocess.check_output(cmd).decode()
loader_syms = self.get_loader_symbol_table(out)
return self.process_syms(loader_syms)
def process_syms(self, sym_list):
new_syms = []
for s in sym_list:
parts = s.split()
if not parts:
continue
assert len(parts) == 8 or len(parts) == 7
if len(parts) == 7:
continue
new_sym = {
'name': parts[7],
'type': 'FUNC' if parts[4] == 'DS' else 'OBJECT',
'is_defined': (parts[5] != 'EXTref'),
'storage_mapping_class': parts[4],
'import_export': parts[3]
}
if new_sym['name'] in extract_ignore_names:
continue
new_syms += [new_sym]
return new_syms
def process_syms(self, sym_list):
new_syms = []
for s in sym_list:
parts = s.split()
if not parts:
continue
assert len(parts) == 8 or len(parts) == 7
if len(parts) == 7:
continue
new_sym = {
"name": parts[7],
"type": "FUNC" if parts[4] == "DS" else "OBJECT",
"is_defined": (parts[5] != "EXTref"),
"storage_mapping_class": parts[4],
"import_export": parts[3],
}
if new_sym["name"] in extract_ignore_names:
continue
new_syms += [new_sym]
return new_syms
def get_loader_symbol_table(self, out):
lines = out.splitlines()
return filter(lambda n: re.match(r'^\[[0-9]+\]', n), lines)
def get_loader_symbol_table(self, out):
lines = out.splitlines()
return filter(lambda n: re.match(r"^\[[0-9]+\]", n), lines)
@staticmethod
def is_shared_lib(lib):
"""
Check for the shared object flag in XCOFF headers of the input file or
library archive.
"""
dump = AIXDumpExtractor.find_tool()
if dump is None:
print("ERROR: Could not find dump")
sys.exit(1)
cmd = [dump, '-X32_64', '-ov', lib]
out = subprocess.check_output(cmd).decode()
return out.find("SHROBJ") != -1
@staticmethod
def is_shared_lib(lib):
"""
Check for the shared object flag in XCOFF headers of the input file or
library archive.
"""
dump = AIXDumpExtractor.find_tool()
if dump is None:
print("ERROR: Could not find dump")
sys.exit(1)
cmd = [dump, "-X32_64", "-ov", lib]
out = subprocess.check_output(cmd).decode()
return out.find("SHROBJ") != -1
def is_static_library(lib_file):
"""
Determine if a given library is static or shared.
"""
if sys.platform.startswith('aix'):
# An AIX library could be both, but for simplicity assume it isn't.
return not AIXDumpExtractor.is_shared_lib(lib_file)
else:
_, ext = os.path.splitext(lib_file)
return ext == '.a'
"""
Determine if a given library is static or shared.
"""
if sys.platform.startswith("aix"):
# An AIX library could be both, but for simplicity assume it isn't.
return not AIXDumpExtractor.is_shared_lib(lib_file)
else:
_, ext = os.path.splitext(lib_file)
return ext == ".a"
def extract_symbols(lib_file, static_lib=None):
"""
@ -291,7 +296,7 @@ def extract_symbols(lib_file, static_lib=None):
"""
if static_lib is None:
static_lib = is_static_library(lib_file)
if sys.platform.startswith('aix'):
if sys.platform.startswith("aix"):
extractor = AIXDumpExtractor(static_lib=static_lib)
elif ReadElfExtractor.find_tool() and not static_lib:
extractor = ReadElfExtractor(static_lib=static_lib)

View File

@ -1,11 +1,11 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""
match - A set of functions for matching symbols in a list to a list of regexs
"""
@ -14,19 +14,19 @@ import re
def find_and_report_matching(symbol_list, regex_list):
report = ''
report = ""
found_count = 0
for regex_str in regex_list:
report += 'Matching regex "%s":\n' % regex_str
matching_list = find_matching_symbols(symbol_list, regex_str)
if not matching_list:
report += ' No matches found\n\n'
report += " No matches found\n\n"
continue
# else
found_count += len(matching_list)
for m in matching_list:
report += ' MATCHES: %s\n' % m['name']
report += '\n'
report += " MATCHES: %s\n" % m["name"]
report += "\n"
return found_count, report
@ -34,6 +34,6 @@ def find_matching_symbols(symbol_list, regex_str):
regex = re.compile(regex_str)
matching_list = []
for s in symbol_list:
if regex.match(s['name']):
if regex.match(s["name"]):
matching_list += [s]
return matching_list

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
from pprint import pformat
import ast
@ -26,16 +26,16 @@ def read_syms_from_file(filename):
"""
Read a list of symbols in from a file.
"""
with open(filename, 'r') as f:
with open(filename, "r") as f:
data = f.read()
return read_syms_from_list(data.splitlines())
def read_exclusions(filename):
with open(filename, 'r') as f:
with open(filename, "r") as f:
data = f.read()
lines = [l.strip() for l in data.splitlines() if l.strip()]
lines = [l for l in lines if not l.startswith('#')]
lines = [l for l in lines if not l.startswith("#")]
return lines
@ -43,24 +43,24 @@ def write_syms(sym_list, out=None, names_only=False, filter=None):
"""
Write a list of symbols to the file named by out.
"""
out_str = ''
out_str = ""
out_list = sym_list
out_list.sort(key=lambda x: x['name'])
out_list.sort(key=lambda x: x["name"])
if filter is not None:
out_list = filter(out_list)
if names_only:
out_list = [sym['name'] for sym in out_list]
out_list = [sym["name"] for sym in out_list]
for sym in out_list:
# Use pformat for consistent ordering of keys.
out_str += pformat(sym, width=100000) + '\n'
out_str += pformat(sym, width=100000) + "\n"
if out is None:
sys.stdout.write(out_str)
else:
with open(out, 'w') as f:
with open(out, "w") as f:
f.write(out_str)
_cppfilt_exe = shutil.which('c++filt')
_cppfilt_exe = shutil.which("c++filt")
def demangle_symbol(symbol):
@ -73,35 +73,37 @@ def demangle_symbol(symbol):
def is_elf(filename):
with open(filename, 'rb') as f:
with open(filename, "rb") as f:
magic_bytes = f.read(4)
return magic_bytes == b'\x7fELF'
return magic_bytes == b"\x7fELF"
def is_mach_o(filename):
with open(filename, 'rb') as f:
with open(filename, "rb") as f:
magic_bytes = f.read(4)
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
b"\xfe\xed\xfa\xce", # MH_MAGIC
b"\xce\xfa\xed\xfe", # MH_CIGAM
b"\xfe\xed\xfa\xcf", # MH_MAGIC_64
b"\xcf\xfa\xed\xfe", # MH_CIGAM_64
b"\xca\xfe\xba\xbe", # FAT_MAGIC
b"\xbe\xba\xfe\xca", # FAT_CIGAM
]
def is_xcoff_or_big_ar(filename):
with open(filename, 'rb') as f:
with open(filename, "rb") as f:
magic_bytes = f.read(7)
return magic_bytes[:4] in [
b'\x01DF', # XCOFF32
b'\x01F7' # XCOFF64
] or magic_bytes == b'<bigaf>'
return (
magic_bytes[:4] in [b"\x01DF", b"\x01F7"] # XCOFF32 # XCOFF64
or magic_bytes == b"<bigaf>"
)
def is_library_file(filename):
if sys.platform == 'darwin':
if sys.platform == "darwin":
return is_mach_o(filename)
elif sys.platform.startswith('aix'):
elif sys.platform.startswith("aix"):
return is_xcoff_or_big_ar(filename)
else:
return is_elf(filename)
@ -109,169 +111,167 @@ def is_library_file(filename):
def extract_or_load(filename):
import libcxx.sym_check.extract
if is_library_file(filename):
return libcxx.sym_check.extract.extract_symbols(filename)
return read_syms_from_file(filename)
def adjust_mangled_name(name):
if not name.startswith('__Z'):
if not name.startswith("__Z"):
return name
return name[1:]
new_delete_std_symbols = [
'_Znam',
'_Znwm',
'_ZdaPv',
'_ZdaPvm',
'_ZdlPv',
'_ZdlPvm'
]
new_delete_std_symbols = ["_Znam", "_Znwm", "_ZdaPv", "_ZdaPvm", "_ZdlPv", "_ZdlPvm"]
cxxabi_symbols = [
'___dynamic_cast',
'___gxx_personality_v0',
'_ZTIDi',
'_ZTIDn',
'_ZTIDs',
'_ZTIPDi',
'_ZTIPDn',
'_ZTIPDs',
'_ZTIPKDi',
'_ZTIPKDn',
'_ZTIPKDs',
'_ZTIPKa',
'_ZTIPKb',
'_ZTIPKc',
'_ZTIPKd',
'_ZTIPKe',
'_ZTIPKf',
'_ZTIPKh',
'_ZTIPKi',
'_ZTIPKj',
'_ZTIPKl',
'_ZTIPKm',
'_ZTIPKs',
'_ZTIPKt',
'_ZTIPKv',
'_ZTIPKw',
'_ZTIPKx',
'_ZTIPKy',
'_ZTIPa',
'_ZTIPb',
'_ZTIPc',
'_ZTIPd',
'_ZTIPe',
'_ZTIPf',
'_ZTIPh',
'_ZTIPi',
'_ZTIPj',
'_ZTIPl',
'_ZTIPm',
'_ZTIPs',
'_ZTIPt',
'_ZTIPv',
'_ZTIPw',
'_ZTIPx',
'_ZTIPy',
'_ZTIa',
'_ZTIb',
'_ZTIc',
'_ZTId',
'_ZTIe',
'_ZTIf',
'_ZTIh',
'_ZTIi',
'_ZTIj',
'_ZTIl',
'_ZTIm',
'_ZTIs',
'_ZTIt',
'_ZTIv',
'_ZTIw',
'_ZTIx',
'_ZTIy',
'_ZTSDi',
'_ZTSDn',
'_ZTSDs',
'_ZTSPDi',
'_ZTSPDn',
'_ZTSPDs',
'_ZTSPKDi',
'_ZTSPKDn',
'_ZTSPKDs',
'_ZTSPKa',
'_ZTSPKb',
'_ZTSPKc',
'_ZTSPKd',
'_ZTSPKe',
'_ZTSPKf',
'_ZTSPKh',
'_ZTSPKi',
'_ZTSPKj',
'_ZTSPKl',
'_ZTSPKm',
'_ZTSPKs',
'_ZTSPKt',
'_ZTSPKv',
'_ZTSPKw',
'_ZTSPKx',
'_ZTSPKy',
'_ZTSPa',
'_ZTSPb',
'_ZTSPc',
'_ZTSPd',
'_ZTSPe',
'_ZTSPf',
'_ZTSPh',
'_ZTSPi',
'_ZTSPj',
'_ZTSPl',
'_ZTSPm',
'_ZTSPs',
'_ZTSPt',
'_ZTSPv',
'_ZTSPw',
'_ZTSPx',
'_ZTSPy',
'_ZTSa',
'_ZTSb',
'_ZTSc',
'_ZTSd',
'_ZTSe',
'_ZTSf',
'_ZTSh',
'_ZTSi',
'_ZTSj',
'_ZTSl',
'_ZTSm',
'_ZTSs',
'_ZTSt',
'_ZTSv',
'_ZTSw',
'_ZTSx',
'_ZTSy'
"___dynamic_cast",
"___gxx_personality_v0",
"_ZTIDi",
"_ZTIDn",
"_ZTIDs",
"_ZTIPDi",
"_ZTIPDn",
"_ZTIPDs",
"_ZTIPKDi",
"_ZTIPKDn",
"_ZTIPKDs",
"_ZTIPKa",
"_ZTIPKb",
"_ZTIPKc",
"_ZTIPKd",
"_ZTIPKe",
"_ZTIPKf",
"_ZTIPKh",
"_ZTIPKi",
"_ZTIPKj",
"_ZTIPKl",
"_ZTIPKm",
"_ZTIPKs",
"_ZTIPKt",
"_ZTIPKv",
"_ZTIPKw",
"_ZTIPKx",
"_ZTIPKy",
"_ZTIPa",
"_ZTIPb",
"_ZTIPc",
"_ZTIPd",
"_ZTIPe",
"_ZTIPf",
"_ZTIPh",
"_ZTIPi",
"_ZTIPj",
"_ZTIPl",
"_ZTIPm",
"_ZTIPs",
"_ZTIPt",
"_ZTIPv",
"_ZTIPw",
"_ZTIPx",
"_ZTIPy",
"_ZTIa",
"_ZTIb",
"_ZTIc",
"_ZTId",
"_ZTIe",
"_ZTIf",
"_ZTIh",
"_ZTIi",
"_ZTIj",
"_ZTIl",
"_ZTIm",
"_ZTIs",
"_ZTIt",
"_ZTIv",
"_ZTIw",
"_ZTIx",
"_ZTIy",
"_ZTSDi",
"_ZTSDn",
"_ZTSDs",
"_ZTSPDi",
"_ZTSPDn",
"_ZTSPDs",
"_ZTSPKDi",
"_ZTSPKDn",
"_ZTSPKDs",
"_ZTSPKa",
"_ZTSPKb",
"_ZTSPKc",
"_ZTSPKd",
"_ZTSPKe",
"_ZTSPKf",
"_ZTSPKh",
"_ZTSPKi",
"_ZTSPKj",
"_ZTSPKl",
"_ZTSPKm",
"_ZTSPKs",
"_ZTSPKt",
"_ZTSPKv",
"_ZTSPKw",
"_ZTSPKx",
"_ZTSPKy",
"_ZTSPa",
"_ZTSPb",
"_ZTSPc",
"_ZTSPd",
"_ZTSPe",
"_ZTSPf",
"_ZTSPh",
"_ZTSPi",
"_ZTSPj",
"_ZTSPl",
"_ZTSPm",
"_ZTSPs",
"_ZTSPt",
"_ZTSPv",
"_ZTSPw",
"_ZTSPx",
"_ZTSPy",
"_ZTSa",
"_ZTSb",
"_ZTSc",
"_ZTSd",
"_ZTSe",
"_ZTSf",
"_ZTSh",
"_ZTSi",
"_ZTSj",
"_ZTSl",
"_ZTSm",
"_ZTSs",
"_ZTSt",
"_ZTSv",
"_ZTSw",
"_ZTSx",
"_ZTSy",
]
def is_stdlib_symbol_name(name, sym):
name = adjust_mangled_name(name)
if re.search("@GLIBC|@GCC", name):
# Only when symbol is defined do we consider it ours
return sym['is_defined']
if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
return sym["is_defined"]
if re.search("(St[0-9])|(__cxa)|(__cxxabi)", name):
return True
if name in new_delete_std_symbols:
return True
if name in cxxabi_symbols:
return True
if name.startswith('_Z'):
if name.startswith("_Z"):
return True
return False
def filter_stdlib_symbols(syms):
stdlib_symbols = []
other_symbols = []
for s in syms:
canon_name = adjust_mangled_name(s['name'])
canon_name = adjust_mangled_name(s["name"])
if not is_stdlib_symbol_name(canon_name, s):
other_symbols += [s]
else:

View File

@ -1,49 +1,55 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
import os
def _getSubstitution(substitution, config):
for (orig, replacement) in config.substitutions:
if orig == substitution:
return replacement
raise ValueError('Substitution {} is not in the config.'.format(substitution))
for (orig, replacement) in config.substitutions:
if orig == substitution:
return replacement
raise ValueError("Substitution {} is not in the config.".format(substitution))
def configure(parameters, features, config, lit_config):
note = lambda s: lit_config.note("({}) {}".format(config.name, s))
config.environment = dict(os.environ)
note = lambda s: lit_config.note("({}) {}".format(config.name, s))
config.environment = dict(os.environ)
# Apply the actions supplied by parameters to the configuration first, since
# parameters are things that we request explicitly and which might influence
# what features are implicitly made available next.
for param in parameters:
actions = param.getActions(config, lit_config.params)
for action in actions:
action.applyTo(config)
if lit_config.debug:
note("Applied '{}' as a result of parameter '{}'".format(
action.pretty(config, lit_config.params),
param.pretty(config, lit_config.params)))
# Apply the actions supplied by parameters to the configuration first, since
# parameters are things that we request explicitly and which might influence
# what features are implicitly made available next.
for param in parameters:
actions = param.getActions(config, lit_config.params)
for action in actions:
action.applyTo(config)
if lit_config.debug:
note(
"Applied '{}' as a result of parameter '{}'".format(
action.pretty(config, lit_config.params),
param.pretty(config, lit_config.params),
)
)
# Then, apply the automatically-detected features.
for feature in features:
actions = feature.getActions(config)
for action in actions:
action.applyTo(config)
if lit_config.debug:
note("Applied '{}' as a result of implicitly detected feature '{}'".format(
action.pretty(config, lit_config.params),
feature.pretty(config)))
# Then, apply the automatically-detected features.
for feature in features:
actions = feature.getActions(config)
for action in actions:
action.applyTo(config)
if lit_config.debug:
note(
"Applied '{}' as a result of implicitly detected feature '{}'".format(
action.pretty(config, lit_config.params), feature.pretty(config)
)
)
# Print the basic substitutions
for sub in ('%{cxx}', '%{flags}', '%{compile_flags}', '%{link_flags}', '%{exec}'):
note("Using {} substitution: '{}'".format(sub, _getSubstitution(sub, config)))
# Print the basic substitutions
for sub in ("%{cxx}", "%{flags}", "%{compile_flags}", "%{link_flags}", "%{exec}"):
note("Using {} substitution: '{}'".format(sub, _getSubstitution(sub, config)))
# Print all available features
note("All available features: {}".format(', '.join(config.available_features)))
# Print all available features
note("All available features: {}".format(", ".join(config.available_features)))

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
from lit.BooleanExpression import BooleanExpression
@ -13,107 +13,176 @@ import shutil
import subprocess
import sys
_isClang = lambda cfg: '__clang__' in compilerMacros(cfg) and '__apple_build_version__' not in compilerMacros(cfg)
_isAppleClang = lambda cfg: '__apple_build_version__' in compilerMacros(cfg)
_isGCC = lambda cfg: '__GNUC__' in compilerMacros(cfg) and '__clang__' not in compilerMacros(cfg)
_isMSVC = lambda cfg: '_MSC_VER' in compilerMacros(cfg)
_msvcVersion = lambda cfg: (int(compilerMacros(cfg)['_MSC_VER']) // 100, int(compilerMacros(cfg)['_MSC_VER']) % 100)
_isClang = lambda cfg: "__clang__" in compilerMacros(
cfg
) and "__apple_build_version__" not in compilerMacros(cfg)
_isAppleClang = lambda cfg: "__apple_build_version__" in compilerMacros(cfg)
_isGCC = lambda cfg: "__GNUC__" in compilerMacros(
cfg
) and "__clang__" not in compilerMacros(cfg)
_isMSVC = lambda cfg: "_MSC_VER" in compilerMacros(cfg)
_msvcVersion = lambda cfg: (
int(compilerMacros(cfg)["_MSC_VER"]) // 100,
int(compilerMacros(cfg)["_MSC_VER"]) % 100,
)
def _getSuitableClangTidy(cfg):
try:
# If we didn't build the libcxx-tidy plugin via CMake, we can't run the clang-tidy tests.
if runScriptExitCode(cfg, ['stat %{test-tools}/clang_tidy_checks/libcxx-tidy.plugin']) != 0:
return None
try:
# If we didn't build the libcxx-tidy plugin via CMake, we can't run the clang-tidy tests.
if (
runScriptExitCode(
cfg, ["stat %{test-tools}/clang_tidy_checks/libcxx-tidy.plugin"]
)
!= 0
):
return None
# TODO This should be the last stable release.
# LLVM RELEASE bump to latest stable version
if runScriptExitCode(cfg, ['clang-tidy-16 --version']) == 0:
return 'clang-tidy-16'
# TODO This should be the last stable release.
# LLVM RELEASE bump to latest stable version
if runScriptExitCode(cfg, ["clang-tidy-16 --version"]) == 0:
return "clang-tidy-16"
# LLVM RELEASE bump version
if int(re.search('[0-9]+', commandOutput(cfg, ['clang-tidy --version'])).group()) >= 16:
return 'clang-tidy'
# LLVM RELEASE bump version
if (
int(
re.search(
"[0-9]+", commandOutput(cfg, ["clang-tidy --version"])
).group()
)
>= 16
):
return "clang-tidy"
except ConfigurationRuntimeError:
return None
except ConfigurationRuntimeError:
return None
DEFAULT_FEATURES = [
Feature(name='thread-safety',
when=lambda cfg: hasCompileFlag(cfg, '-Werror=thread-safety'),
actions=[AddCompileFlag('-Werror=thread-safety')]),
Feature(name='diagnose-if-support',
when=lambda cfg: hasCompileFlag(cfg, '-Wuser-defined-warnings'),
actions=[AddCompileFlag('-Wuser-defined-warnings')]),
# Tests to validate whether the compiler has a way to set the maximum number
# of steps during constant evaluation. Since the flag differs per compiler
# store the "valid" flag as a feature. This allows passing the proper compile
# flag to the compiler:
# // ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-steps): -fconstexpr-steps=12345678
# // ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-ops-limit): -fconstexpr-ops-limit=12345678
Feature(name='has-fconstexpr-steps',
when=lambda cfg: hasCompileFlag(cfg, '-fconstexpr-steps=1')),
Feature(name='has-fconstexpr-ops-limit',
when=lambda cfg: hasCompileFlag(cfg, '-fconstexpr-ops-limit=1')),
Feature(name='has-fblocks', when=lambda cfg: hasCompileFlag(cfg, '-fblocks')),
Feature(name='-fsized-deallocation', when=lambda cfg: hasCompileFlag(cfg, '-fsized-deallocation')),
Feature(name='-faligned-allocation', when=lambda cfg: hasCompileFlag(cfg, '-faligned-allocation')),
Feature(name='fdelayed-template-parsing', when=lambda cfg: hasCompileFlag(cfg, '-fdelayed-template-parsing')),
Feature(name='libcpp-no-coroutines', when=lambda cfg: featureTestMacros(cfg).get('__cpp_impl_coroutine', 0) < 201902),
Feature(name='has-fobjc-arc', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc') and
sys.platform.lower().strip() == 'darwin'), # TODO: this doesn't handle cross-compiling to Apple platforms.
Feature(name='objective-c++', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc')),
Feature(name='verify-support', when=lambda cfg: hasCompileFlag(cfg, '-Xclang -verify-ignore-unexpected')),
Feature(name='non-lockfree-atomics',
when=lambda cfg: sourceBuilds(cfg, """
Feature(
name="thread-safety",
when=lambda cfg: hasCompileFlag(cfg, "-Werror=thread-safety"),
actions=[AddCompileFlag("-Werror=thread-safety")],
),
Feature(
name="diagnose-if-support",
when=lambda cfg: hasCompileFlag(cfg, "-Wuser-defined-warnings"),
actions=[AddCompileFlag("-Wuser-defined-warnings")],
),
# Tests to validate whether the compiler has a way to set the maximum number
# of steps during constant evaluation. Since the flag differs per compiler
# store the "valid" flag as a feature. This allows passing the proper compile
# flag to the compiler:
# // ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-steps): -fconstexpr-steps=12345678
# // ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-ops-limit): -fconstexpr-ops-limit=12345678
Feature(
name="has-fconstexpr-steps",
when=lambda cfg: hasCompileFlag(cfg, "-fconstexpr-steps=1"),
),
Feature(
name="has-fconstexpr-ops-limit",
when=lambda cfg: hasCompileFlag(cfg, "-fconstexpr-ops-limit=1"),
),
Feature(name="has-fblocks", when=lambda cfg: hasCompileFlag(cfg, "-fblocks")),
Feature(
name="-fsized-deallocation",
when=lambda cfg: hasCompileFlag(cfg, "-fsized-deallocation"),
),
Feature(
name="-faligned-allocation",
when=lambda cfg: hasCompileFlag(cfg, "-faligned-allocation"),
),
Feature(
name="fdelayed-template-parsing",
when=lambda cfg: hasCompileFlag(cfg, "-fdelayed-template-parsing"),
),
Feature(
name="libcpp-no-coroutines",
when=lambda cfg: featureTestMacros(cfg).get("__cpp_impl_coroutine", 0) < 201902,
),
Feature(
name="has-fobjc-arc",
when=lambda cfg: hasCompileFlag(cfg, "-xobjective-c++ -fobjc-arc")
and sys.platform.lower().strip() == "darwin",
), # TODO: this doesn't handle cross-compiling to Apple platforms.
Feature(
name="objective-c++",
when=lambda cfg: hasCompileFlag(cfg, "-xobjective-c++ -fobjc-arc"),
),
Feature(
name="verify-support",
when=lambda cfg: hasCompileFlag(cfg, "-Xclang -verify-ignore-unexpected"),
),
Feature(
name="non-lockfree-atomics",
when=lambda cfg: sourceBuilds(
cfg,
"""
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { (void)x.load(); return 0; }
""")),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(name='is-lockfree-runtime-function',
when=lambda cfg: sourceBuilds(cfg, """
""",
),
),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(
name="is-lockfree-runtime-function",
when=lambda cfg: sourceBuilds(
cfg,
"""
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { return x.is_lock_free(); }
""")),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
# a shared library in a shell test by using the '-shared' compiler flag.
#
# Note: To implement this check properly, we need to make sure that we use something inside the
# compiled library, not only in the headers. It should be safe to assume that all implementations
# define `operator new` in the compiled library.
Feature(name='cant-build-shared-library',
when=lambda cfg: not sourceBuilds(cfg, """
""",
),
),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
# a shared library in a shell test by using the '-shared' compiler flag.
#
# Note: To implement this check properly, we need to make sure that we use something inside the
# compiled library, not only in the headers. It should be safe to assume that all implementations
# define `operator new` in the compiled library.
Feature(
name="cant-build-shared-library",
when=lambda cfg: not sourceBuilds(
cfg,
"""
void f() { new int(3); }
""", ['-shared'])),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.20348.0):
# https://developercommunity.visualstudio.com/t/utf-8-locales-break-ctype-functions-for-wchar-type/1653678
Feature(name='win32-broken-utf8-wchar-ctype',
when=lambda cfg: not '_LIBCPP_HAS_NO_LOCALIZATION' in compilerMacros(cfg) and '_WIN32' in compilerMacros(cfg) and not programSucceeds(cfg, """
""",
["-shared"],
),
),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.20348.0):
# https://developercommunity.visualstudio.com/t/utf-8-locales-break-ctype-functions-for-wchar-type/1653678
Feature(
name="win32-broken-utf8-wchar-ctype",
when=lambda cfg: not "_LIBCPP_HAS_NO_LOCALIZATION" in compilerMacros(cfg)
and "_WIN32" in compilerMacros(cfg)
and not programSucceeds(
cfg,
"""
#include <locale.h>
#include <wctype.h>
int main(int, char**) {
setlocale(LC_ALL, "en_US.UTF-8");
return towlower(L'\\xDA') != L'\\xFA';
}
""")),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.19041.0).
# https://developercommunity.visualstudio.com/t/printf-formatting-with-g-outputs-too/1660837
Feature(name='win32-broken-printf-g-precision',
when=lambda cfg: '_WIN32' in compilerMacros(cfg) and not programSucceeds(cfg, """
""",
),
),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.19041.0).
# https://developercommunity.visualstudio.com/t/printf-formatting-with-g-outputs-too/1660837
Feature(
name="win32-broken-printf-g-precision",
when=lambda cfg: "_WIN32" in compilerMacros(cfg)
and not programSucceeds(
cfg,
"""
#include <stdio.h>
#include <string.h>
int main(int, char**) {
@ -121,70 +190,126 @@ DEFAULT_FEATURES = [
snprintf(buf, sizeof(buf), "%#.*g", 0, 0.0);
return strcmp(buf, "0.");
}
""")),
# Check for Glibc < 2.27, where the ru_RU.UTF-8 locale had
# mon_decimal_point == ".", which our tests don't handle.
Feature(name='glibc-old-ru_RU-decimal-point',
when=lambda cfg: not '_LIBCPP_HAS_NO_LOCALIZATION' in compilerMacros(cfg) and not programSucceeds(cfg, """
""",
),
),
# Check for Glibc < 2.27, where the ru_RU.UTF-8 locale had
# mon_decimal_point == ".", which our tests don't handle.
Feature(
name="glibc-old-ru_RU-decimal-point",
when=lambda cfg: not "_LIBCPP_HAS_NO_LOCALIZATION" in compilerMacros(cfg)
and not programSucceeds(
cfg,
"""
#include <locale.h>
#include <string.h>
int main(int, char**) {
setlocale(LC_ALL, "ru_RU.UTF-8");
return strcmp(localeconv()->mon_decimal_point, ",");
}
""")),
Feature(name='has-unix-headers',
when=lambda cfg: sourceBuilds(cfg, """
""",
),
),
Feature(
name="has-unix-headers",
when=lambda cfg: sourceBuilds(
cfg,
"""
#include <unistd.h>
#include <sys/wait.h>
int main(int, char**) {
return 0;
}
""")),
# Whether Bash can run on the executor.
# This is not always the case, for example when running on embedded systems.
#
# For the corner case of bash existing, but it being missing in the path
# set in %{exec} as "--env PATH=one-single-dir", the executor does find
# and executes bash, but bash then can't find any other common shell
# utilities. Test executing "bash -c 'bash --version'" to see if bash
# manages to find binaries to execute.
Feature(name='executor-has-no-bash',
when=lambda cfg: runScriptExitCode(cfg, ['%{exec} bash -c \'bash --version\'']) != 0),
Feature(name='has-clang-tidy',
when=lambda cfg: _getSuitableClangTidy(cfg) is not None,
actions=[AddSubstitution('%{clang-tidy}', lambda cfg: _getSuitableClangTidy(cfg))]),
Feature(name='apple-clang', when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name='clang', when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isClang),
# Note: Due to a GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104760), we must disable deprecation warnings
# on GCC or spurious diagnostics are issued.
#
# TODO:
# - Enable -Wplacement-new with GCC.
# - Enable -Wclass-memaccess with GCC.
Feature(name='gcc', when=_isGCC,
actions=[AddCompileFlag('-D_LIBCPP_DISABLE_DEPRECATION_WARNINGS'),
AddCompileFlag('-Wno-placement-new'),
AddCompileFlag('-Wno-class-memaccess')]),
Feature(name=lambda cfg: 'gcc-{__GNUC__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name='msvc', when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}.{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
""",
),
),
# Whether Bash can run on the executor.
# This is not always the case, for example when running on embedded systems.
#
# For the corner case of bash existing, but it being missing in the path
# set in %{exec} as "--env PATH=one-single-dir", the executor does find
# and executes bash, but bash then can't find any other common shell
# utilities. Test executing "bash -c 'bash --version'" to see if bash
# manages to find binaries to execute.
Feature(
name="executor-has-no-bash",
when=lambda cfg: runScriptExitCode(cfg, ["%{exec} bash -c 'bash --version'"])
!= 0,
),
Feature(
name="has-clang-tidy",
when=lambda cfg: _getSuitableClangTidy(cfg) is not None,
actions=[
AddSubstitution("%{clang-tidy}", lambda cfg: _getSuitableClangTidy(cfg))
],
),
Feature(name="apple-clang", when=_isAppleClang),
Feature(
name=lambda cfg: "apple-clang-{__clang_major__}".format(**compilerMacros(cfg)),
when=_isAppleClang,
),
Feature(
name=lambda cfg: "apple-clang-{__clang_major__}.{__clang_minor__}".format(
**compilerMacros(cfg)
),
when=_isAppleClang,
),
Feature(
name=lambda cfg: "apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}".format(
**compilerMacros(cfg)
),
when=_isAppleClang,
),
Feature(name="clang", when=_isClang),
Feature(
name=lambda cfg: "clang-{__clang_major__}".format(**compilerMacros(cfg)),
when=_isClang,
),
Feature(
name=lambda cfg: "clang-{__clang_major__}.{__clang_minor__}".format(
**compilerMacros(cfg)
),
when=_isClang,
),
Feature(
name=lambda cfg: "clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}".format(
**compilerMacros(cfg)
),
when=_isClang,
),
# Note: Due to a GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104760), we must disable deprecation warnings
# on GCC or spurious diagnostics are issued.
#
# TODO:
# - Enable -Wplacement-new with GCC.
# - Enable -Wclass-memaccess with GCC.
Feature(
name="gcc",
when=_isGCC,
actions=[
AddCompileFlag("-D_LIBCPP_DISABLE_DEPRECATION_WARNINGS"),
AddCompileFlag("-Wno-placement-new"),
AddCompileFlag("-Wno-class-memaccess"),
],
),
Feature(
name=lambda cfg: "gcc-{__GNUC__}".format(**compilerMacros(cfg)), when=_isGCC
),
Feature(
name=lambda cfg: "gcc-{__GNUC__}.{__GNUC_MINOR__}".format(
**compilerMacros(cfg)
),
when=_isGCC,
),
Feature(
name=lambda cfg: "gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}".format(
**compilerMacros(cfg)
),
when=_isGCC,
),
Feature(name="msvc", when=_isMSVC),
Feature(name=lambda cfg: "msvc-{}".format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: "msvc-{}.{}".format(*_msvcVersion(cfg)), when=_isMSVC),
]
# Deduce and add the test features that that are implied by the #defines in
@ -198,54 +323,70 @@ DEFAULT_FEATURES = [
# Note that features that are more strongly tied to libc++ are named libcpp-foo,
# while features that are more general in nature are not prefixed with 'libcpp-'.
macros = {
'_LIBCPP_HAS_NO_MONOTONIC_CLOCK': 'no-monotonic-clock',
'_LIBCPP_HAS_NO_THREADS': 'no-threads',
'_LIBCPP_HAS_THREAD_API_EXTERNAL': 'libcpp-has-thread-api-external',
'_LIBCPP_HAS_THREAD_API_PTHREAD': 'libcpp-has-thread-api-pthread',
'_LIBCPP_NO_VCRUNTIME': 'libcpp-no-vcruntime',
'_LIBCPP_ABI_VERSION': 'libcpp-abi-version',
'_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY': 'no-filesystem',
'_LIBCPP_HAS_NO_RANDOM_DEVICE': 'no-random-device',
'_LIBCPP_HAS_NO_LOCALIZATION': 'no-localization',
'_LIBCPP_HAS_NO_FSTREAM': 'no-fstream',
'_LIBCPP_HAS_NO_WIDE_CHARACTERS': 'no-wide-characters',
'_LIBCPP_HAS_NO_UNICODE': 'libcpp-has-no-unicode',
'_LIBCPP_ENABLE_DEBUG_MODE': 'libcpp-has-debug-mode',
"_LIBCPP_HAS_NO_MONOTONIC_CLOCK": "no-monotonic-clock",
"_LIBCPP_HAS_NO_THREADS": "no-threads",
"_LIBCPP_HAS_THREAD_API_EXTERNAL": "libcpp-has-thread-api-external",
"_LIBCPP_HAS_THREAD_API_PTHREAD": "libcpp-has-thread-api-pthread",
"_LIBCPP_NO_VCRUNTIME": "libcpp-no-vcruntime",
"_LIBCPP_ABI_VERSION": "libcpp-abi-version",
"_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY": "no-filesystem",
"_LIBCPP_HAS_NO_RANDOM_DEVICE": "no-random-device",
"_LIBCPP_HAS_NO_LOCALIZATION": "no-localization",
"_LIBCPP_HAS_NO_FSTREAM": "no-fstream",
"_LIBCPP_HAS_NO_WIDE_CHARACTERS": "no-wide-characters",
"_LIBCPP_HAS_NO_UNICODE": "libcpp-has-no-unicode",
"_LIBCPP_ENABLE_DEBUG_MODE": "libcpp-has-debug-mode",
}
for macro, feature in macros.items():
DEFAULT_FEATURES.append(
Feature(name=lambda cfg, m=macro, f=feature: f + ('={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''),
when=lambda cfg, m=macro: m in compilerMacros(cfg))
)
DEFAULT_FEATURES.append(
Feature(
name=lambda cfg, m=macro, f=feature: f
+ ("={}".format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ""),
when=lambda cfg, m=macro: m in compilerMacros(cfg),
)
)
# Mapping from canonical locale names (used in the tests) to possible locale
# names on various systems. Each locale is considered supported if any of the
# alternative names is supported.
locales = {
'en_US.UTF-8': ['en_US.UTF-8', 'en_US.utf8', 'English_United States.1252'],
'fr_FR.UTF-8': ['fr_FR.UTF-8', 'fr_FR.utf8', 'French_France.1252'],
'ja_JP.UTF-8': ['ja_JP.UTF-8', 'ja_JP.utf8', 'Japanese_Japan.923'],
'ru_RU.UTF-8': ['ru_RU.UTF-8', 'ru_RU.utf8', 'Russian_Russia.1251'],
'zh_CN.UTF-8': ['zh_CN.UTF-8', 'zh_CN.utf8', 'Chinese_China.936'],
'fr_CA.ISO8859-1': ['fr_CA.ISO8859-1', 'French_Canada.1252'],
'cs_CZ.ISO8859-2': ['cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250']
"en_US.UTF-8": ["en_US.UTF-8", "en_US.utf8", "English_United States.1252"],
"fr_FR.UTF-8": ["fr_FR.UTF-8", "fr_FR.utf8", "French_France.1252"],
"ja_JP.UTF-8": ["ja_JP.UTF-8", "ja_JP.utf8", "Japanese_Japan.923"],
"ru_RU.UTF-8": ["ru_RU.UTF-8", "ru_RU.utf8", "Russian_Russia.1251"],
"zh_CN.UTF-8": ["zh_CN.UTF-8", "zh_CN.utf8", "Chinese_China.936"],
"fr_CA.ISO8859-1": ["fr_CA.ISO8859-1", "French_Canada.1252"],
"cs_CZ.ISO8859-2": ["cs_CZ.ISO8859-2", "Czech_Czech Republic.1250"],
}
for locale, alts in locales.items():
# Note: Using alts directly in the lambda body here will bind it to the value at the
# end of the loop. Assigning it to a default argument works around this issue.
DEFAULT_FEATURES.append(Feature(name='locale.{}'.format(locale),
when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts)))
# Note: Using alts directly in the lambda body here will bind it to the value at the
# end of the loop. Assigning it to a default argument works around this issue.
DEFAULT_FEATURES.append(
Feature(
name="locale.{}".format(locale),
when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts),
)
)
# Add features representing the target platform name: darwin, linux, windows, etc...
DEFAULT_FEATURES += [
Feature(name='darwin', when=lambda cfg: '__APPLE__' in compilerMacros(cfg)),
Feature(name='windows', when=lambda cfg: '_WIN32' in compilerMacros(cfg)),
Feature(name='windows-dll', when=lambda cfg: '_WIN32' in compilerMacros(cfg) and sourceBuilds(cfg, """
Feature(name="darwin", when=lambda cfg: "__APPLE__" in compilerMacros(cfg)),
Feature(name="windows", when=lambda cfg: "_WIN32" in compilerMacros(cfg)),
Feature(
name="windows-dll",
when=lambda cfg: "_WIN32" in compilerMacros(cfg)
and sourceBuilds(
cfg,
"""
#include <iostream>
int main(int, char**) { return 0; }
""") and programSucceeds(cfg, """
""",
)
and programSucceeds(
cfg,
"""
#include <iostream>
#include <windows.h>
#include <winnt.h>
@ -272,57 +413,76 @@ DEFAULT_FEATURES += [
// loaded from a DLL.
return 0;
}
"""), actions=[AddCompileFlag('-DTEST_WINDOWS_DLL')]),
Feature(name='linux', when=lambda cfg: '__linux__' in compilerMacros(cfg)),
Feature(name='netbsd', when=lambda cfg: '__NetBSD__' in compilerMacros(cfg)),
Feature(name='freebsd', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg)),
Feature(name='LIBCXX-FREEBSD-FIXME', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg)),
""",
),
actions=[AddCompileFlag("-DTEST_WINDOWS_DLL")],
),
Feature(name="linux", when=lambda cfg: "__linux__" in compilerMacros(cfg)),
Feature(name="netbsd", when=lambda cfg: "__NetBSD__" in compilerMacros(cfg)),
Feature(name="freebsd", when=lambda cfg: "__FreeBSD__" in compilerMacros(cfg)),
Feature(
name="LIBCXX-FREEBSD-FIXME",
when=lambda cfg: "__FreeBSD__" in compilerMacros(cfg),
),
]
# Add features representing the build host platform name.
# The build host could differ from the target platform for cross-compilation.
DEFAULT_FEATURES += [
Feature(name='buildhost={}'.format(sys.platform.lower().strip())),
# sys.platform can often be represented by a "sub-system", such as 'win32', 'cygwin', 'mingw', freebsd13 & etc.
# We define a consolidated feature on a few platforms.
Feature(name='buildhost=windows', when=lambda cfg: platform.system().lower().startswith('windows')),
Feature(name='buildhost=freebsd', when=lambda cfg: platform.system().lower().startswith('freebsd')),
Feature(name='buildhost=aix', when=lambda cfg: platform.system().lower().startswith('aix'))
Feature(name="buildhost={}".format(sys.platform.lower().strip())),
# sys.platform can often be represented by a "sub-system", such as 'win32', 'cygwin', 'mingw', freebsd13 & etc.
# We define a consolidated feature on a few platforms.
Feature(
name="buildhost=windows",
when=lambda cfg: platform.system().lower().startswith("windows"),
),
Feature(
name="buildhost=freebsd",
when=lambda cfg: platform.system().lower().startswith("freebsd"),
),
Feature(
name="buildhost=aix",
when=lambda cfg: platform.system().lower().startswith("aix"),
),
]
# Detect whether GDB is on the system, has Python scripting and supports
# adding breakpoint commands. If so add a substitution to access it.
def check_gdb(cfg):
gdb_path = shutil.which('gdb')
if gdb_path is None:
return False
gdb_path = shutil.which("gdb")
if gdb_path is None:
return False
# Check that we can set breakpoint commands, which was added in 8.3.
# Using the quit command here means that gdb itself exits, not just
# the "python <...>" command.
test_src = """\
# Check that we can set breakpoint commands, which was added in 8.3.
# Using the quit command here means that gdb itself exits, not just
# the "python <...>" command.
test_src = """\
try:
gdb.Breakpoint(\"main\").commands=\"foo\"
except AttributeError:
gdb.execute(\"quit 1\")
gdb.execute(\"quit\")"""
try:
stdout = subprocess.check_output(
[gdb_path, "-ex", "python " + test_src, "--batch"],
stderr=subprocess.DEVNULL, universal_newlines=True)
except subprocess.CalledProcessError:
# We can't set breakpoint commands
return False
try:
stdout = subprocess.check_output(
[gdb_path, "-ex", "python " + test_src, "--batch"],
stderr=subprocess.DEVNULL,
universal_newlines=True,
)
except subprocess.CalledProcessError:
# We can't set breakpoint commands
return False
# Check we actually ran the Python
return not "Python scripting is not supported" in stdout
# Check we actually ran the Python
return not "Python scripting is not supported" in stdout
DEFAULT_FEATURES += [
Feature(name='host-has-gdb-with-python',
when=check_gdb,
actions=[AddSubstitution('%{gdb}', lambda cfg: shutil.which('gdb'))]
)
Feature(
name="host-has-gdb-with-python",
when=check_gdb,
actions=[AddSubstitution("%{gdb}", lambda cfg: shutil.which("gdb"))],
)
]
# Define features for back-deployment testing.
@ -339,56 +499,105 @@ DEFAULT_FEATURES += [
# be achieved by creating a `.verify.cpp` test that checks for the right errors, and
# mark that test as requiring `stdlib=<vendor>-libc++ && target=<target>`.
DEFAULT_FEATURES += [
# Tests that require std::to_chars(floating-point) in the built library
Feature(name='availability-fp_to_chars-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0|13.0)(.0)?}}', cfg.available_features)),
# Tests that require https://wg21.link/P0482 support in the built library
Feature(name='availability-char8_t_support-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0)(.0)?}}', cfg.available_features)),
# Tests that require __libcpp_verbose_abort support in the built library
Feature(name='availability-verbose_abort-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0|13.0)(.0)?}}', cfg.available_features)),
# Tests that require std::bad_variant_access in the built library
Feature(name='availability-bad_variant_access-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}', cfg.available_features)),
# Tests that require std::bad_optional_access in the built library
Feature(name='availability-bad_optional_access-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}', cfg.available_features)),
# Tests that require std::bad_any_cast in the built library
Feature(name='availability-bad_any_cast-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}', cfg.available_features)),
# Tests that require std::pmr support in the built library
Feature(name='availability-pmr-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0)(.0)?}}', cfg.available_features)),
# Tests that require std::filesystem support in the built library
Feature(name='availability-filesystem-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12|13|14)(.0)?}}', cfg.available_features)),
# Tests that require the C++20 synchronization library (P1135R6 implemented by https://llvm.org/D68480) in the built library
Feature(name='availability-synchronization_library-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12|13|14|15)(.0)?}}', cfg.available_features)),
# Tests that require support for std::shared_mutex and std::shared_timed_mutex in the built library
Feature(name='availability-shared_mutex-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11)(.0)?}}', cfg.available_features)),
# Tests that require support for aligned allocation in the built library. This is about `operator new(..., std::align_val_t, ...)` specifically,
# not other forms of aligned allocation.
Feature(name='availability-aligned_allocation-missing',
when=lambda cfg: BooleanExpression.evaluate('stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}', cfg.available_features)),
# Tests that require 64-bit architecture
Feature(name='32-bit-pointer',
when=lambda cfg: sourceBuilds(cfg, """
# Tests that require std::to_chars(floating-point) in the built library
Feature(
name="availability-fp_to_chars-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0|13.0)(.0)?}}",
cfg.available_features,
),
),
# Tests that require https://wg21.link/P0482 support in the built library
Feature(
name="availability-char8_t_support-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0)(.0)?}}",
cfg.available_features,
),
),
# Tests that require __libcpp_verbose_abort support in the built library
Feature(
name="availability-verbose_abort-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0|13.0)(.0)?}}",
cfg.available_features,
),
),
# Tests that require std::bad_variant_access in the built library
Feature(
name="availability-bad_variant_access-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}",
cfg.available_features,
),
),
# Tests that require std::bad_optional_access in the built library
Feature(
name="availability-bad_optional_access-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}",
cfg.available_features,
),
),
# Tests that require std::bad_any_cast in the built library
Feature(
name="availability-bad_any_cast-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}",
cfg.available_features,
),
),
# Tests that require std::pmr support in the built library
Feature(
name="availability-pmr-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx{{(10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0)(.0)?}}",
cfg.available_features,
),
),
# Tests that require std::filesystem support in the built library
Feature(
name="availability-filesystem-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12|13|14)(.0)?}}",
cfg.available_features,
),
),
# Tests that require the C++20 synchronization library (P1135R6 implemented by https://llvm.org/D68480) in the built library
Feature(
name="availability-synchronization_library-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12|13|14|15)(.0)?}}",
cfg.available_features,
),
),
# Tests that require support for std::shared_mutex and std::shared_timed_mutex in the built library
Feature(
name="availability-shared_mutex-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11)(.0)?}}",
cfg.available_features,
),
),
# Tests that require support for aligned allocation in the built library. This is about `operator new(..., std::align_val_t, ...)` specifically,
# not other forms of aligned allocation.
Feature(
name="availability-aligned_allocation-missing",
when=lambda cfg: BooleanExpression.evaluate(
"stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{(9|10|11|12)(.0)?}}",
cfg.available_features,
),
),
# Tests that require 64-bit architecture
Feature(
name="32-bit-pointer",
when=lambda cfg: sourceBuilds(
cfg,
"""
int main(int, char**) {
static_assert(sizeof(void *) == 4);
}
""")),
""",
),
),
]

View File

@ -1,10 +1,10 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
import lit
import lit.formats
@ -13,6 +13,7 @@ import pipes
import re
import shutil
def _getTempPaths(test):
"""
Return the values to use for the %T and %t substitutions, respectively.
@ -22,40 +23,45 @@ def _getTempPaths(test):
"""
tmpDir, _ = lit.TestRunner.getTempPaths(test)
_, testName = os.path.split(test.getExecPath())
tmpDir = os.path.join(tmpDir, testName + '.dir')
tmpBase = os.path.join(tmpDir, 't')
tmpDir = os.path.join(tmpDir, testName + ".dir")
tmpBase = os.path.join(tmpDir, "t")
return tmpDir, tmpBase
def _checkBaseSubstitutions(substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
for s in ["%{cxx}", "%{compile_flags}", "%{link_flags}", "%{flags}", "%{exec}"]:
assert s in substitutions, "Required substitution {} was not provided".format(s)
def _executeScriptInternal(test, litConfig, commands):
"""
Returns (stdout, stderr, exitCode, timeoutInfo, parsedCommands)
"""
Returns (stdout, stderr, exitCode, timeoutInfo, parsedCommands)
TODO: This really should be easier to access from Lit itself
"""
parsedCommands = parseScript(test, preamble=commands)
TODO: This really should be easier to access from Lit itself
"""
parsedCommands = parseScript(test, preamble=commands)
_, tmpBase = _getTempPaths(test)
execDir = os.path.dirname(test.getExecPath())
res = lit.TestRunner.executeScriptInternal(test, litConfig, tmpBase, parsedCommands, execDir)
if isinstance(res, lit.Test.Result): # Handle failure to parse the Lit test
res = ('', res.output, 127, None)
(out, err, exitCode, timeoutInfo) = res
_, tmpBase = _getTempPaths(test)
execDir = os.path.dirname(test.getExecPath())
res = lit.TestRunner.executeScriptInternal(
test, litConfig, tmpBase, parsedCommands, execDir
)
if isinstance(res, lit.Test.Result): # Handle failure to parse the Lit test
res = ("", res.output, 127, None)
(out, err, exitCode, timeoutInfo) = res
# TODO: As a temporary workaround until https://reviews.llvm.org/D81892 lands, manually
# split any stderr output that is included in stdout. It shouldn't be there, but
# the Lit internal shell conflates stderr and stdout.
conflatedErrorOutput = re.search("(# command stderr:.+$)", out, flags=re.DOTALL)
if conflatedErrorOutput:
conflatedErrorOutput = conflatedErrorOutput.group(0)
out = out[:-len(conflatedErrorOutput)]
err += conflatedErrorOutput
# TODO: As a temporary workaround until https://reviews.llvm.org/D81892 lands, manually
# split any stderr output that is included in stdout. It shouldn't be there, but
# the Lit internal shell conflates stderr and stdout.
conflatedErrorOutput = re.search("(# command stderr:.+$)", out, flags=re.DOTALL)
if conflatedErrorOutput:
conflatedErrorOutput = conflatedErrorOutput.group(0)
out = out[: -len(conflatedErrorOutput)]
err += conflatedErrorOutput
return (out, err, exitCode, timeoutInfo, parsedCommands)
return (out, err, exitCode, timeoutInfo, parsedCommands)
def parseScript(test, preamble):
"""
@ -78,32 +84,41 @@ def parseScript(test, preamble):
# Check base substitutions and add the %{build} and %{run} convenience substitutions
_checkBaseSubstitutions(substitutions)
substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
substitutions.append(('%{run}', '%{exec} %t.exe'))
substitutions.append(
("%{build}", "%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe")
)
substitutions.append(("%{run}", "%{exec} %t.exe"))
# Parse the test file, including custom directives
additionalCompileFlags = []
fileDependencies = []
parsers = [
lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
lit.TestRunner.ParserKind.LIST,
initial_value=fileDependencies),
lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags)
lit.TestRunner.IntegratedTestKeywordParser(
"FILE_DEPENDENCIES:",
lit.TestRunner.ParserKind.LIST,
initial_value=fileDependencies,
),
lit.TestRunner.IntegratedTestKeywordParser(
"ADDITIONAL_COMPILE_FLAGS:",
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags,
),
]
# Add conditional parsers for ADDITIONAL_COMPILE_FLAGS. This should be replaced by first
# class support for conditional keywords in Lit, which would allow evaluating arbitrary
# Lit boolean expressions instead.
for feature in test.config.available_features:
parser = lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS({}):'.format(feature),
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags)
parser = lit.TestRunner.IntegratedTestKeywordParser(
"ADDITIONAL_COMPILE_FLAGS({}):".format(feature),
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags,
)
parsers.append(parser)
scriptInTest = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
require_script=not preamble)
scriptInTest = lit.TestRunner.parseIntegratedTestScript(
test, additional_parsers=parsers, require_script=not preamble
)
if isinstance(scriptInTest, lit.Test.Result):
return scriptInTest
@ -113,17 +128,22 @@ def parseScript(test, preamble):
# that file to the execution directory. Execute the copy from %S to allow
# relative paths from the test directory.
for dep in fileDependencies:
script += ['%dbg(SETUP) cd %S && cp {} %T'.format(dep)]
script += ["%dbg(SETUP) cd %S && cp {} %T".format(dep)]
script += preamble
script += scriptInTest
# Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
else (s, x) for (s, x) in substitutions]
substitutions = [
(s, x + " " + " ".join(additionalCompileFlags))
if s == "%{compile_flags}"
else (s, x)
for (s, x) in substitutions
]
# Perform substitutions in the script itself.
script = lit.TestRunner.applySubstitutions(script, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
script = lit.TestRunner.applySubstitutions(
script, substitutions, recursion_limit=test.config.recursiveExpansionLimit
)
return script
@ -213,80 +233,100 @@ class CxxStandardLibraryTest(lit.formats.TestFormat):
Equivalent to `%{exec} %t.exe`. This is intended to be used
in conjunction with the %{build} substitution.
"""
def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$',
'[.]compile[.]pass[.]cpp$', '[.]compile[.]pass[.]mm$',
'[.]compile[.]fail[.]cpp$',
'[.]link[.]pass[.]cpp$', '[.]link[.]pass[.]mm$',
'[.]link[.]fail[.]cpp$',
'[.]sh[.][^.]+$',
'[.]verify[.]cpp$',
'[.]fail[.]cpp$']
SUPPORTED_SUFFIXES = [
"[.]pass[.]cpp$",
"[.]pass[.]mm$",
"[.]compile[.]pass[.]cpp$",
"[.]compile[.]pass[.]mm$",
"[.]compile[.]fail[.]cpp$",
"[.]link[.]pass[.]cpp$",
"[.]link[.]pass[.]mm$",
"[.]link[.]fail[.]cpp$",
"[.]sh[.][^.]+$",
"[.]verify[.]cpp$",
"[.]fail[.]cpp$",
]
sourcePath = testSuite.getSourcePath(pathInSuite)
for filename in os.listdir(sourcePath):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
if filename.startswith(".") or filename in localConfig.excludes:
continue
filepath = os.path.join(sourcePath, filename)
if not os.path.isdir(filepath):
if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)
yield lit.Test.Test(
testSuite, pathInSuite + (filename,), localConfig
)
def execute(self, test, litConfig):
VERIFY_FLAGS = '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'
supportsVerify = 'verify-support' in test.config.available_features
VERIFY_FLAGS = (
"-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0"
)
supportsVerify = "verify-support" in test.config.available_features
filename = test.path_in_suite[-1]
if re.search('[.]sh[.][^.]+$', filename):
steps = [ ] # The steps are already in the script
if re.search("[.]sh[.][^.]+$", filename):
steps = [] # The steps are already in the script
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.pass.cpp') or filename.endswith('.compile.pass.mm'):
elif filename.endswith(".compile.pass.cpp") or filename.endswith(
".compile.pass.mm"
):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.fail.cpp'):
elif filename.endswith(".compile.fail.cpp"):
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.pass.cpp') or filename.endswith('.link.pass.mm'):
elif filename.endswith(".link.pass.cpp") or filename.endswith(".link.pass.mm"):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.fail.cpp'):
elif filename.endswith(".link.fail.cpp"):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe",
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.verify.cpp'):
elif filename.endswith(".verify.cpp"):
if not supportsVerify:
return lit.Test.Result(lit.Test.UNSUPPORTED,
"Test {} requires support for Clang-verify, which isn't supported by the compiler".format(test.getFullName()))
return lit.Test.Result(
lit.Test.UNSUPPORTED,
"Test {} requires support for Clang-verify, which isn't supported by the compiler".format(
test.getFullName()
),
)
steps = [
# Note: Use -Wno-error to make sure all diagnostics are not treated as errors,
# which doesn't make sense for clang-verify tests.
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(
VERIFY_FLAGS
)
]
return self._executeShTest(test, litConfig, steps)
# Make sure to check these ones last, since they will match other
# suffixes above too.
elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
elif filename.endswith(".pass.cpp") or filename.endswith(".pass.mm"):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} %t.exe"
"%dbg(EXECUTED AS) %{exec} %t.exe",
]
return self._executeShTest(test, litConfig, steps)
# This is like a .verify.cpp test when clang-verify is supported,
# otherwise it's like a .compile.fail.cpp test. This is only provided
# for backwards compatibility with the test suite.
elif filename.endswith('.fail.cpp'):
elif filename.endswith(".fail.cpp"):
if supportsVerify:
steps = [
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(
VERIFY_FLAGS
)
]
else:
steps = [
@ -294,19 +334,25 @@ class CxxStandardLibraryTest(lit.formats.TestFormat):
]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))
return lit.Test.Result(
lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename)
)
def _executeShTest(self, test, litConfig, steps):
if test.config.unsupported:
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')
return lit.Test.Result(lit.Test.UNSUPPORTED, "Test is unsupported")
script = parseScript(test, steps)
if isinstance(script, lit.Test.Result):
return script
if litConfig.noExecute:
return lit.Test.Result(lit.Test.XFAIL if test.isExpectedToFail() else lit.Test.PASS)
return lit.Test.Result(
lit.Test.XFAIL if test.isExpectedToFail() else lit.Test.PASS
)
else:
_, tmpBase = _getTempPaths(test)
useExternalSh = False
return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)
return lit.TestRunner._runShTest(
test, litConfig, useExternalSh, script, tmpBase
)

View File

@ -8,20 +8,21 @@ import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
kIsWindows = sys.platform in ["win32", "cygwin"]
class GoogleBenchmark(TestFormat):
def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
self.benchmark_args = list(benchmark_args)
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(";")
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += '.exe'
exe_suffix += ".exe"
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
self.test_suffixes = {exe_suffix, test_suffix + ".py"}
def getBenchmarkTests(self, path, litConfig, localConfig):
"""getBenchmarkTests(path) - [name]
@ -36,14 +37,14 @@ class GoogleBenchmark(TestFormat):
# TODO: allow splitting tests according to the "benchmark family" so
# the output for a single family of tests all belongs to the same test
# target.
list_test_cmd = [path, '--benchmark_list_tests']
list_test_cmd = [path, "--benchmark_list_tests"]
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
output = subprocess.check_output(list_test_cmd, env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-benchmarks in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
% (path, sys.exc_info()[1], exc.output)
)
raise StopIteration
nested_tests = []
@ -53,69 +54,72 @@ class GoogleBenchmark(TestFormat):
continue
index = 0
while ln[index*2:index*2+2] == ' ':
while ln[index * 2 : index * 2 + 2] == " ":
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
ln = ln[index * 2 :]
if ln.endswith("."):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
elif any([name.startswith("DISABLED_") for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
yield "".join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path,
suffixes=self.test_suffixes):
for fn in lit.util.listdir_files(dir_path, suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
yield lit.Test.Test(
testSuite, testPath, localConfig, file_path=execpath
)
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
testPath, testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
testName = namePrefix + "/" + testName
cmd = [testPath, '--benchmark_filter=%s$' % testName ] + self.benchmark_args
cmd = [testPath, "--benchmark_filter=%s$" % testName] + self.benchmark_args
if litConfig.noExecute:
return lit.Test.PASS, ''
return lit.Test.PASS, ""
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
cmd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime,
)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
return (
lit.Test.TIMEOUT,
"Reached timeout of {} seconds".format(litConfig.maxIndividualTestTime),
)
if exitCode:
return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err
return lit.Test.FAIL, ("exit code: %d\n" % exitCode) + out + err
passing_test_line = testName
if passing_test_line not in out:
msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
(passing_test_line, out, err))
msg = "Unable to find %r in google benchmark output:\n\n%s%s" % (
passing_test_line,
out,
err,
)
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, err + out

View File

@ -1,126 +1,158 @@
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
from libcxx.test.features import _isMSVC
import re
_warningFlags = [
'-Werror',
'-Wall',
'-Wctad-maybe-unsupported',
'-Wextra',
'-Wshadow',
'-Wundef',
'-Wunused-template',
'-Wno-unused-command-line-argument',
'-Wno-attributes',
'-Wno-pessimizing-move',
'-Wno-c++11-extensions',
'-Wno-noexcept-type',
'-Wno-aligned-allocation-unavailable',
'-Wno-atomic-alignment',
# GCC warns about places where we might want to add sized allocation/deallocation
# functions, but we know better what we're doing/testing in the test suite.
'-Wno-sized-deallocation',
# Turn off warnings about user-defined literals with reserved suffixes. Those are
# just noise since we are testing the Standard Library itself.
'-Wno-literal-suffix', # GCC
'-Wno-user-defined-literals', # Clang
# GCC warns about this when TEST_IS_CONSTANT_EVALUATED is used on a non-constexpr
# function. (This mostely happens in C++11 mode.)
# TODO(mordante) investigate a solution for this issue.
'-Wno-tautological-compare',
# -Wstringop-overread and -Wstringop-overflow seem to be a bit buggy currently
'-Wno-stringop-overread',
'-Wno-stringop-overflow',
# These warnings should be enabled in order to support the MSVC
# team using the test suite; They enable the warnings below and
# expect the test suite to be clean.
'-Wsign-compare',
'-Wunused-variable',
'-Wunused-parameter',
'-Wunreachable-code',
'-Wno-unused-local-typedef',
"-Werror",
"-Wall",
"-Wctad-maybe-unsupported",
"-Wextra",
"-Wshadow",
"-Wundef",
"-Wunused-template",
"-Wno-unused-command-line-argument",
"-Wno-attributes",
"-Wno-pessimizing-move",
"-Wno-c++11-extensions",
"-Wno-noexcept-type",
"-Wno-aligned-allocation-unavailable",
"-Wno-atomic-alignment",
# GCC warns about places where we might want to add sized allocation/deallocation
# functions, but we know better what we're doing/testing in the test suite.
"-Wno-sized-deallocation",
# Turn off warnings about user-defined literals with reserved suffixes. Those are
# just noise since we are testing the Standard Library itself.
"-Wno-literal-suffix", # GCC
"-Wno-user-defined-literals", # Clang
# GCC warns about this when TEST_IS_CONSTANT_EVALUATED is used on a non-constexpr
# function. (This mostely happens in C++11 mode.)
# TODO(mordante) investigate a solution for this issue.
"-Wno-tautological-compare",
# -Wstringop-overread and -Wstringop-overflow seem to be a bit buggy currently
"-Wno-stringop-overread",
"-Wno-stringop-overflow",
# These warnings should be enabled in order to support the MSVC
# team using the test suite; They enable the warnings below and
# expect the test suite to be clean.
"-Wsign-compare",
"-Wunused-variable",
"-Wunused-parameter",
"-Wunreachable-code",
"-Wno-unused-local-typedef",
]
_allStandards = ['c++03', 'c++11', 'c++14', 'c++17', 'c++20', 'c++23', 'c++26']
_allStandards = ["c++03", "c++11", "c++14", "c++17", "c++20", "c++23", "c++26"]
def getStdFlag(cfg, std):
fallbacks = {
'c++11': 'c++0x',
'c++14': 'c++1y',
'c++17': 'c++1z',
'c++20': 'c++2a',
'c++23': 'c++2b',
}
# TODO(LLVM-17) Remove this clang-tidy-16 work-around
if std == 'c++23':
std = 'c++2b'
if hasCompileFlag(cfg, '-std='+std):
return '-std='+std
if std in fallbacks and hasCompileFlag(cfg, '-std='+fallbacks[std]):
return '-std='+fallbacks[std]
return None
fallbacks = {
"c++11": "c++0x",
"c++14": "c++1y",
"c++17": "c++1z",
"c++20": "c++2a",
"c++23": "c++2b",
}
# TODO(LLVM-17) Remove this clang-tidy-16 work-around
if std == "c++23":
std = "c++2b"
if hasCompileFlag(cfg, "-std=" + std):
return "-std=" + std
if std in fallbacks and hasCompileFlag(cfg, "-std=" + fallbacks[std]):
return "-std=" + fallbacks[std]
return None
DEFAULT_PARAMETERS = [
Parameter(name='target_triple', type=str,
help="The target triple to compile the test suite for. This must be "
"compatible with the target that the tests will be run on.",
actions=lambda triple: filter(None, [
AddFeature('target={}'.format(triple)),
AddFlagIfSupported('--target={}'.format(triple)),
AddSubstitution('%{triple}', triple)
])),
Parameter(name='std', choices=_allStandards, type=str,
help="The version of the standard to compile the test suite with.",
default=lambda cfg: next(s for s in reversed(_allStandards) if getStdFlag(cfg, s)),
actions=lambda std: [
AddFeature(std),
AddSubstitution('%{cxx_std}', re.sub('\+','x', std)),
AddCompileFlag(lambda cfg: getStdFlag(cfg, std)),
]),
Parameter(name='enable_modules', choices=[True, False], type=bool, default=False,
help="Whether to build the test suite with Clang modules enabled.",
actions=lambda modules: [
AddFeature('modules-build'),
AddCompileFlag('-fmodules'),
AddCompileFlag('-fcxx-modules'), # AppleClang disregards -fmodules entirely when compiling C++. This enables modules for C++.
] if modules else []),
Parameter(name='enable_modules_lsv', choices=[True, False], type=bool, default=False,
help="Whether to enable Local Submodule Visibility in the Modules build.",
actions=lambda lsv: [
AddCompileFlag('-Xclang -fmodules-local-submodule-visibility'),
] if lsv else []),
Parameter(name='enable_exceptions', choices=[True, False], type=bool, default=True,
help="Whether to enable exceptions when compiling the test suite.",
actions=lambda exceptions: [] if exceptions else [
AddFeature('no-exceptions'),
AddCompileFlag('-fno-exceptions')
]),
Parameter(name='enable_rtti', choices=[True, False], type=bool, default=True,
help="Whether to enable RTTI when compiling the test suite.",
actions=lambda rtti: [] if rtti else [
AddFeature('no-rtti'),
AddCompileFlag('-fno-rtti')
]),
Parameter(name='stdlib', choices=['llvm-libc++', 'apple-libc++', 'libstdc++', 'msvc'], type=str, default='llvm-libc++',
help="""The C++ Standard Library implementation being tested.
Parameter(
name="target_triple",
type=str,
help="The target triple to compile the test suite for. This must be "
"compatible with the target that the tests will be run on.",
actions=lambda triple: filter(
None,
[
AddFeature("target={}".format(triple)),
AddFlagIfSupported("--target={}".format(triple)),
AddSubstitution("%{triple}", triple),
],
),
),
Parameter(
name="std",
choices=_allStandards,
type=str,
help="The version of the standard to compile the test suite with.",
default=lambda cfg: next(
s for s in reversed(_allStandards) if getStdFlag(cfg, s)
),
actions=lambda std: [
AddFeature(std),
AddSubstitution("%{cxx_std}", re.sub("\+", "x", std)),
AddCompileFlag(lambda cfg: getStdFlag(cfg, std)),
],
),
Parameter(
name="enable_modules",
choices=[True, False],
type=bool,
default=False,
help="Whether to build the test suite with Clang modules enabled.",
actions=lambda modules: [
AddFeature("modules-build"),
AddCompileFlag("-fmodules"),
AddCompileFlag(
"-fcxx-modules"
), # AppleClang disregards -fmodules entirely when compiling C++. This enables modules for C++.
]
if modules
else [],
),
Parameter(
name="enable_modules_lsv",
choices=[True, False],
type=bool,
default=False,
help="Whether to enable Local Submodule Visibility in the Modules build.",
actions=lambda lsv: [
AddCompileFlag("-Xclang -fmodules-local-submodule-visibility"),
]
if lsv
else [],
),
Parameter(
name="enable_exceptions",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable exceptions when compiling the test suite.",
actions=lambda exceptions: []
if exceptions
else [AddFeature("no-exceptions"), AddCompileFlag("-fno-exceptions")],
),
Parameter(
name="enable_rtti",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable RTTI when compiling the test suite.",
actions=lambda rtti: []
if rtti
else [AddFeature("no-rtti"), AddCompileFlag("-fno-rtti")],
),
Parameter(
name="stdlib",
choices=["llvm-libc++", "apple-libc++", "libstdc++", "msvc"],
type=str,
default="llvm-libc++",
help="""The C++ Standard Library implementation being tested.
Note that this parameter can also be used to encode different 'flavors' of the same
standard library, such as libc++ as shipped by a different vendor, if it has different
@ -134,88 +166,148 @@ DEFAULT_PARAMETERS = [
- libstdc++: The GNU C++ library typically shipped with GCC.
- msvc: The Microsoft implementation of the C++ Standard Library.
""",
actions=lambda stdlib: filter(None, [
AddFeature('stdlib={}'.format(stdlib)),
# Also add an umbrella feature 'stdlib=libc++' for all flavors of libc++, to simplify
# the test suite.
AddFeature('stdlib=libc++') if re.match('.+-libc\+\+', stdlib) else None
])),
Parameter(name='enable_warnings', choices=[True, False], type=bool, default=True,
help="Whether to enable warnings when compiling the test suite.",
actions=lambda warnings: [] if not warnings else
[AddOptionalWarningFlag(w) for w in _warningFlags] +
[AddCompileFlag('-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER')]
actions=lambda stdlib: filter(
None,
[
AddFeature("stdlib={}".format(stdlib)),
# Also add an umbrella feature 'stdlib=libc++' for all flavors of libc++, to simplify
# the test suite.
AddFeature("stdlib=libc++")
if re.match(".+-libc\+\+", stdlib)
else None,
],
),
),
Parameter(
name="enable_warnings",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable warnings when compiling the test suite.",
actions=lambda warnings: []
if not warnings
else [AddOptionalWarningFlag(w) for w in _warningFlags]
+ [AddCompileFlag("-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER")],
),
Parameter(
name="use_sanitizer",
choices=[
"",
"Address",
"HWAddress",
"Undefined",
"Memory",
"MemoryWithOrigins",
"Thread",
"DataFlow",
"Leaks",
],
type=str,
default="",
help="An optional sanitizer to enable when building and running the test suite.",
actions=lambda sanitizer: filter(
None,
[
AddFlag("-g -fno-omit-frame-pointer") if sanitizer else None,
AddFlag(
"-fsanitize=undefined -fno-sanitize=float-divide-by-zero -fno-sanitize-recover=all"
)
if sanitizer == "Undefined"
else None,
AddFeature("ubsan") if sanitizer == "Undefined" else None,
AddFlag("-fsanitize=address") if sanitizer == "Address" else None,
AddFeature("asan") if sanitizer == "Address" else None,
AddFlag("-fsanitize=hwaddress") if sanitizer == "HWAddress" else None,
AddFeature("hwasan") if sanitizer == "HWAddress" else None,
AddFlag("-fsanitize=memory")
if sanitizer in ["Memory", "MemoryWithOrigins"]
else None,
AddFeature("msan")
if sanitizer in ["Memory", "MemoryWithOrigins"]
else None,
AddFlag("-fsanitize-memory-track-origins")
if sanitizer == "MemoryWithOrigins"
else None,
AddFlag("-fsanitize=thread") if sanitizer == "Thread" else None,
AddFeature("tsan") if sanitizer == "Thread" else None,
AddFlag("-fsanitize=dataflow") if sanitizer == "DataFlow" else None,
AddFlag("-fsanitize=leaks") if sanitizer == "Leaks" else None,
AddFeature("sanitizer-new-delete")
if sanitizer
in ["Address", "HWAddress", "Memory", "MemoryWithOrigins", "Thread"]
else None,
],
),
),
Parameter(
name="enable_experimental",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable tests for experimental C++ Library features.",
actions=lambda experimental: [
# When linking in MSVC mode via the Clang driver, a -l<foo>
# maps to <foo>.lib, so we need to use -llibc++experimental here
# to make it link against the static libc++experimental.lib.
# We can't check for the feature 'msvc' in available_features
# as those features are added after processing parameters.
AddFeature("c++experimental"),
PrependLinkFlag(
lambda cfg: "-llibc++experimental"
if _isMSVC(cfg)
else "-lc++experimental"
),
Parameter(name='use_sanitizer', choices=['', 'Address', 'HWAddress', 'Undefined', 'Memory', 'MemoryWithOrigins', 'Thread', 'DataFlow', 'Leaks'], type=str, default='',
help="An optional sanitizer to enable when building and running the test suite.",
actions=lambda sanitizer: filter(None, [
AddFlag('-g -fno-omit-frame-pointer') if sanitizer else None,
AddFlag('-fsanitize=undefined -fno-sanitize=float-divide-by-zero -fno-sanitize-recover=all') if sanitizer == 'Undefined' else None,
AddFeature('ubsan') if sanitizer == 'Undefined' else None,
AddFlag('-fsanitize=address') if sanitizer == 'Address' else None,
AddFeature('asan') if sanitizer == 'Address' else None,
AddFlag('-fsanitize=hwaddress') if sanitizer == 'HWAddress' else None,
AddFeature('hwasan') if sanitizer == 'HWAddress' else None,
AddFlag('-fsanitize=memory') if sanitizer in ['Memory', 'MemoryWithOrigins'] else None,
AddFeature('msan') if sanitizer in ['Memory', 'MemoryWithOrigins'] else None,
AddFlag('-fsanitize-memory-track-origins') if sanitizer == 'MemoryWithOrigins' else None,
AddFlag('-fsanitize=thread') if sanitizer == 'Thread' else None,
AddFeature('tsan') if sanitizer == 'Thread' else None,
AddFlag('-fsanitize=dataflow') if sanitizer == 'DataFlow' else None,
AddFlag('-fsanitize=leaks') if sanitizer == 'Leaks' else None,
AddFeature('sanitizer-new-delete') if sanitizer in ['Address', 'HWAddress', 'Memory', 'MemoryWithOrigins', 'Thread'] else None,
])),
Parameter(name='enable_experimental', choices=[True, False], type=bool, default=True,
help="Whether to enable tests for experimental C++ Library features.",
actions=lambda experimental: [
# When linking in MSVC mode via the Clang driver, a -l<foo>
# maps to <foo>.lib, so we need to use -llibc++experimental here
# to make it link against the static libc++experimental.lib.
# We can't check for the feature 'msvc' in available_features
# as those features are added after processing parameters.
AddFeature('c++experimental'),
PrependLinkFlag(lambda cfg: '-llibc++experimental' if _isMSVC(cfg) else '-lc++experimental'),
AddCompileFlag('-D_LIBCPP_ENABLE_EXPERIMENTAL'),
] if experimental else [
AddFeature('libcpp-has-no-incomplete-pstl'),
]),
Parameter(name='long_tests', choices=[True, False], type=bool, default=True,
help="Whether to enable tests that take longer to run. This can be useful when running on a very slow device.",
actions=lambda enabled: [] if not enabled else [
AddFeature('long_tests')
]),
Parameter(name='enable_assertions', choices=[True, False], type=bool, default=False,
help="Whether to enable assertions when compiling the test suite. This is only meaningful when "
"running the tests against libc++.",
actions=lambda assertions: [
AddCompileFlag('-D_LIBCPP_ENABLE_ASSERTIONS=1'),
AddFeature('libcpp-has-assertions')
] if assertions else []),
Parameter(name='additional_features', type=list, default=[],
help="A comma-delimited list of additional features that will be enabled when running the tests. "
"This should be used sparingly since specifying ad-hoc features manually is error-prone and "
"brittle in the long run as changes are made to the test suite.",
actions=lambda features: [AddFeature(f) for f in features]),
Parameter(name='enable_transitive_includes', choices=[True, False], type=bool, default=True,
help="Whether to enable backwards-compatibility transitive includes when running the tests. This "
"is provided to ensure that the trimmed-down version of libc++ does not bit-rot in between "
"points at which we bulk-remove transitive includes.",
actions=lambda enabled: [] if enabled else [
AddFeature('transitive-includes-disabled'),
AddCompileFlag('-D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES')
]),
AddCompileFlag("-D_LIBCPP_ENABLE_EXPERIMENTAL"),
]
if experimental
else [
AddFeature("libcpp-has-no-incomplete-pstl"),
],
),
Parameter(
name="long_tests",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable tests that take longer to run. This can be useful when running on a very slow device.",
actions=lambda enabled: [] if not enabled else [AddFeature("long_tests")],
),
Parameter(
name="enable_assertions",
choices=[True, False],
type=bool,
default=False,
help="Whether to enable assertions when compiling the test suite. This is only meaningful when "
"running the tests against libc++.",
actions=lambda assertions: [
AddCompileFlag("-D_LIBCPP_ENABLE_ASSERTIONS=1"),
AddFeature("libcpp-has-assertions"),
]
if assertions
else [],
),
Parameter(
name="additional_features",
type=list,
default=[],
help="A comma-delimited list of additional features that will be enabled when running the tests. "
"This should be used sparingly since specifying ad-hoc features manually is error-prone and "
"brittle in the long run as changes are made to the test suite.",
actions=lambda features: [AddFeature(f) for f in features],
),
Parameter(
name="enable_transitive_includes",
choices=[True, False],
type=bool,
default=True,
help="Whether to enable backwards-compatibility transitive includes when running the tests. This "
"is provided to ensure that the trimmed-down version of libc++ does not bit-rot in between "
"points at which we bulk-remove transitive includes.",
actions=lambda enabled: []
if enabled
else [
AddFeature("transitive-includes-disabled"),
AddCompileFlag("-D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES"),
],
),
]

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""run.py is a utility for running a program.
@ -21,10 +21,12 @@ import subprocess
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--execdir', type=str, required=True)
parser.add_argument('--codesign_identity', type=str, required=False, default=None)
parser.add_argument('--env', type=str, nargs='*', required=False, default=[])
parser.add_argument('--prepend_env', type=str, nargs='*', required=False, default=[])
parser.add_argument("--execdir", type=str, required=True)
parser.add_argument("--codesign_identity", type=str, required=False, default=None)
parser.add_argument("--env", type=str, nargs="*", required=False, default=[])
parser.add_argument(
"--prepend_env", type=str, nargs="*", required=False, default=[]
)
parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
commandLine = args.command
@ -35,35 +37,37 @@ def main():
# below. This allows us to do custom processing like codesigning test-executables.
# It's also possible for there to be no such executable, for example in the case
# of a .sh.cpp test.
isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)
isTestExe = lambda exe: exe.endswith(".tmp.exe") and os.path.exists(exe)
# Do any necessary codesigning of test-executables found in the command line.
if args.codesign_identity:
for exe in filter(isTestExe, commandLine):
subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})
subprocess.check_call(
["xcrun", "codesign", "-f", "-s", args.codesign_identity, exe], env={}
)
# Extract environment variables into a dictionary
env = {k : v for (k, v) in map(lambda s: s.split('=', 1), args.env)}
env = {k: v for (k, v) in map(lambda s: s.split("=", 1), args.env)}
# Set environment variables where we prepend the given value to the
# existing environment variable.
for (k, v) in map(lambda s: s.split('=', 1), args.prepend_env):
for (k, v) in map(lambda s: s.split("=", 1), args.prepend_env):
if k in os.environ:
v = v + os.pathsep + os.environ[k]
env[k] = v
if platform.system() == 'Windows':
if platform.system() == "Windows":
# Pass some extra variables through on Windows:
# COMSPEC is needed for running subprocesses via std::system().
if 'COMSPEC' in os.environ:
env['COMSPEC'] = os.environ.get('COMSPEC')
if "COMSPEC" in os.environ:
env["COMSPEC"] = os.environ.get("COMSPEC")
# TEMP is needed for placing temp files in a sensible directory.
if 'TEMP' in os.environ:
env['TEMP'] = os.environ.get('TEMP')
if "TEMP" in os.environ:
env["TEMP"] = os.environ.get("TEMP")
# Run the command line with the given environment in the execution directory.
return subprocess.call(commandLine, cwd=args.execdir, env=env, shell=False)
if __name__ == '__main__':
if __name__ == "__main__":
exit(main())

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""
Runs an executable on a remote host.
@ -25,37 +25,43 @@ import tempfile
from shlex import quote as cmd_quote
def ssh(args, command):
cmd = ['ssh', '-oBatchMode=yes']
cmd = ["ssh", "-oBatchMode=yes"]
if args.extra_ssh_args is not None:
cmd.extend(shlex.split(args.extra_ssh_args))
return cmd + [args.host, command]
def scp(args, src, dst):
cmd = ['scp', '-q', '-oBatchMode=yes']
cmd = ["scp", "-q", "-oBatchMode=yes"]
if args.extra_scp_args is not None:
cmd.extend(shlex.split(args.extra_scp_args))
return cmd + [src, '{}:{}'.format(args.host, dst)]
return cmd + [src, "{}:{}".format(args.host, dst)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, required=True)
parser.add_argument('--execdir', type=str, required=True)
parser.add_argument('--tempdir', type=str, required=False, default='/tmp')
parser.add_argument('--extra-ssh-args', type=str, required=False)
parser.add_argument('--extra-scp-args', type=str, required=False)
parser.add_argument('--codesign_identity', type=str, required=False, default=None)
parser.add_argument('--env', type=str, nargs='*', required=False, default=[])
parser.add_argument('--prepend_env', type=str, nargs='*', required=False, default=[])
parser.add_argument("--host", type=str, required=True)
parser.add_argument("--execdir", type=str, required=True)
parser.add_argument("--tempdir", type=str, required=False, default="/tmp")
parser.add_argument("--extra-ssh-args", type=str, required=False)
parser.add_argument("--extra-scp-args", type=str, required=False)
parser.add_argument("--codesign_identity", type=str, required=False, default=None)
parser.add_argument("--env", type=str, nargs="*", required=False, default=[])
parser.add_argument(
"--prepend_env", type=str, nargs="*", required=False, default=[]
)
parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
commandLine = args.command
# Create a temporary directory where the test will be run.
# That is effectively the value of %T on the remote host.
tmp = subprocess.check_output(ssh(args, 'mktemp -d {}/libcxx.XXXXXXXXXX'.format(args.tempdir)), universal_newlines=True).strip()
tmp = subprocess.check_output(
ssh(args, "mktemp -d {}/libcxx.XXXXXXXXXX".format(args.tempdir)),
universal_newlines=True,
).strip()
# HACK:
# If an argument is a file that ends in `.tmp.exe`, assume it is the name
@ -64,20 +70,23 @@ def main():
# and changing their path when running on the remote host. It's also possible
# for there to be no such executable, for example in the case of a .sh.cpp
# test.
isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)
isTestExe = lambda exe: exe.endswith(".tmp.exe") and os.path.exists(exe)
pathOnRemote = lambda file: posixpath.join(tmp, os.path.basename(file))
try:
# Do any necessary codesigning of test-executables found in the command line.
if args.codesign_identity:
for exe in filter(isTestExe, commandLine):
subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})
subprocess.check_call(
["xcrun", "codesign", "-f", "-s", args.codesign_identity, exe],
env={},
)
# tar up the execution directory (which contains everything that's needed
# to run the test), and copy the tarball over to the remote host.
try:
tmpTar = tempfile.NamedTemporaryFile(suffix='.tar', delete=False)
with tarfile.open(fileobj=tmpTar, mode='w') as tarball:
tmpTar = tempfile.NamedTemporaryFile(suffix=".tar", delete=False)
with tarfile.open(fileobj=tmpTar, mode="w") as tarball:
tarball.add(args.execdir, arcname=os.path.basename(args.execdir))
# Make sure we close the file before we scp it, because accessing
@ -93,22 +102,22 @@ def main():
# Untar the dependencies in the temporary directory and remove the tarball.
remoteCommands = [
'tar -xf {} -C {} --strip-components 1'.format(remoteTarball, tmp),
'rm {}'.format(remoteTarball)
"tar -xf {} -C {} --strip-components 1".format(remoteTarball, tmp),
"rm {}".format(remoteTarball),
]
# Make sure all test-executables in the remote command line have 'execute'
# permissions on the remote host. The host that compiled the test-executable
# might not have a notion of 'executable' permissions.
for exe in map(pathOnRemote, filter(isTestExe, commandLine)):
remoteCommands.append('chmod +x {}'.format(exe))
remoteCommands.append("chmod +x {}".format(exe))
# Execute the command through SSH in the temporary directory, with the
# correct environment. We tweak the command line to run it on the remote
# host by transforming the path of test-executables to their path in the
# temporary directory on the remote host.
commandLine = (pathOnRemote(x) if isTestExe(x) else x for x in commandLine)
remoteCommands.append('cd {}'.format(tmp))
remoteCommands.append("cd {}".format(tmp))
if args.prepend_env:
# We can't sensibly know the original value of the env vars
@ -117,17 +126,17 @@ def main():
if args.env:
env = list(map(cmd_quote, args.env))
remoteCommands.append('export {}'.format(' '.join(args.env)))
remoteCommands.append("export {}".format(" ".join(args.env)))
remoteCommands.append(subprocess.list2cmdline(commandLine))
# Finally, SSH to the remote host and execute all the commands.
rc = subprocess.call(ssh(args, ' && '.join(remoteCommands)))
rc = subprocess.call(ssh(args, " && ".join(remoteCommands)))
return rc
finally:
# Make sure the temporary directory is removed when we're done.
subprocess.check_call(ssh(args, 'rm -r {}'.format(tmp)))
subprocess.check_call(ssh(args, "rm -r {}".format(tmp)))
if __name__ == '__main__':
if __name__ == "__main__":
exit(main())

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# ===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""
@ -17,34 +17,60 @@ from libcxx.sym_check import diff, util
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
description="Extract a list of symbols from a shared library."
)
parser.add_argument(
'--names-only', dest='names_only',
help='Only print symbol names',
action='store_true', default=False)
"--names-only",
dest="names_only",
help="Only print symbol names",
action="store_true",
default=False,
)
parser.add_argument(
'--removed-only', dest='removed_only',
help='Only print removed symbols',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--strict', dest='strict',
help="Exit with a non-zero status if any symbols "
"differ",
action='store_true', default=False)
"--removed-only",
dest="removed_only",
help="Only print removed symbols",
action="store_true",
default=False,
)
parser.add_argument(
'-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
"--only-stdlib-symbols",
dest="only_stdlib",
help="Filter all symbols not related to the stdlib",
action="store_true",
default=False,
)
parser.add_argument(
'--demangle', dest='demangle', action='store_true', default=False)
"--strict",
dest="strict",
help="Exit with a non-zero status if any symbols " "differ",
action="store_true",
default=False,
)
parser.add_argument(
'old_syms', metavar='old-syms', type=str,
help='The file containing the old symbol list or a library')
"-o",
"--output",
dest="output",
help="The output file. stdout is used if not given",
type=str,
action="store",
default=None,
)
parser.add_argument(
'new_syms', metavar='new-syms', type=str,
help='The file containing the new symbol list or a library')
"--demangle", dest="demangle", action="store_true", default=False
)
parser.add_argument(
"old_syms",
metavar="old-syms",
type=str,
help="The file containing the old symbol list or a library",
)
parser.add_argument(
"new_syms",
metavar="new-syms",
type=str,
help="The file containing the new symbol list or a library",
)
args = parser.parse_args()
old_syms_list = util.extract_or_load(args.old_syms)
@ -58,15 +84,16 @@ def main():
if args.removed_only:
added = {}
report, is_break, is_different = diff.report_diff(
added, removed, changed, names_only=args.names_only,
demangle=args.demangle)
added, removed, changed, names_only=args.names_only, demangle=args.demangle
)
if args.output is None:
print(report)
else:
with open(args.output, 'w') as f:
f.write(report + '\n')
with open(args.output, "w") as f:
f.write(report + "\n")
exit_code = 1 if is_break or (args.strict and is_different) else 0
sys.exit(exit_code)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@ -7,4 +7,5 @@
lit_config.fatal(
"You seem to be running Lit directly -- you should be running Lit through "
"<build>/bin/llvm-lit, which will ensure that the right Lit configuration "
"file is used.")
"file is used."
)

View File

@ -1,6 +1,9 @@
def is_arm_linux_eabi(triple):
return ('arm' in triple) and ('linux' in triple) and ('eabi' in triple)
return ("arm" in triple) and ("linux" in triple) and ("eabi" in triple)
is_native = hasattr(config.root, 'target_triple') and (config.root.host_triple == config.root.target_triple)
is_native = hasattr(config.root, "target_triple") and (
config.root.host_triple == config.root.target_triple
)
if not is_native or not is_arm_linux_eabi(config.root.host_triple):
config.unsupported = True

View File

@ -16,106 +16,106 @@ from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'libunwind'
copyright = u'2011-%d, LLVM Project' % date.today().year
project = "libunwind"
copyright = "2011-%d, LLVM Project" % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '17.0'
version = "17.0"
# The full version, including alpha/beta/rc tags.
release = '17.0'
release = "17.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
today_fmt = "%Y-%m-%d"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
pygments_style = "friendly"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
html_theme = "haiku"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@ -124,101 +124,95 @@ html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libunwinddoc'
htmlhelp_basename = "libunwinddoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'libunwind.tex', u'libunwind Documentation',
u'LLVM project', 'manual'),
("contents", "libunwind.tex", "libunwind Documentation", "LLVM project", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'libunwind', u'libunwind Documentation',
[u'LLVM project'], 1)
]
man_pages = [("contents", "libunwind", "libunwind Documentation", ["LLVM project"], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@ -227,19 +221,25 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('contents', 'libunwind', u'libunwind Documentation',
u'LLVM project', 'libunwind', 'LLVM Unwinder',
'Miscellaneous'),
(
"contents",
"libunwind",
"libunwind Documentation",
"LLVM project",
"libunwind",
"LLVM Unwinder",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.

View File

@ -7,4 +7,5 @@
lit_config.fatal(
"You seem to be running Lit directly -- you should be running Lit through "
"<build>/bin/llvm-lit, which will ensure that the right Lit configuration "
"file is used.")
"file is used."
)