Initial commit

This commit is contained in:
Yervant7
2024-01-16 19:28:29 -03:00
commit 8c0524dc0f
9054 changed files with 1081727 additions and 0 deletions

View File

@@ -0,0 +1,18 @@
# Targets that regenerate the auto-generated parts of libc++ and its test
# suite. Each target shells out to a Python script in libcxx/utils/.
# VERBATIM is added so argument escaping is handled identically by every
# generator/platform.
add_custom_target(libcxx-generate-public-header-transitive-inclusion-tests
    COMMAND "${Python3_EXECUTABLE}" "${LIBCXX_SOURCE_DIR}/utils/generate_header_inclusion_tests.py"
    COMMENT "Generate tests checking for mandated transitive includes in public headers."
    VERBATIM)
add_custom_target(libcxx-generate-public-header-tests
    COMMAND "${Python3_EXECUTABLE}" "${LIBCXX_SOURCE_DIR}/utils/generate_header_tests.py"
    COMMENT "Generate tests for including public headers."
    VERBATIM)
add_custom_target(libcxx-generate-feature-test-macros
    COMMAND "${Python3_EXECUTABLE}" "${LIBCXX_SOURCE_DIR}/utils/generate_feature_test_macro_components.py"
    COMMENT "Generate the <version> header and tests for feature test macros."
    VERBATIM)
# Umbrella target: running it triggers all of the generators above.
add_custom_target(libcxx-generate-files
    DEPENDS libcxx-generate-public-header-transitive-inclusion-tests
            libcxx-generate-public-header-tests
            libcxx-generate-feature-test-macros
    COMMENT "Create all the auto-generated files in libc++ and its tests.")

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
import sys
def print_and_exit(msg):
    """Report ``msg`` on stderr and terminate the process with status 1."""
    sys.stderr.write('{}\n'.format(msg))
    sys.exit(1)
def main():
    """Concatenate two or more input files into a single output file.

    Exits with status 1 (via print_and_exit) when fewer than two inputs are
    given. Each input that does not end with a newline has one appended so
    files do not run together on a single line.
    """
    parser = ArgumentParser(
        description="Concatenate two files into a single file")
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='The output file. stdout is used if not given',
        type=str, action='store')
    parser.add_argument(
        'files', metavar='files', nargs='+',
        help='The files to concatenate')
    args = parser.parse_args()
    if len(args.files) < 2:
        print_and_exit('fewer than 2 inputs provided')
    data = ''
    for filename in args.files:
        with open(filename, 'r') as f:
            data += f.read()
        # Ensure this input contributes a trailing newline before the next
        # file is appended.
        if len(data) != 0 and data[-1] != '\n':
            data += '\n'
    # BUG FIX: the original `assert len(data) > 0 and "cannot cat empty files"`
    # could never fire -- a non-empty string literal is truthy, so the whole
    # expression was True whenever the length check failed to matter. Use the
    # `assert expr, message` form instead.
    assert len(data) > 0, "cannot cat empty files"
    with open(args.output, 'w') as f:
        f.write(data)

if __name__ == '__main__':
    main()
    sys.exit(0)

View File

@@ -0,0 +1,108 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
#
# This Dockerfile describes the base image used to run the various libc++
# build bots. By default, the image runs the Buildkite Agent, however one
# can also just start the image with a shell to debug CI failures.
#
# To start a Buildkite Agent, run it as:
#   $ docker run --env-file <secrets> -it $(docker build -q libcxx/utils/ci)
#
# The environment variables in `<secrets>` should be the ones necessary
# to run a BuildKite agent.
#
# If you're only looking to run the Docker image locally for debugging a
# build bot, see the `run-buildbot-container` script located in this directory.
#
# A pre-built version of this image is maintained on DockerHub as ldionne/libcxx-builder.
# To update the image, rebuild it and push it to ldionne/libcxx-builder (which
# will obviously only work if you have permission to do so).
#
#   $ docker build -t ldionne/libcxx-builder libcxx/utils/ci
#   $ docker push ldionne/libcxx-builder
#
FROM ubuntu:jammy

# Make sure apt-get doesn't try to prompt for stuff like our time zone, etc.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y bash curl

# Install various tools used by the build or the test suite
RUN apt-get update && apt-get install -y ninja-build python3 python3-sphinx python3-distutils python3-psutil git gdb

# Locales for gdb and localization tests
RUN apt-get update && apt-get install -y language-pack-en language-pack-fr \
                                          language-pack-ja language-pack-ru \
                                          language-pack-zh-hans
# These two are not enabled by default so generate them
RUN printf "fr_CA ISO-8859-1\ncs_CZ ISO-8859-2" >> /etc/locale.gen
# BUG FIX: locale-gen reads extra locale definitions from
# /usr/local/share/i18n/SUPPORTED; the previous path ("i1en") was a typo,
# so the fr_CA/cs_CZ locales were never generated from this file.
RUN mkdir /usr/local/share/i18n/
RUN printf "fr_CA ISO-8859-1\ncs_CZ ISO-8859-2" >> /usr/local/share/i18n/SUPPORTED
RUN locale-gen

# Install Clang <latest>, <latest-1> and ToT, which are the ones we support.
# We also install <latest-2> because we need to support the "latest-1" of the
# current LLVM release branch, which is effectively the <latest-2> of the
# tip-of-trunk LLVM. For example, after branching LLVM 14 but before branching
# LLVM 15, we still need to have Clang 12 in this Docker image because the LLVM
# 14 release branch CI uses it. The tip-of-trunk CI will never use Clang 12,
# though.
ENV LLVM_LATEST_VERSION=14
RUN apt-get update && apt-get install -y lsb-release wget software-properties-common
RUN wget https://apt.llvm.org/llvm.sh -O /tmp/llvm.sh
# TODO Use the apt.llvm.org version after branching to LLVM 15
RUN apt-get update && apt-get install -y clang-$(($LLVM_LATEST_VERSION - 2))
#RUN bash /tmp/llvm.sh $(($LLVM_LATEST_VERSION - 2)) # for CI transitions
RUN bash /tmp/llvm.sh $(($LLVM_LATEST_VERSION - 1)) # previous release
RUN bash /tmp/llvm.sh $LLVM_LATEST_VERSION          # latest release
RUN bash /tmp/llvm.sh $(($LLVM_LATEST_VERSION + 1)) # current ToT

# Make the latest version of Clang the "default" compiler on the system
# TODO: In the future, all jobs should be using an explicitly-versioned version of Clang instead,
# and we can get rid of this entirely.
RUN ln -fs /usr/bin/clang++-$LLVM_LATEST_VERSION /usr/bin/c++ && [ -e $(readlink /usr/bin/c++) ]
RUN ln -fs /usr/bin/clang-$LLVM_LATEST_VERSION /usr/bin/cc && [ -e $(readlink /usr/bin/cc) ]

# Install clang-format
RUN apt-get install -y clang-format-$LLVM_LATEST_VERSION
RUN ln -s /usr/bin/clang-format-$LLVM_LATEST_VERSION /usr/bin/clang-format && [ -e $(readlink /usr/bin/clang-format) ]
RUN ln -s /usr/bin/git-clang-format-$LLVM_LATEST_VERSION /usr/bin/git-clang-format && [ -e $(readlink /usr/bin/git-clang-format) ]

# Install clang-tidy
RUN apt-get install -y clang-tidy-$LLVM_LATEST_VERSION
RUN ln -s /usr/bin/clang-tidy-$LLVM_LATEST_VERSION /usr/bin/clang-tidy && [ -e $(readlink /usr/bin/clang-tidy) ]

# Install the most recent GCC, like clang install the previous version as a transition.
# Use apt-get (not apt) here: apt's CLI is not guaranteed stable and warns
# when used in scripts; every other layer in this file already uses apt-get.
ENV GCC_LATEST_VERSION=12
RUN apt-get update && apt-get install -y gcc-$((GCC_LATEST_VERSION - 1)) g++-$((GCC_LATEST_VERSION - 1))
RUN apt-get update && apt-get install -y gcc-$GCC_LATEST_VERSION g++-$GCC_LATEST_VERSION

# Install a recent CMake
RUN wget https://github.com/Kitware/CMake/releases/download/v3.21.1/cmake-3.21.1-linux-x86_64.sh -O /tmp/install-cmake.sh
RUN bash /tmp/install-cmake.sh --prefix=/usr --exclude-subdir --skip-license
RUN rm /tmp/install-cmake.sh

# Change the user to a non-root user, since some of the libc++ tests
# (e.g. filesystem) require running as non-root. Also setup passwordless sudo.
RUN apt-get update && apt-get install -y sudo
RUN echo "ALL ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN useradd --create-home libcxx-builder
USER libcxx-builder
WORKDIR /home/libcxx-builder

# Install the Buildkite agent and dependencies. This must be done as non-root
# for the Buildkite agent to be installed in a path where we can find it.
RUN bash -c "$(curl -sL https://raw.githubusercontent.com/buildkite/agent/main/install.sh)"
ENV PATH="${PATH}:/home/libcxx-builder/.buildkite-agent/bin"
RUN echo "tags=\"queue=libcxx-builders,arch=$(uname -m),os=linux\"" >> "/home/libcxx-builder/.buildkite-agent/buildkite-agent.cfg"

# By default, start the Buildkite agent (this requires a token).
CMD buildkite-agent start

View File

@@ -0,0 +1,178 @@
#!/usr/bin/env bash
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# Abort the whole script as soon as any command fails.
set -e
PROGNAME="$(basename "${0}")"
# error <words...>: print "error: <words>" and terminate with status 1.
function error() { printf "error: %s\n" "$*"; exit 1; }
# usage: print the help text. The heredoc below is emitted verbatim, so no
# comments may be placed inside it.
function usage() {
cat <<EOF
Usage:
${PROGNAME} [options]
[-h|--help] Display this help and exit.
--llvm-root <DIR> Path to the root of the LLVM monorepo. Only the libcxx
and libcxxabi directories are required.
--build-dir <DIR> Path to the directory to use for building. This will
contain intermediate build products.
--install-dir <DIR> Path to the directory to install the library to.
--symbols-dir <DIR> Path to the directory to install the .dSYM bundle to.
--architectures "<arch>..." A whitespace separated list of architectures to build for.
The library will be built for each architecture independently,
and a universal binary containing all architectures will be
created from that.
--headers-only Only install the header part of the library -- don't actually
build the full library.
--version X[.Y[.Z]] The version of the library to encode in the dylib.
EOF
}
# Parse the command line. Each value-taking option shifts twice: once for
# the flag itself and once for its argument.
while [[ $# -gt 0 ]]; do
case ${1} in
-h|--help)
usage
exit 0
;;
--llvm-root)
llvm_root="${2}"
shift; shift
;;
--build-dir)
build_dir="${2}"
shift; shift
;;
--symbols-dir)
symbols_dir="${2}"
shift; shift
;;
--install-dir)
install_dir="${2}"
shift; shift
;;
--architectures)
architectures="${2}"
shift; shift
;;
--headers-only)
headers_only=true
shift
;;
--version)
version="${2}"
shift; shift
;;
*)
error "Unknown argument '${1}'"
;;
esac
done
# Every option below is mandatory. ${!arg} is bash indirect expansion: it
# reads the variable whose NAME is stored in ${arg}; ${!arg+x} expands to
# nothing only when that variable is unset (distinguishing unset from empty).
for arg in llvm_root build_dir symbols_dir install_dir architectures version; do
if [ -z ${!arg+x} ]; then
error "Missing required argument '--${arg//_/-}'"
elif [ "${!arg}" == "" ]; then
error "Argument to --${arg//_/-} must not be empty"
fi
done
# Allow using relative paths
# realpath <path>: echo an absolute version of <path>. Absolute inputs pass
# through unchanged; relative ones are anchored at the current working
# directory (with a leading "./" stripped). Unlike realpath(1), this does
# not resolve symlinks or "..".
function realpath() {
    case "${1}" in
        /*) echo "${1}" ;;
        *) echo "$(pwd)/${1#./}" ;;
    esac
}
# Normalize the user-supplied directories to absolute paths.
# BUG FIX: assign with `printf -v` instead of the previous
# eval "${arg}=\"${path}\"" -- the eval re-expanded any $, backtick or
# double-quote contained in a path, corrupting the value (and allowing
# arbitrary command substitution). printf -v writes the string verbatim.
for arg in llvm_root build_dir symbols_dir install_dir; do
    printf -v "${arg}" '%s' "$(realpath "${!arg}")"
done
# step <message>: print <message> framed above and below by a dashed line
# of matching length.
function step() {
    separator="$(printf "%0.s-" $(seq 1 ${#1}))"
    echo
    echo "${separator}"
    echo "${1}"
    echo "${separator}"
}
# Configure, build and install libc++/libc++abi once per requested
# architecture. Each architecture gets its own build tree and its own
# "<arch>-install" prefix so the slices can later be merged with lipo.
for arch in ${architectures}; do
step "Building libc++.dylib and libc++abi.dylib for architecture ${arch}"
mkdir -p "${build_dir}/${arch}"
xcrun cmake -S "${llvm_root}/runtimes" \
-B "${build_dir}/${arch}" \
-GNinja \
-DCMAKE_MAKE_PROGRAM="$(xcrun --find ninja)" \
-C "${llvm_root}/libcxx/cmake/caches/Apple.cmake" \
-DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi" \
-DCMAKE_INSTALL_PREFIX="${build_dir}/${arch}-install" \
-DCMAKE_INSTALL_NAME_DIR="/usr/lib" \
-DCMAKE_OSX_ARCHITECTURES="${arch}" \
-DLIBCXXABI_LIBRARY_VERSION="${version}"
# With --headers-only, only the header install targets are built; otherwise
# build and install the full libraries.
if [ "$headers_only" = true ]; then
xcrun cmake --build "${build_dir}/${arch}" --target install-cxx-headers install-cxxabi-headers -- -v
else
xcrun cmake --build "${build_dir}/${arch}" --target install-cxx install-cxxabi -- -v
fi
done
# universal_dylib <name>: merge the per-architecture copies of dylib <name>
# into a single universal binary, install a stripped copy under
# ${install_dir}/usr/lib, and keep the unstripped dylib plus its dSYM
# bundle under ${symbols_dir}.
function universal_dylib() {
dylib=${1}
# One input path per architecture (newline-separated).
inputs=$(for arch in ${architectures}; do echo "${build_dir}/${arch}-install/lib/${dylib}"; done)
step "Creating a universal dylib ${dylib} from the dylibs for all architectures"
# ${inputs} is intentionally unquoted so each path becomes its own argument.
xcrun lipo -create ${inputs} -output "${build_dir}/${dylib}"
step "Installing the (stripped) universal dylib to ${install_dir}/usr/lib"
mkdir -p "${install_dir}/usr/lib"
cp "${build_dir}/${dylib}" "${install_dir}/usr/lib/${dylib}"
xcrun strip -S "${install_dir}/usr/lib/${dylib}"
step "Installing the unstripped dylib and the dSYM bundle to ${symbols_dir}"
xcrun dsymutil "${build_dir}/${dylib}" -o "${symbols_dir}/${dylib}.dSYM"
cp "${build_dir}/${dylib}" "${symbols_dir}/${dylib}"
}
if [ "$headers_only" != true ]; then
universal_dylib libc++.1.dylib
universal_dylib libc++abi.dylib
# Provide the conventional libc++.dylib -> libc++.1.dylib symlink.
(cd "${install_dir}/usr/lib" && ln -s "libc++.1.dylib" libc++.dylib)
fi
# Install the headers by copying the headers from one of the built architectures
# into the install directory. Headers from all architectures should be the same.
step "Installing the libc++ and libc++abi headers to ${install_dir}/usr/include"
any_arch=$(echo ${architectures} | cut -d ' ' -f 1)
mkdir -p "${install_dir}/usr/include"
ditto "${build_dir}/${any_arch}-install/include" "${install_dir}/usr/include"
if [[ $EUID -eq 0 ]]; then # Only chown if we're running as root
chown -R root:wheel "${install_dir}/usr/include"
fi
if [ "$headers_only" != true ]; then
step "Installing the libc++ and libc++abi licenses"
mkdir -p "${install_dir}/usr/local/OpenSourceLicenses"
cp "${llvm_root}/libcxx/LICENSE.TXT" "${install_dir}/usr/local/OpenSourceLicenses/libcxx.txt"
cp "${llvm_root}/libcxxabi/LICENSE.TXT" "${install_dir}/usr/local/OpenSourceLicenses/libcxxabi.txt"
# Also install universal static archives for libc++ and libc++abi
# One archive path per architecture; intentionally left unquoted below so
# each path becomes a separate libtool argument.
libcxx_archives=$(for arch in ${architectures}; do echo "${build_dir}/${arch}-install/lib/libc++.a"; done)
libcxxabi_archives=$(for arch in ${architectures}; do echo "${build_dir}/${arch}-install/lib/libc++abi.a"; done)
step "Creating universal static archives for libc++ and libc++abi from the static archives for each architecture"
mkdir -p "${install_dir}/usr/local/lib/libcxx"
xcrun libtool -static ${libcxx_archives} -o "${install_dir}/usr/local/lib/libcxx/libc++-static.a"
xcrun libtool -static ${libcxxabi_archives} -o "${install_dir}/usr/local/lib/libcxx/libc++abi-static.a"
fi

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
#
# This file generates a Buildkite pipeline that triggers the libc++ CI
# job(s) if needed.
# See https://buildkite.com/docs/agent/v3/cli-pipeline#pipeline-format.
#
# Invoked by CI on pre-merge check for a commit.
#
# Directory containing this script. NOTE(review): DIR is computed but never
# used below -- presumably vestigial; confirm with callers before removing.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Only trigger the libc++ CI pipeline when the last commit touches one of
# the runtime-related directories; otherwise emit nothing and exit cleanly.
if ! git diff --name-only HEAD~1 | grep -q -E "^libcxx/|^libcxxabi/|^libunwind/|^runtimes/|^cmake/"; then
# libcxx/, libcxxabi/, libunwind/, runtimes/ or cmake/ are not affected
exit 0
fi
# Extract an optional "Review-ID: <id>" trailer from the commit message so
# the triggered build can link back to the review.
reviewID="$(git log --format=%B -n 1 | sed -nE 's/^Review-ID:[[:space:]]*(.+)$/\1/p')"
if [[ "${reviewID}" != "" ]]; then
buildMessage="https://llvm.org/${reviewID}"
else
buildMessage="Push to branch ${BUILDKITE_BRANCH}"
fi
# Emit the pipeline definition on stdout for the Buildkite agent to upload;
# the unquoted heredoc expands the variables before output.
cat <<EOF
steps:
- trigger: "libcxx-ci"
build:
message: "${buildMessage}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
EOF

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
#
# This file generates a Buildkite pipeline that triggers the libc++ CI jobs.
# See https://buildkite.com/docs/agent/v3/cli-pipeline#pipeline-format.
#
# Invoked by CI on full builds.
#
# Directory containing this script. NOTE(review): DIR is computed but never
# used below -- presumably vestigial; confirm with callers before removing.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Emit the pipeline definition on stdout for the Buildkite agent to upload;
# the unquoted heredoc expands the BUILDKITE_* variables before output.
cat <<EOF
steps:
- trigger: "libcxx-ci"
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
EOF

View File

@@ -0,0 +1,860 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
#
# This file describes the various pre-commit CI bots used to test libc++.
#
# This file should never contain logic -- all the logic must be offloaded
# into scripts. This is critical to being able to reproduce CI issues outside
# of the CI environment, which is important for debugging.
#
# It is also worth noting that this script is split into several sections, the
# goal being to reduce the load on testers when a commit is known to fail.
#
steps:
#
# Light pre-commit tests for things like formatting or when people forget
# to update generated files.
#
- label: "Format"
command: "libcxx/utils/ci/run-buildbot check-format"
artifact_paths:
- "**/clang-format.patch"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
soft_fail:
- exit_status: 1
timeout_in_minutes: 120
- label: "Generated output"
command: "libcxx/utils/ci/run-buildbot check-generated-output"
artifact_paths:
- "**/generated_output.patch"
- "**/generated_output.status"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Documentation"
command: "libcxx/utils/ci/run-buildbot documentation"
artifact_paths:
- "**/test-results.xml"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
#
# General testing with the default configuration, under all the supported
# Standard modes, with Clang and GCC. This catches most issues upfront.
# The goal of this step is to catch most issues while being very fast.
#
- wait
- label: "C++2b"
command: "libcxx/utils/ci/run-buildbot generic-cxx2b"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "C++11"
command: "libcxx/utils/ci/run-buildbot generic-cxx11"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "C++03"
command: "libcxx/utils/ci/run-buildbot generic-cxx03"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Modular build"
command: "libcxx/utils/ci/run-buildbot generic-modules"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "GCC 12 / C++latest"
command: "libcxx/utils/ci/run-buildbot generic-gcc"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
#
# All other supported configurations of libc++.
#
- wait
- label: "C++20"
command: "libcxx/utils/ci/run-buildbot generic-cxx20"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "C++17"
command: "libcxx/utils/ci/run-buildbot generic-cxx17"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "C++14"
command: "libcxx/utils/ci/run-buildbot generic-cxx14"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Tests with the supported compilers.
- label: "GCC 12 / C++11"
command: "libcxx/utils/ci/run-buildbot generic-gcc-cxx11"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Clang 13"
command: "libcxx/utils/ci/run-buildbot generic-clang-13"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Clang 14"
command: "libcxx/utils/ci/run-buildbot generic-clang-14"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Tests with the sanitizers.
- group: "Sanitizers"
steps:
- label: "ASAN"
command: "libcxx/utils/ci/run-buildbot generic-asan"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "TSAN"
command: "libcxx/utils/ci/run-buildbot generic-tsan"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "UBSAN"
command: "libcxx/utils/ci/run-buildbot generic-ubsan"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "MSAN"
command: "libcxx/utils/ci/run-buildbot generic-msan"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Tests with the various supported ways to build libc++.
- label: "Bootstrapping build"
command: "libcxx/utils/ci/run-buildbot bootstrapping-build"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- group: "Legacy"
steps:
- label: "Legacy Lit configuration"
command: "libcxx/utils/ci/run-buildbot legacy-test-config"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Legacy LLVM_ENABLE_PROJECTS build"
command: "libcxx/utils/ci/run-buildbot legacy-project-build"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Tests with various build configurations.
- label: "Static libraries"
command: "libcxx/utils/ci/run-buildbot generic-static"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Shared library with merged ABI and unwinder libraries"
command: "libcxx/utils/ci/run-buildbot generic-merged"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Assertions enabled"
command: "libcxx/utils/ci/run-buildbot generic-assertions"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Debug mode"
command: "libcxx/utils/ci/run-buildbot generic-debug-mode"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No transitive includes"
command: "libcxx/utils/ci/run-buildbot generic-no-transitive-includes"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "With LLVM's libunwind"
command: "libcxx/utils/ci/run-buildbot generic-with_llvm_unwinder"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- group: "Parts disabled"
steps:
- label: "No threads"
command: "libcxx/utils/ci/run-buildbot generic-no-threads"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No filesystem"
command: "libcxx/utils/ci/run-buildbot generic-no-filesystem"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No random device"
command: "libcxx/utils/ci/run-buildbot generic-no-random_device"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No locale"
command: "libcxx/utils/ci/run-buildbot generic-no-localization"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No Unicode"
command: "libcxx/utils/ci/run-buildbot generic-no-unicode"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No wide characters"
command: "libcxx/utils/ci/run-buildbot generic-no-wide-characters"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No experimental features"
command: "libcxx/utils/ci/run-buildbot generic-no-experimental"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "No exceptions"
command: "libcxx/utils/ci/run-buildbot generic-noexceptions"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Unstable ABI"
command: "libcxx/utils/ci/run-buildbot generic-abi-unstable"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Other non-testing CI jobs
- label: "Benchmarks"
command: "libcxx/utils/ci/run-buildbot benchmarks"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "linux"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Tests on non-Unix platforms
- group: ":windows: Windows"
steps:
- label: "Clang-cl (DLL)"
command: "bash libcxx/utils/ci/run-buildbot clang-cl-dll"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "windows"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Clang-cl (Static)"
command: "bash libcxx/utils/ci/run-buildbot clang-cl-static"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "windows"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "MinGW (DLL, x86_64)"
command: "bash libcxx/utils/ci/run-buildbot mingw-dll"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "windows"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "MinGW (Static, x86_64)"
command: "bash libcxx/utils/ci/run-buildbot mingw-static"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "windows"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "MinGW (DLL, i686)"
command: "bash libcxx/utils/ci/run-buildbot mingw-dll-i686"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "windows"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- group: ":apple: Apple"
steps:
- label: "MacOS x86_64"
command: "libcxx/utils/ci/run-buildbot generic-cxx20"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "x86_64"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "MacOS arm64"
command: "libcxx/utils/ci/run-buildbot generic-cxx20"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "arm64"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Build with the configuration we use to generate libc++.dylib on Apple platforms
- label: "Apple system"
command: "libcxx/utils/ci/run-buildbot apple-system"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "arm64" # This can technically run on any architecture, but we have more resources on arm64 so we pin this job to arm64
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
# Test back-deployment to older Apple platforms
- label: "Apple back-deployment macosx10.9"
command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-10.9"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "x86_64" # We need to use x86_64 for back-deployment CI on this target since macOS didn't support arm64 back then.
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Apple back-deployment macosx10.15"
command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-10.15"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "x86_64" # We need to use x86_64 for back-deployment CI on this target since macOS didn't support arm64 back then.
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Apple back-deployment macosx11.0 arm64"
command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-11.0"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
arch: "arm64"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Apple back-deployment with assertions enabled"
command: "libcxx/utils/ci/run-buildbot apple-system-backdeployment-assertions-11.0"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders"
os: "macos"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- group: "ARM"
steps:
- label: "AArch64"
command: "libcxx/utils/ci/run-buildbot aarch64"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "aarch64"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "AArch64 -fno-exceptions"
command: "libcxx/utils/ci/run-buildbot aarch64-noexceptions"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "aarch64"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Armv8"
command: "libcxx/utils/ci/run-buildbot armv8"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "armv8l"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Armv8 -fno-exceptions"
command: "libcxx/utils/ci/run-buildbot armv8-noexceptions"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "armv8l"
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Armv7"
command: "libcxx/utils/ci/run-buildbot armv7"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "armv8l" # Compiling for v7, running on v8 hardware
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "Armv7 -fno-exceptions"
command: "libcxx/utils/ci/run-buildbot armv7-noexceptions"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
agents:
queue: "libcxx-builders-linaro-arm"
arch: "armv8l" # Compiling for v7, running on v8 hardware
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- group: "AIX"
steps:
- label: "AIX (32-bit)"
command: "libcxx/utils/ci/run-buildbot aix"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
env:
OBJECT_MODE: "32"
agents:
queue: libcxx-builders
os: aix
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120
- label: "AIX (64-bit)"
command: "libcxx/utils/ci/run-buildbot aix"
artifact_paths:
- "**/test-results.xml"
- "**/*.abilist"
env:
OBJECT_MODE: "64"
agents:
queue: libcxx-builders
os: aix
retry:
automatic:
- exit_status: -1 # Agent was lost
limit: 2
timeout_in_minutes: 120

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# This simple script can be used to set up a CI node running MacOS.
# An additional requirement that is *not* handled by this script is the
# installation of Xcode, which requires manual intervention.
#
# This script should first be run from an administrator account to install
# the dependencies necessary for running CI. It can be run without having
# to clone the LLVM repository with:
#
# $ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/llvm/llvm-project/main/libcxx/utils/ci/macos-ci-setup)"
#
# If you perform system updates, you should re-run the script from the
# administrator account -- this allows updating the packages used for
# CI and the BuildKite agent tags.
#
# Once the necessary dependencies have been installed, you can switch
# to a non-administrator account and run the script again, passing the
# --setup-launchd argument. That will install a Launchd agent to run the
# BuildKite agent whenever the current user is logged in. You should enable
# automatic login for that user, so that if the CI node goes down, the user
# is logged back in automatically when the node goes up again, and the
# BuildKite agent starts automatically.
#
# Alternatively, you can simply run the BuildKite agent by hand using:
#
# $ caffeinate -s buildkite-agent start --build-path /tmp/buildkite-builds
set -e
# Install a Launchd agent that will automatically start the BuildKite agent at login
if [[ ${1} == "--setup-launchd" ]]; then
# Resolve the Homebrew prefix so the plist below points at the right
# buildkite-agent binary (differs between Intel and Apple Silicon).
HOMEBREW_PREFIX="$(brew --prefix)"
mkdir -p ~/Library/LaunchAgents
# The plist content below is runtime data written verbatim; variables are
# expanded now (at setup time), not when launchd reads the file.
cat <<EOF > ~/Library/LaunchAgents/libcxx.buildkite-agent.plist
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>libcxx.buildkite-agent</string>
<key>ProgramArguments</key>
<array>
<string>${HOMEBREW_PREFIX}/bin/buildkite-agent</string>
<string>start</string>
<string>--build-path</string>
<string>${HOME}/libcxx.buildkite-agent/builds</string>
</array>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>${HOMEBREW_PREFIX}/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
</dict>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<dict>
<key>SuccessfulExit</key>
<false/>
</dict>
<key>ProcessType</key>
<string>Interactive</string>
<key>ThrottleInterval</key>
<integer>30</integer>
<key>StandardOutPath</key>
<string>${HOME}/libcxx.buildkite-agent/stdout.log</string>
<key>StandardErrorPath</key>
<string>${HOME}/libcxx.buildkite-agent/stderr.log</string>
</dict>
</plist>
EOF
echo "Starting BuildKite agent"
launchctl load ~/Library/LaunchAgents/libcxx.buildkite-agent.plist
else
echo "Installing CI dependencies for macOS"
if [[ -z "${BUILDKITE_AGENT_TOKEN}" ]]; then
echo "The BUILDKITE_AGENT_TOKEN environment variable must be set to a BuildKite Agent token when calling this script."
exit 1
fi
# Install Homebrew
if ! which -s brew; then
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
fi
# Install the required tools to run CI
brew update
# Upgrade packages that are already present, install the missing ones.
for package in sphinx-doc python3 ninja cmake clang-format buildkite/buildkite/buildkite-agent; do
if brew ls --versions "${package}" >/dev/null; then
brew upgrade "${package}"
else
brew install "${package}"
fi
done
python3 -m pip install --upgrade psutil
echo "Setting up BuildKite Agent config"
# Extract "major.minor" from the full macOS product version for the agent tags.
version="$(sw_vers -productVersion | sed -E 's/([0-9]+).([0-9]+).[0-9]+/\1.\2/')"
arch="$(uname -m)"
cat <<EOF > "$(brew --prefix)/etc/buildkite-agent/buildkite-agent.cfg"
token="${BUILDKITE_AGENT_TOKEN}"
tags="queue=libcxx-builders,arch=${arch},os=macos,os=macos${version}"
build-path=/tmp/buildkite-builds # Note that this is actually overwritten when starting the agent with launchd
EOF
fi

View File

@@ -0,0 +1,33 @@
#!/bin/bash -eu
#
# This script runs the continuous fuzzing tests on OSS-Fuzz.
#

# For the UBSan job, additionally trap unsigned integer overflow, which the
# fuzzers are expected to be clean against.
if [[ ${SANITIZER} = *undefined* ]]; then
CXXFLAGS="${CXXFLAGS} -fsanitize=unsigned-integer-overflow -fsanitize-trap=unsigned-integer-overflow"
fi

BUILD=cxx_build_dir
INSTALL=cxx_install_dir

# Configure the monorepo and install only the libc++ headers into a local
# prefix. Use `mkdir -p` so re-running the script doesn't fail if the build
# directory already exists, and quote paths against whitespace.
mkdir -p "${BUILD}"
cmake -S "${PWD}" -B "${BUILD}" \
-DLLVM_ENABLE_PROJECTS="libcxx;libcxxabi" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_INSTALL_PREFIX="${INSTALL}"
cmake --build "${BUILD}" --target install-cxx-headers

# Build one fuzzing binary per fuzzing test in libc++. CXX, CXXFLAGS, OUT and
# LIB_FUZZING_ENGINE are provided by the OSS-Fuzz environment; CXXFLAGS and
# LIB_FUZZING_ENGINE are intentionally left unquoted so they word-split into
# separate compiler arguments.
for test in libcxx/test/libcxx/fuzzing/*.pass.cpp; do
exe="$(basename "${test}")"
exe="${exe%.pass.cpp}"
${CXX} ${CXXFLAGS} \
-std=c++14 \
-DLIBCPP_OSS_FUZZ \
-D_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS \
-nostdinc++ -cxx-isystem "${INSTALL}/include/c++/v1" \
-lpthread -ldl \
-o "${OUT}/${exe}" \
"${test}" \
${LIB_FUZZING_ENGINE}
done

View File

@@ -0,0 +1,637 @@
#!/usr/bin/env bash
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
# Echo commands, fail on the first error (including inside pipelines).
set -ex
set -o pipefail
# Force the C locale so sorting and the character-range greps further down
# behave deterministically across builders.
unset LANG
unset LC_ALL
unset LC_COLLATE
PROGNAME="$(basename "${0}")"
# Print the command-line help for this script. The heredoc text is emitted
# verbatim on stdout.
function usage() {
cat <<EOF
Usage:
${PROGNAME} [options] <BUILDER>
[-h|--help] Display this help and exit.
--llvm-root <DIR> Path to the root of the LLVM monorepo. By default, we try
to figure it out based on the current working directory.
--build-dir <DIR> The directory to use for building the library. By default,
this is '<llvm-root>/build/<builder>'.
--osx-roots <DIR> Path to pre-downloaded macOS dylibs. By default, we download
them from Green Dragon. This is only relevant at all when
running back-deployment testing if one wants to override
the old dylibs we use to run the tests with different ones.
EOF
}
# Parse the command-line arguments. Any argument that is not a recognized
# option is taken as the builder name (the last one wins).
while [[ $# -gt 0 ]]; do
case ${1} in
-h|--help)
usage
exit 0
;;
--llvm-root)
MONOREPO_ROOT="${2}"
shift; shift
;;
--build-dir)
BUILD_DIR="${2}"
shift; shift
;;
--osx-roots)
OSX_ROOTS="${2}"
shift; shift
;;
*)
BUILDER="${1}"
shift
;;
esac
done
# Default the monorepo root to the current git checkout and derive the
# build/install directories from it unless given explicitly.
MONOREPO_ROOT="${MONOREPO_ROOT:="$(git rev-parse --show-toplevel)"}"
BUILD_DIR="${BUILD_DIR:=${MONOREPO_ROOT}/build/${BUILDER}}"
INSTALL_DIR="${BUILD_DIR}/install"
# If we can find Ninja/CMake provided by Xcode, use those since we know their
# version will generally work with the Clang shipped in Xcode (e.g. if Clang
# knows about -std=c++20, the CMake bundled in Xcode will probably know about
# that flag too).
if xcrun --find ninja &>/dev/null; then NINJA="$(xcrun --find ninja)"; else NINJA="ninja"; fi
if xcrun --find cmake &>/dev/null; then CMAKE="$(xcrun --find cmake)"; else CMAKE="cmake"; fi
# Remove the build directory entirely so each builder starts from scratch.
function clean() {
rm -rf "${BUILD_DIR}"
}
# Run the common CMake configuration for the runtimes build. Extra arguments
# are forwarded verbatim to CMake so callers can layer on cache files and
# per-builder options.
function generate-cmake-base() {
echo "--- Generating CMake"
${CMAKE} \
-S "${MONOREPO_ROOT}/runtimes" \
-B "${BUILD_DIR}" \
-GNinja -DCMAKE_MAKE_PROGRAM="${NINJA}" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
-DLIBCXX_ENABLE_WERROR=YES \
-DLIBCXXABI_ENABLE_WERROR=YES \
-DLIBUNWIND_ENABLE_WERROR=YES \
-DLLVM_LIT_ARGS="-sv --show-unsupported --xunit-xml-output test-results.xml --timeout=1500 --time-tests" \
"${@}"
}
# Configure the default runtimes set (libc++, libc++abi, libunwind) on top of
# the common base configuration. Extra arguments are forwarded to CMake.
function generate-cmake() {
generate-cmake-base \
-DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
-DLIBCXX_CXX_ABI=libcxxabi \
"${@}"
}
# Configure a clang-cl (Windows/MSVC-style) build of libc++ only.
function generate-cmake-libcxx-win() {
# TODO: Clang-cl in MSVC configurations don't have access to compiler_rt
# builtins helpers for int128 division. See
# https://reviews.llvm.org/D91139#2429595 for a comment about longterm
# intent for handling the issue. In the meantime, define
# -D_LIBCPP_HAS_NO_INT128 (both when building the library itself and
# when building tests) to allow enabling filesystem for running tests,
# even if it uses a non-permanent ABI.
generate-cmake-base \
-DLLVM_ENABLE_RUNTIMES="libcxx" \
-DCMAKE_C_COMPILER=clang-cl \
-DCMAKE_CXX_COMPILER=clang-cl \
-DLIBCXX_ENABLE_FILESYSTEM=YES \
-DLIBCXX_EXTRA_SITE_DEFINES="_LIBCPP_HAS_NO_INT128" \
"${@}"
}
# Install the runtimes into the fake prefix, then run the libc++, libc++abi
# and libunwind test suites. Assumes the build was already configured.
function check-runtimes() {
echo "--- Installing libc++, libc++abi and libunwind to a fake location"
${NINJA} -vC "${BUILD_DIR}" install-cxx install-cxxabi install-unwind
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
echo "+++ Running the libc++abi tests"
${NINJA} -vC "${BUILD_DIR}" check-cxxabi
echo "+++ Running the libunwind tests"
${NINJA} -vC "${BUILD_DIR}" check-unwind
}
# TODO: The goal is to test this against all configurations. We should also move
# this to the Lit test suite instead of being a separate CMake target.
# Verify the exported symbols against the checked-in ABI list. On mismatch,
# regenerate the list (so the diff shows up in CI logs) and still fail.
function check-abi-list() {
echo "+++ Running the libc++ ABI list test"
${NINJA} -vC "${BUILD_DIR}" check-cxx-abilist || (
echo "+++ Generating the libc++ ABI list after failed check"
${NINJA} -vC "${BUILD_DIR}" generate-cxx-abilist
false
)
}
# Build and run the libc++ benchmarks for the configured build.
function check-cxx-benchmarks() {
echo "--- Running the benchmarks"
${NINJA} -vC "${BUILD_DIR}" check-cxx-benchmarks
}
# Print the version of a few tools to aid diagnostics in some cases
${CMAKE} --version
${NINJA} --version

# Dispatch on the requested builder name. Each arm starts from a clean build
# directory, configures with the appropriate cache file/options, and runs the
# relevant test targets.
case "${BUILDER}" in
check-format)
clean
echo "+++ Checking formatting"
# We need to set --extensions so that clang-format checks extensionless files.
mkdir -p ${BUILD_DIR}
git-clang-format \
--binary /usr/bin/clang-format --diff \
--extensions ',h,hh,hpp,hxx,c,cc,cxx,cpp' HEAD~1 \
-- \
libcxx/{benchmarks,include,src,test} \
libcxxabi/{fuzz,include,src,test} \
| tee ${BUILD_DIR}/clang-format.patch
# Check if the diff is empty, fail otherwise.
! grep -q '^--- a' ${BUILD_DIR}/clang-format.patch
;;
check-generated-output)
# `! foo` doesn't work properly with `set -e`, use `! foo || false` instead.
# https://stackoverflow.com/questions/57681955/set-e-does-not-respect-logical-not
clean
generate-cmake
# Reject patches that forgot to re-run the generator scripts.
echo "+++ Making sure the generator scripts were run"
${NINJA} -vC "${BUILD_DIR}" libcxx-generate-files
git diff | tee ${BUILD_DIR}/generated_output.patch
git ls-files -o --exclude-standard | tee ${BUILD_DIR}/generated_output.status
! grep -q '^--- a' ${BUILD_DIR}/generated_output.patch || false
if [ -s ${BUILD_DIR}/generated_output.status ]; then
echo "It looks like not all the generator scripts were run,"
echo "did you forget to build the libcxx-generate-files target?"
echo "Did you add all new files it generated?"
false
fi
# Reject patches that introduce non-ASCII characters or hard tabs.
# Depends on LC_COLLATE set at the top of this script.
! grep -rn '[^ -~]' libcxx/include/ || false
# Reject patches that introduce dependency cycles in the headers.
python3 libcxx/utils/graph_header_deps.py >/dev/null
;;
# Per-dialect builds, one per C++ standard, driven by CMake cache files.
generic-cxx03)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx03.cmake"
check-runtimes
check-abi-list
;;
generic-cxx11)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx11.cmake"
check-runtimes
check-abi-list
;;
generic-cxx14)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx14.cmake"
check-runtimes
check-abi-list
;;
generic-cxx17)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx17.cmake"
check-runtimes
check-abi-list
;;
generic-cxx20)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx20.cmake"
check-runtimes
check-abi-list
;;
generic-cxx2b)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx2b.cmake"
check-runtimes
check-abi-list
;;
generic-assertions)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-assertions.cmake"
check-runtimes
check-abi-list
;;
generic-debug-mode)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-debug-mode.cmake"
check-runtimes
# We don't check the ABI lists because the debug mode ABI is not stable
;;
generic-noexceptions)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-noexceptions.cmake"
check-runtimes
check-abi-list
;;
generic-modules)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-modules.cmake"
check-runtimes
check-abi-list
;;
generic-static)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-static.cmake"
check-runtimes
;;
generic-merged)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-merged.cmake" \
-DLIBCXX_TEST_CONFIG="llvm-libc++-shared.cfg.in" \
-DLIBCXXABI_TEST_CONFIG="llvm-libc++abi-merged.cfg.in" \
-DLIBUNWIND_TEST_CONFIG="llvm-libunwind-merged.cfg.in"
check-runtimes
;;
# Builds against specific host compilers.
generic-clang-13)
export CC=clang-13
export CXX=clang++-13
clean
generate-cmake
check-runtimes
check-abi-list
;;
generic-clang-14)
export CC=clang-14
export CXX=clang++-14
clean
generate-cmake
check-runtimes
check-abi-list
;;
generic-gcc)
export CC=gcc-12
export CXX=g++-12
clean
generate-cmake -DLIBCXX_ENABLE_WERROR=NO \
-DLIBCXXABI_ENABLE_WERROR=NO \
-DLIBUNWIND_ENABLE_WERROR=NO
check-runtimes
;;
generic-gcc-cxx11)
export CC=gcc-12
export CXX=g++-12
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-cxx11.cmake" \
-DLIBCXX_ENABLE_WERROR=NO \
-DLIBCXXABI_ENABLE_WERROR=NO \
-DLIBUNWIND_ENABLE_WERROR=NO
check-runtimes
;;
# Sanitizer builds.
generic-asan)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-asan.cmake"
check-runtimes
;;
generic-msan)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-msan.cmake"
check-runtimes
;;
generic-tsan)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-tsan.cmake"
check-runtimes
;;
generic-ubsan)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-ubsan.cmake"
check-runtimes
;;
generic-with_llvm_unwinder)
clean
generate-cmake -DLIBCXXABI_USE_LLVM_UNWINDER=ON
check-runtimes
;;
# Feature-reduced library configurations.
generic-no-transitive-includes)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-transitive-includes.cmake"
check-runtimes
;;
generic-no-threads)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-threads.cmake"
check-runtimes
;;
generic-no-filesystem)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-filesystem.cmake"
check-runtimes
;;
generic-no-random_device)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-random_device.cmake"
check-runtimes
;;
generic-no-localization)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-localization.cmake"
check-runtimes
;;
generic-no-unicode)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-unicode.cmake"
check-runtimes
;;
generic-no-wide-characters)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-wide-characters.cmake"
check-runtimes
;;
generic-no-experimental)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-no-experimental.cmake"
check-runtimes
check-abi-list
;;
generic-abi-unstable)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Generic-abi-unstable.cmake"
check-runtimes
;;
# Apple-specific configurations.
apple-system)
clean
arch="$(uname -m)"
xcrun --sdk macosx \
${MONOREPO_ROOT}/libcxx/utils/ci/apple-install-libcxx.sh \
--llvm-root ${MONOREPO_ROOT} \
--build-dir ${BUILD_DIR} \
--install-dir ${INSTALL_DIR} \
--symbols-dir "${BUILD_DIR}/symbols" \
--architectures "${arch}" \
--version "999.99"
# TODO: It would be better to run the tests against the fake-installed version of libc++ instead
xcrun --sdk macosx ninja -vC "${BUILD_DIR}/${arch}" check-cxx check-cxxabi check-cxx-abilist
;;
apple-system-backdeployment-assertions-*)
clean
if [[ "${OSX_ROOTS}" == "" ]]; then
echo "--- Downloading previous macOS dylibs"
PREVIOUS_DYLIBS_URL="https://dl.dropboxusercontent.com/s/gmcfxwgl9f9n6pu/libcxx-roots.tar.gz"
OSX_ROOTS="${BUILD_DIR}/macos-roots"
mkdir -p "${OSX_ROOTS}"
curl "${PREVIOUS_DYLIBS_URL}" | tar -xz --strip-components=1 -C "${OSX_ROOTS}"
fi
# Strip the builder-name prefix to recover the deployment target (e.g. 11.0).
DEPLOYMENT_TARGET="${BUILDER#apple-system-backdeployment-assertions-}"
# TODO: On Apple platforms, we never produce libc++abi.1.dylib or libunwind.1.dylib,
# only libc++abi.dylib and libunwind.dylib. Fix that in the build so that the
# tests stop searching for @rpath/libc++abi.1.dylib and @rpath/libunwind.1.dylib.
cp "${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}/libc++abi.dylib" \
"${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}/libc++abi.1.dylib"
cp "${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}/libunwind.dylib" \
"${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}/libunwind.1.dylib"
arch="$(uname -m)"
PARAMS="target_triple=${arch}-apple-macosx${DEPLOYMENT_TARGET}"
PARAMS+=";cxx_runtime_root=${OSX_ROOTS}/macOS/libc++/${DEPLOYMENT_TARGET}"
PARAMS+=";abi_runtime_root=${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}"
PARAMS+=";unwind_runtime_root=${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}"
PARAMS+=";use_system_cxx_lib=True"
PARAMS+=";enable_assertions=True"
# TODO: Enable experimental features during back-deployment -- right now some of the availability
# annotations are incorrect, leading to test failures that could be avoided.
PARAMS+=";enable_experimental=False"
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Apple.cmake" \
-DLIBCXX_TEST_CONFIG="apple-libc++-backdeployment.cfg.in" \
-DLIBCXXABI_TEST_CONFIG="apple-libc++abi-backdeployment.cfg.in" \
-DLIBUNWIND_TEST_CONFIG="apple-libunwind-backdeployment.cfg.in" \
-DLIBCXX_TEST_PARAMS="${PARAMS}" \
-DLIBCXXABI_TEST_PARAMS="${PARAMS}" \
-DLIBUNWIND_TEST_PARAMS="${PARAMS}"
check-runtimes
;;
apple-system-backdeployment-*)
clean
if [[ "${OSX_ROOTS}" == "" ]]; then
echo "--- Downloading previous macOS dylibs"
PREVIOUS_DYLIBS_URL="https://dl.dropboxusercontent.com/s/gmcfxwgl9f9n6pu/libcxx-roots.tar.gz"
OSX_ROOTS="${BUILD_DIR}/macos-roots"
mkdir -p "${OSX_ROOTS}"
curl "${PREVIOUS_DYLIBS_URL}" | tar -xz --strip-components=1 -C "${OSX_ROOTS}"
fi
DEPLOYMENT_TARGET="${BUILDER#apple-system-backdeployment-}"
# TODO: On Apple platforms, we never produce libc++abi.1.dylib or libunwind.1.dylib,
# only libc++abi.dylib and libunwind.dylib. Fix that in the build so that the
# tests stop searching for @rpath/libc++abi.1.dylib and @rpath/libunwind.1.dylib.
cp "${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}/libc++abi.dylib" \
"${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}/libc++abi.1.dylib"
cp "${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}/libunwind.dylib" \
"${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}/libunwind.1.dylib"
arch="$(uname -m)"
PARAMS="target_triple=${arch}-apple-macosx${DEPLOYMENT_TARGET}"
PARAMS+=";cxx_runtime_root=${OSX_ROOTS}/macOS/libc++/${DEPLOYMENT_TARGET}"
PARAMS+=";abi_runtime_root=${OSX_ROOTS}/macOS/libc++abi/${DEPLOYMENT_TARGET}"
PARAMS+=";unwind_runtime_root=${OSX_ROOTS}/macOS/libunwind/${DEPLOYMENT_TARGET}"
PARAMS+=";use_system_cxx_lib=True"
# TODO: Enable experimental features during back-deployment -- right now some of the availability
# annotations are incorrect, leading to test failures that could be avoided.
PARAMS+=";enable_experimental=False"
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Apple.cmake" \
-DLIBCXX_TEST_CONFIG="apple-libc++-backdeployment.cfg.in" \
-DLIBCXXABI_TEST_CONFIG="apple-libc++abi-backdeployment.cfg.in" \
-DLIBUNWIND_TEST_CONFIG="apple-libunwind-backdeployment.cfg.in" \
-DLIBCXX_TEST_PARAMS="${PARAMS}" \
-DLIBCXXABI_TEST_PARAMS="${PARAMS}" \
-DLIBUNWIND_TEST_PARAMS="${PARAMS}"
check-runtimes
;;
benchmarks)
clean
generate-cmake
check-cxx-benchmarks
;;
documentation)
clean
generate-cmake -DLLVM_ENABLE_SPHINX=ON
echo "+++ Generating documentation"
${NINJA} -vC "${BUILD_DIR}" docs-libcxx-html
;;
bootstrapping-build)
clean
echo "--- Generating CMake"
${CMAKE} \
-S "${MONOREPO_ROOT}/llvm" \
-B "${BUILD_DIR}" \
-GNinja -DCMAKE_MAKE_PROGRAM="${NINJA}" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
-DLLVM_ENABLE_PROJECTS="clang" \
-DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
-DLLVM_RUNTIME_TARGETS="$(c++ --print-target-triple)" \
-DLLVM_TARGETS_TO_BUILD="host" \
-DRUNTIMES_BUILD_ALLOW_DARWIN=ON \
-DLLVM_ENABLE_ASSERTIONS=ON
echo "+++ Running the libc++ and libc++abi tests"
${NINJA} -C "${BUILD_DIR}" check-runtimes
echo "--- Installing libc++ and libc++abi to a fake location"
${NINJA} -C "${BUILD_DIR}" install-runtimes
;;
legacy-test-config)
clean
generate-cmake -DLIBCXX_TEST_CONFIG="legacy.cfg.in" \
-DLIBCXXABI_TEST_CONFIG="${MONOREPO_ROOT}/libcxxabi/test/lit.site.cfg.in" \
-DLIBUNWIND_TEST_CONFIG="${MONOREPO_ROOT}/libunwind/test/lit.site.cfg.in"
check-runtimes
;;
legacy-project-build)
clean
echo "--- Generating CMake"
${CMAKE} \
-S "${MONOREPO_ROOT}/llvm" \
-B "${BUILD_DIR}" \
-DLLVM_ENABLE_PROJECTS="libcxx;libunwind;libcxxabi" \
-GNinja -DCMAKE_MAKE_PROGRAM="${NINJA}" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
-DLLVM_LIT_ARGS="-sv --show-unsupported --xunit-xml-output test-results.xml --timeout=1500" \
-DLIBCXX_CXX_ABI=libcxxabi
check-runtimes
;;
aarch64)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/AArch64.cmake"
check-runtimes
;;
aarch64-noexceptions)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/AArch64.cmake" \
-DLIBCXX_ENABLE_EXCEPTIONS=OFF \
-DLIBCXXABI_ENABLE_EXCEPTIONS=OFF
check-runtimes
;;
# Aka Armv8 32 bit
armv8)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Armv8Arm.cmake"
check-runtimes
;;
armv8-noexceptions)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Armv8Thumb-noexceptions.cmake"
check-runtimes
;;
# Armv7 32 bit. One building Arm only one Thumb only code.
armv7)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Armv7Arm.cmake"
check-runtimes
;;
armv7-noexceptions)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/Armv7Thumb-noexceptions.cmake"
check-runtimes
;;
clang-cl-dll)
clean
# TODO: Currently, building with the experimental library breaks running
# tests (the test linking look for the c++experimental library with the
# wrong name, and the statically linked c++experimental can't be linked
# correctly when libc++ visibility attributes indicate dllimport linkage
# anyway), thus just disable the experimental library. Remove this
# setting when cmake and the test driver does the right thing automatically.
generate-cmake-libcxx-win -DLIBCXX_TEST_PARAMS="enable_experimental=False"
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
;;
clang-cl-static)
clean
generate-cmake-libcxx-win -DLIBCXX_ENABLE_SHARED=OFF
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
;;
mingw-dll)
clean
# Explicitly specify the compiler with a triple prefix. The CI
# environment has got two installations of Clang; the default one
# defaults to MSVC mode, while there's an installation of llvm-mingw
# further back in PATH. By calling the compiler with an explicit
# triple prefix, we use the one that is bundled with a mingw sysroot.
generate-cmake \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-clang \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-clang++ \
-C "${MONOREPO_ROOT}/libcxx/cmake/caches/MinGW.cmake"
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
;;
mingw-static)
clean
generate-cmake \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-clang \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-clang++ \
-C "${MONOREPO_ROOT}/libcxx/cmake/caches/MinGW.cmake" \
-DLIBCXX_ENABLE_SHARED=OFF \
-DLIBUNWIND_ENABLE_SHARED=OFF
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
;;
mingw-dll-i686)
clean
generate-cmake \
-DCMAKE_C_COMPILER=i686-w64-mingw32-clang \
-DCMAKE_CXX_COMPILER=i686-w64-mingw32-clang++ \
-C "${MONOREPO_ROOT}/libcxx/cmake/caches/MinGW.cmake"
echo "+++ Running the libc++ tests"
${NINJA} -vC "${BUILD_DIR}" check-cxx
;;
aix)
export CC=clang
export CXX=clang++
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/AIX.cmake" \
-DLIBCXX_TEST_CONFIG="ibm-libc++-shared.cfg.in" \
-DLIBCXXABI_TEST_CONFIG="ibm-libc++abi-shared.cfg.in" \
-DLIBUNWIND_TEST_CONFIG="ibm-libunwind-shared.cfg.in"
# NOTE(review): unlike the other builders, check-abi-list runs before
# check-runtimes here -- confirm this ordering is intentional for AIX.
check-abi-list
check-runtimes
;;
#################################################################
# Insert vendor-specific internal configurations below.
#
# This allows vendors to extend this file with their own internal
# configurations without running into merge conflicts with upstream.
#################################################################
#################################################################
*)
echo "${BUILDER} is not a known configuration"
exit 1
;;
esac

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# This script starts a shell in a container running the libc++ build bot Docker
# image. That image emulates the environment used by libc++'s Linux builders on
# BuildKite.
#
# Once you're inside the shell, you can run the various build jobs with the
# `run-buildbot` script.
#
# This script must be run from within the LLVM monorepo. Furthermore, the
# monorepo will be mounted as `/llvm` inside the container. Be careful, the
# state in `/llvm` is shared between the container and the host machine, which
# is useful for editing files on the host machine and re-running the build bot
# in the container.
#
# If you are on Linux you will likely not be able to write to the mount because
# the user in the container doesn't have permissions to do so.
# If you need to do this, give that user permission to do so after running
# the container or add this flag to run the container as your local user IDs:
# --user $(id -u):$(id -g)
set -e
# Sanity-check that we are inside the monorepo before mounting it.
MONOREPO_ROOT="$(git rev-parse --show-toplevel)"
if [[ ! -d "${MONOREPO_ROOT}/libcxx/utils/ci" ]]; then
echo "Was unable to find the root of the LLVM monorepo; are you running from within the monorepo?"
exit 1
fi
# Always pull so we run against the latest published builder image.
docker pull ldionne/libcxx-builder
# SYS_PTRACE is needed for sanitizers; mark /llvm safe for git inside the
# container (the mount is owned by a different uid), then drop into a shell.
docker run -it --volume "${MONOREPO_ROOT}:/llvm" --workdir "/llvm" --cap-add=SYS_PTRACE ldionne/libcxx-builder \
bash -c 'git config --global --add safe.directory /llvm ; exec bash'

View File

@@ -0,0 +1,997 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""GDB pretty-printers for libc++.
These should work for objects compiled with either the stable ABI or the unstable ABI.
"""
from __future__ import print_function
import math
import re
import gdb
# One under-documented feature of the gdb pretty-printer API
# is that clients can call any other member of the API
# before they call to_string.
# Therefore all self.FIELDs must be set in the pretty-printer's
# __init__ function.

# Cached gdb types used for address/integer conversions below.
_void_pointer_type = gdb.lookup_type("void").pointer()
_long_int_type = gdb.lookup_type("unsigned long long")

# NOTE(review): initialized to False and not reassigned in this chunk --
# confirm whether endianness detection happens elsewhere in the file.
_libcpp_big_endian = False


def addr_as_long(addr):
    # Reinterpret a gdb address value as a Python integer.
    return int(addr.cast(_long_int_type))


# The size of a pointer in bytes.
_pointer_size = _void_pointer_type.sizeof
def _remove_cxx_namespace(typename):
"""Removed libc++ specific namespace from the type.
Arguments:
typename(string): A type, such as std::__u::something.
Returns:
A string without the libc++ specific part, such as std::something.
"""
return re.sub("std::__.*?::", "std::", typename)
def _remove_generics(typename):
"""Remove generics part of the type. Assumes typename is not empty.
Arguments:
typename(string): A type such as std::my_collection<element>.
Returns:
The prefix up to the generic part, such as std::my_collection.
"""
match = re.match("^([^<]+)", typename)
return match.group(1)
# Some common substitutions on the types to reduce visual clutter (A user who
# wants to see the actual details can always use print/r).
# Each entry is a (regex, replacement) pair applied in order by
# _prettify_typename below.
_common_substitutions = [
    ("std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
     "std::string"),
    ("std::basic_string_view<char, std::char_traits<char> >",
     "std::string_view"),
]
def _prettify_typename(gdb_type):
    """Returns a pretty name for the type, or None if no name can be found.

    Arguments:
      gdb_type(gdb.Type): A type object.

    Returns:
      A string, without type_defs, libc++ namespaces, and common substitutions
      applied.
    """
    stripped = gdb_type.strip_typedefs()
    # Fall back from .name to .tag to str() -- anonymous/record types may
    # only expose one of them.
    raw_name = stripped.name or stripped.tag or str(stripped)
    pretty = _remove_cxx_namespace(raw_name)
    for pattern, replacement in _common_substitutions:
        pretty = re.sub(pattern, replacement, pretty)
    return pretty
def _typename_for_nth_generic_argument(gdb_type, n):
    """Returns a pretty string for the nth argument of the given type.

    Arguments:
      gdb_type(gdb.Type): A type object, such as the one for std::map<int, int>
      n: The (zero indexed) index of the argument to return.

    Returns:
      A string for the nth argument, such a "std::string"
    """
    # gdb resolves the nth template parameter; prettifying strips the
    # versioned namespace and applies the common substitutions.
    element_type = gdb_type.template_argument(n)
    return _prettify_typename(element_type)
def _typename_with_n_generic_arguments(gdb_type, n):
    """Return a string for the type with the first n (1, ...) generic args.

    Arguments:
      gdb_type(gdb.Type): A template type, e.g. the one for std::map<int, int>.
      n: The number of leading template arguments to include.

    Returns:
      A string such as "std::map<int, int>"; just the base name when n <= 0.
    """
    base_type = _remove_generics(_prettify_typename(gdb_type))
    # Degenerate case: no template arguments requested. The previous
    # implementation crashed here while slicing its "%s<" format template.
    if n <= 0:
        return base_type
    args = [_typename_for_nth_generic_argument(gdb_type, i) for i in range(n)]
    # Join the argument names directly instead of building a %-format string,
    # which broke whenever a pretty-printed typename contained a '%'.
    return "%s<%s>" % (base_type, ", ".join(args))
def _typename_with_first_generic_argument(gdb_type):
    # Convenience wrapper: pretty-print a type showing only its first
    # template argument, e.g. "std::vector<int>".
    return _typename_with_n_generic_arguments(gdb_type, 1)
class StdTuplePrinter(object):
    """Print a std::tuple."""

    class _Children(object):
        """Class to iterate over the tuple's children."""

        def __init__(self, val):
            self.val = val
            # The tuple's elements live in the fields of __base_; iterate
            # over those fields to enumerate the children.
            self.child_iter = iter(self.val["__base_"].type.fields())
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            # child_iter raises StopIteration when appropriate.
            field_name = next(self.child_iter)
            child = self.val["__base_"][field_name]["__value_"]
            self.count += 1
            # NOTE(review): count is incremented before formatting, so the
            # displayed labels start at [1] -- confirm that is intended.
            return ("[%d]" % self.count, child)

        next = __next__  # Needed for GDB built against Python 2.7.

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # "empty std::tuple" for tuple<>, otherwise a header line; the
        # elements themselves are produced by children().
        typename = _remove_generics(_prettify_typename(self.val.type))
        if not self.val.type.fields():
            return "empty %s" % typename
        return "%s containing" % typename

    def children(self):
        if not self.val.type.fields():
            return iter(())
        return self._Children(self.val)
def _get_base_subobject(child_class_value, index=0):
"""Returns the object's value in the form of the parent class at index.
This function effectively casts the child_class_value to the base_class's
type, but the type-to-cast to is stored in the field at index, and once
we know the field, we can just return the data.
Args:
child_class_value: the value to cast
index: the parent class index
Raises:
Exception: field at index was not a base-class field.
"""
field = child_class_value.type.fields()[index]
if not field.is_base_class:
raise Exception("Not a base-class field.")
return child_class_value[field]
def _value_of_pair_first(value):
    """Convenience for _get_base_subobject, for the common case."""
    # libc++'s __compressed_pair stores the first member in a base class
    # whose payload field is named __value_.
    return _get_base_subobject(value, 0)["__value_"]
class StdStringPrinter(object):
    """Print a std::string."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        """Build a python string from the data whether stored inline or separately."""
        value_field = _value_of_pair_first(self.val["__r_"])
        short_field = value_field["__s"]
        short_size = short_field["__size_"]
        # NOTE(review): __s.__size_ is only meaningful for the short (inline)
        # representation; returning "" whenever this byte is zero assumes the
        # long form never stores zero here -- confirm against the libc++
        # string ABI layout.
        if short_size == 0:
            return ""
        if short_field["__is_long_"]:
            # Long form: data lives in a separately allocated buffer.
            long_field = value_field["__l"]
            data = long_field["__data_"]
            size = long_field["__size_"]
        else:
            # Short form: data is stored inline in the string object itself.
            data = short_field["__data_"]
            size = short_field["__size_"]
        return data.lazy_string(length=size)

    def display_hint(self):
        return "string"
class StdStringViewPrinter(object):
    """Print a std::string_view."""

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return "string"

    def to_string(self):  # pylint: disable=g-bad-name
        """GDB calls this to compute the pretty-printed form."""
        ptr = self.val["__data"]
        # Strip typedefs off the pointee so lazy_string decodes the
        # underlying character type directly.
        ptr = ptr.cast(ptr.type.target().strip_typedefs().pointer())
        size = self.val["__size"]
        return ptr.lazy_string(length=size)
class StdUniquePtrPrinter(object):
    """Print a std::unique_ptr."""

    def __init__(self, val):
        self.val = val
        # The managed pointer is the first member of the __ptr_
        # compressed pair (the second member is the deleter).
        self.addr = _value_of_pair_first(self.val["__ptr_"])
        self.pointee_type = self.val.type.template_argument(0)

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if not self.addr:
            return "%s is nullptr" % typename
        return ("%s<%s> containing" %
                (typename,
                 _remove_generics(_prettify_typename(self.pointee_type))))

    def __iter__(self):
        # Yield the pointee (as a typed pointer) as the only child, so gdb
        # can expand it.
        if self.addr:
            yield "__ptr_", self.addr.cast(self.pointee_type.pointer())

    def children(self):
        return self
class StdSharedPointerPrinter(object):
    """Print a std::shared_ptr (also registered for std::weak_ptr)."""

    def __init__(self, val):
        self.val = val
        self.addr = self.val["__ptr_"]

    def to_string(self):
        """Returns self as a string."""
        typename = _remove_generics(_prettify_typename(self.val.type))
        pointee_type = _remove_generics(
            _prettify_typename(self.val.type.template_argument(0)))
        if not self.addr:
            return "%s is nullptr" % typename
        # Default covers the (unusual) case of a non-null __ptr_ with a null
        # control block; previously `state` was left unbound and the final
        # format raised a NameError.
        state = "count ?, weak ?"
        refcount = self.val["__cntrl_"]
        if refcount != 0:
            try:
                # __shared_owners_ stores use_count - 1.
                usecount = refcount["__shared_owners_"] + 1
                weakcount = refcount["__shared_weak_owners_"]
                if usecount == 0:
                    state = "expired, weak %d" % weakcount
                else:
                    state = "count %d, weak %d" % (usecount, weakcount)
            except Exception:
                # Catch only real errors; a bare `except:` would also swallow
                # KeyboardInterrupt/SystemExit.
                # Debug info for a class with virtual functions is emitted
                # in the same place as its key function. That means that
                # for std::shared_ptr, __shared_owners_ is emitted into
                # libcxx.[so|a] itself, rather than into the shared_ptr
                # instantiation point. So if libcxx.so was built without
                # debug info, these fields will be missing.
                state = "count ?, weak ? (libc++ missing debug info)"
        return "%s<%s> %s containing" % (typename, pointee_type, state)

    def __iter__(self):
        if self.addr:
            yield "__ptr_", self.addr

    def children(self):
        return self
class StdVectorPrinter(object):
    """Print a std::vector."""

    class _VectorBoolIterator(object):
        """Class to iterate over the bool vector's children."""

        def __init__(self, begin, size, bits_per_word):
            # Pointer to the current storage word.
            self.item = begin
            self.size = size
            self.bits_per_word = bits_per_word
            # Number of elements yielded so far.
            self.count = 0
            # Bit position inside the current storage word.
            self.offset = 0

        def __iter__(self):
            return self

        def __next__(self):
            """Retrieve the next element."""
            self.count += 1
            if self.count > self.size:
                raise StopIteration
            entry = self.item.dereference()
            # Report the current bit as a plain 0 or 1.
            if entry & (1 << self.offset):
                outbit = 1
            else:
                outbit = 0
            self.offset += 1
            if self.offset >= self.bits_per_word:
                # Exhausted this word; advance to the next one.
                self.item += 1
                self.offset = 0
            return ("[%d]" % self.count, outbit)

        next = __next__  # Needed for GDB built against Python 2.7.

    class _VectorIterator(object):
        """Class to iterate over the non-bool vector's children."""

        def __init__(self, begin, end):
            self.item = begin
            self.end = end
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            self.count += 1
            if self.item == self.end:
                raise StopIteration
            entry = self.item.dereference()
            self.item += 1
            return ("[%d]" % self.count, entry)

        next = __next__  # Needed for GDB built against Python 2.7.

    def __init__(self, val):
        """Set val, length, capacity, and iterator for bool and normal vectors."""
        self.val = val
        self.typename = _remove_generics(_prettify_typename(val.type))
        begin = self.val["__begin_"]
        # vector<bool> is bit-packed and uses a different layout.
        if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL:
            self.typename += "<bool>"
            self.length = self.val["__size_"]
            bits_per_word = self.val["__bits_per_word"]
            # __cap_alloc_ is a compressed pair holding the word count;
            # multiply out to get the capacity in elements (bits).
            self.capacity = _value_of_pair_first(
                self.val["__cap_alloc_"]) * bits_per_word
            self.iterator = self._VectorBoolIterator(
                begin, self.length, bits_per_word)
        else:
            end = self.val["__end_"]
            self.length = end - begin
            # __end_cap_ is a compressed pair; its first element is the
            # end-of-capacity pointer.
            self.capacity = _get_base_subobject(
                self.val["__end_cap_"])["__value_"] - begin
            self.iterator = self._VectorIterator(begin, end)

    def to_string(self):
        return ("%s of length %d, capacity %d" %
                (self.typename, self.length, self.capacity))

    def children(self):
        return self.iterator

    def display_hint(self):
        return "array"
class StdBitsetPrinter(object):
    """Print a std::bitset."""

    def __init__(self, val):
        self.val = val
        self.n_words = int(self.val["__n_words"])
        self.bits_per_word = int(self.val["__bits_per_word"])
        # Non-type template argument: the declared number of bits.
        self.bit_count = self.val.type.template_argument(0)
        # With a single storage word __first_ is a scalar; otherwise it is
        # an array indexed per word.
        if self.n_words == 1:
            self.values = [int(self.val["__first_"])]
        else:
            self.values = [int(self.val["__first_"][index])
                           for index in range(self.n_words)]

    def to_string(self):
        typename = _prettify_typename(self.val.type)
        return "%s" % typename

    def _list_it(self):
        # Yield only the set bits, labeled with their bit index.
        for bit in range(self.bit_count):
            word = bit // self.bits_per_word
            word_bit = bit % self.bits_per_word
            if self.values[word] & (1 << word_bit):
                yield ("[%d]" % bit, 1)

    def __iter__(self):
        return self._list_it()

    def children(self):
        return self
class StdDequePrinter(object):
    """Print a std::deque."""

    def __init__(self, val):
        self.val = val
        # __size_ is a compressed pair; its first element is the element count.
        self.size = int(_value_of_pair_first(val["__size_"]))
        # Pointer to the first node of the block map.
        self.start_ptr = self.val["__map_"]["__begin_"]
        # Index of the first live element inside the first block.
        self.first_block_start_index = int(self.val["__start_"])
        self.node_type = self.start_ptr.type
        self.block_size = self._calculate_block_size(
            val.type.template_argument(0))

    def _calculate_block_size(self, element_type):
        """Calculates the number of elements in a full block.

        Mirrors libc++'s __deque_block_size: 4096 / sizeof(T) for element
        types smaller than 256 bytes, otherwise 16.
        """
        size = element_type.sizeof
        # Use floor division: the C++ expression is integer division, and a
        # float block size would make range() fail in _bucket_it below.
        return 4096 // size if size < 256 else 16

    def _bucket_it(self, start_addr, start_index, end_index):
        # Yield (index, element) pairs for one block of the deque.
        for i in range(start_index, end_index):
            yield i, (start_addr.dereference() + i).dereference()

    def _list_it(self):
        """Primary iteration worker."""
        num_emitted = 0
        current_addr = self.start_ptr
        start_index = self.first_block_start_index
        while num_emitted < self.size:
            end_index = min(start_index + self.size -
                            num_emitted, self.block_size)
            for _, elem in self._bucket_it(current_addr, start_index, end_index):
                yield "", elem
            num_emitted += end_index - start_index
            # Step to the next node pointer in the block map.
            current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \
                .cast(self.node_type)
            # Only the first block starts mid-way; later blocks start at 0.
            start_index = 0

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def __iter__(self):
        return self._list_it()

    def children(self):
        return self

    def display_hint(self):
        return "array"
class StdListPrinter(object):
    """Print a std::list."""

    def __init__(self, val):
        self.val = val
        # __size_alloc_ is a compressed pair; its first element is the size.
        size_alloc_field = self.val["__size_alloc_"]
        self.size = int(_value_of_pair_first(size_alloc_field))
        # __end_ is the sentinel node, typed as __list_node_base. Real nodes
        # holding values are __list_node; derive that pointer type by name.
        dummy_node = self.val["__end_"]
        self.nodetype = gdb.lookup_type(
            re.sub("__list_node_base", "__list_node",
                   str(dummy_node.type.strip_typedefs()))).pointer()
        self.first_node = dummy_node["__next_"]

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def _list_iter(self):
        # Walk exactly `size` nodes starting at the sentinel's successor.
        current_node = self.first_node
        for _ in range(self.size):
            yield "", current_node.cast(self.nodetype).dereference()["__value_"]
            current_node = current_node.dereference()["__next_"]

    def __iter__(self):
        return self._list_iter()

    def children(self):
        return self if self.nodetype else iter(())

    def display_hint(self):
        return "array"
class StdQueueOrStackPrinter(object):
    """Print a std::queue or std::stack."""

    def __init__(self, val):
        self.val = val
        # The wrapped container lives in the protected member `c`.
        self.underlying = val["c"]

    def to_string(self):
        return "%s wrapping" % _remove_generics(_prettify_typename(self.val.type))

    def children(self):
        # Single child: the underlying container.
        yield "", self.underlying

    def display_hint(self):
        return "array"
class StdPriorityQueuePrinter(object):
    """Print a std::priority_queue."""

    def __init__(self, val):
        self.val = val
        # The wrapped container lives in the protected member `c`.
        self.underlying = val["c"]

    def to_string(self):
        # TODO(tamur): It would be nice to print the top element. The technical
        # difficulty is that, the implementation refers to the underlying
        # container, which is a generic class. libstdcxx pretty printers do not
        # print the top element.
        return "%s wrapping" % _remove_generics(_prettify_typename(self.val.type))

    def children(self):
        # Single child: the underlying container.
        yield "", self.underlying

    def display_hint(self):
        return "array"
class RBTreeUtils(object):
    """Utility class for std::(multi)map, and std::(multi)set and iterators."""

    def __init__(self, cast_type, root):
        # cast_type: the concrete tree-node pointer type.
        # root: the tree's root node, or None when printing an iterator in
        # isolation (the root is then unknown).
        self.cast_type = cast_type
        self.root = root

    def left_child(self, node):
        """Return node's left child (after casting node to cast_type)."""
        result = node.cast(self.cast_type).dereference()["__left_"]
        return result

    def right_child(self, node):
        """Return node's right child (after casting node to cast_type)."""
        result = node.cast(self.cast_type).dereference()["__right_"]
        return result

    def parent(self, node):
        """Return the parent of node, if it exists."""
        # If this is the root, then from the algorithm's point of view, it has no
        # parent.
        if node == self.root:
            return None
        # We don't have enough information to tell if this is the end_node (which
        # doesn't have a __parent_ field), or the root (which doesn't have a parent
        # from the algorithm's point of view), so cast_type may not be correct for
        # this particular node. Use heuristics.
        # The end_node's left child is the root. Note that when printing iterators
        # in isolation, the root is unknown.
        if self.left_child(node) == self.root:
            return None
        parent = node.cast(self.cast_type).dereference()["__parent_"]
        # If the value at the offset of __parent_ doesn't look like a valid pointer,
        # then assume that node is the end_node (and therefore has no parent).
        # End_node type has a pointer embedded, so should have pointer alignment.
        if addr_as_long(parent) % _void_pointer_type.alignof:
            return None
        # This is ugly, but the only other option is to dereference an invalid
        # pointer. 0x8000 is fairly arbitrary, but has had good results in
        # practice. If there was a way to tell if a pointer is invalid without
        # actually dereferencing it and spewing error messages, that would be ideal.
        if parent < 0x8000:
            return None
        return parent

    def is_left_child(self, node):
        """True when node has a parent and is that parent's left child."""
        parent = self.parent(node)
        return parent is not None and self.left_child(parent) == node

    def is_right_child(self, node):
        """True when node has a parent and is that parent's right child."""
        parent = self.parent(node)
        return parent is not None and self.right_child(parent) == node
class AbstractRBTreePrinter(object):
    """Abstract super class for std::(multi)map, and std::(multi)set."""

    def __init__(self, val):
        self.val = val
        tree = self.val["__tree_"]
        # __pair3_ is a compressed pair; its first element is the size.
        self.size = int(_value_of_pair_first(tree["__pair3_"]))
        # __pair1_ holds the end node; its left child is the tree root.
        dummy_root = tree["__pair1_"]
        root = _value_of_pair_first(dummy_root)["__left_"]
        cast_type = self._init_cast_type(val.type)
        self.util = RBTreeUtils(cast_type, root)

    def _get_key_value(self, node):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError

    def _traverse(self):
        """Traverses the binary search tree in order."""
        current = self.util.root
        skip_left_child = False
        while True:
            # Descend to the leftmost unvisited node first.
            if not skip_left_child and self.util.left_child(current):
                current = self.util.left_child(current)
                continue
            skip_left_child = False
            for key_value in self._get_key_value(current):
                yield "", key_value
            right_child = self.util.right_child(current)
            if right_child:
                current = right_child
                continue
            # Climb out of any fully-visited right subtrees.
            while self.util.is_right_child(current):
                current = self.util.parent(current)
            if self.util.is_left_child(current):
                # Visit the parent next, but don't re-enter its left subtree.
                current = self.util.parent(current)
                skip_left_child = True
                continue
            break

    def __iter__(self):
        return self._traverse()

    def children(self):
        return self if self.util.cast_type and self.size > 0 else iter(())

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename
class StdMapPrinter(AbstractRBTreePrinter):
    """Print a std::map or std::multimap."""

    def _init_cast_type(self, val_type):
        # map::iterator is __map_iterator<__tree_iterator<..., node-ptr, ...>>;
        # dig out the tree node pointer type from the nested iterator.
        iterator_type = gdb.lookup_type(
            str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
        tree_iterator_type = iterator_type.template_argument(0)
        return tree_iterator_type.template_argument(1)

    def display_hint(self):
        return "map"

    def _get_key_value(self, node):
        pair = node.cast(self.util.cast_type).dereference()["__value_"]["__cc"]
        return [pair["first"], pair["second"]]
class StdSetPrinter(AbstractRBTreePrinter):
    """Print a std::set."""

    def _init_cast_type(self, val_type):
        # set::iterator is __tree_const_iterator<..., node-ptr, ...>;
        # the node pointer type is its second template argument.
        iterator_type = gdb.lookup_type(
            str(val_type.strip_typedefs()) + "::iterator").strip_typedefs()
        return iterator_type.template_argument(1)

    def display_hint(self):
        return "array"

    def _get_key_value(self, node):
        value = node.cast(self.util.cast_type).dereference()["__value_"]
        return [value]
class AbstractRBTreeIteratorPrinter(object):
    """Abstract super class for std::(multi)map, and std::(multi)set iterator."""

    def _initialize(self, val, typename):
        self.typename = typename
        self.val = val
        self.addr = self.val["__ptr_"]
        cast_type = self.val.type.template_argument(1)
        # Root is unknown when printing an iterator in isolation, so pass None.
        self.util = RBTreeUtils(cast_type, None)
        if self.addr:
            self.node = self.addr.cast(cast_type).dereference()

    def _is_valid_node(self):
        # A node without a parent, or that isn't a child of its parent, is
        # taken to be the end node (see RBTreeUtils.parent heuristics).
        if not self.util.parent(self.addr):
            return False
        return self.util.is_left_child(self.addr) or \
            self.util.is_right_child(self.addr)

    def to_string(self):
        if not self.addr:
            return "%s is nullptr" % self.typename
        return "%s " % self.typename

    def _get_node_value(self, node):
        """Subclasses should override to return the node's stored value."""
        raise NotImplementedError

    def __iter__(self):
        addr_str = "[%s]" % str(self.addr)
        if not self._is_valid_node():
            yield addr_str, " end()"
        else:
            yield addr_str, self._get_node_value(self.node)

    def children(self):
        return self if self.addr else iter(())
class MapIteratorPrinter(AbstractRBTreeIteratorPrinter):
    """Print a std::(multi)map iterator."""

    def __init__(self, val):
        # The map iterator wraps a tree iterator in its __i_ member.
        typename = _remove_generics(_prettify_typename(val.type))
        self._initialize(val["__i_"], typename)

    def _get_node_value(self, node):
        return node["__value_"]["__cc"]
class SetIteratorPrinter(AbstractRBTreeIteratorPrinter):
    """Print a std::(multi)set iterator."""

    def __init__(self, val):
        typename = _remove_generics(_prettify_typename(val.type))
        self._initialize(val, typename)

    def _get_node_value(self, node):
        return node["__value_"]
class StdFposPrinter(object):
    """Print a std::fpos or std::streampos."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        offset = self.val["__off_"]
        # __st_ is the mbstate_t conversion state.
        state = self.val["__st_"]
        return "%s with stream offset:%s with state: {count:%s value:%s}" % (
            typename, offset, state["__count"], state["__value"]["__wch"])
class AbstractUnorderedCollectionPrinter(object):
    """Abstract super class for std::unordered_(multi)[set|map]."""

    def __init__(self, val):
        self.val = val
        self.table = val["__table_"]
        # __p1_ is the before-begin sentinel node; __p2_ holds the size.
        # Both are compressed pairs.
        self.sentinel = self.table["__p1_"]
        self.size = int(_value_of_pair_first(self.table["__p2_"]))
        node_base_type = self.sentinel.type.template_argument(0)
        self.cast_type = node_base_type.template_argument(0)

    def _list_it(self, sentinel_ptr):
        # Follow the singly-linked node chain until the null pointer.
        next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"]
        while str(next_ptr.cast(_void_pointer_type)) != "0x0":
            next_val = next_ptr.cast(self.cast_type).dereference()
            for key_value in self._get_key_value(next_val):
                yield "", key_value
            next_ptr = next_val["__next_"]

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def _get_key_value(self, node):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError

    def children(self):
        return self if self.cast_type and self.size > 0 else iter(())

    def __iter__(self):
        return self._list_it(self.sentinel)
class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter):
    """Print a std::unordered_(multi)set."""

    def display_hint(self):
        return "array"

    def _get_key_value(self, node):
        # Set nodes store the element directly in __value_.
        return [node["__value_"]]
class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter):
    """Print a std::unordered_(multi)map."""

    def display_hint(self):
        return "map"

    def _get_key_value(self, node):
        # Map nodes store a pair inside __value_.__cc.
        pair = node["__value_"]["__cc"]
        return [pair["first"], pair["second"]]
class AbstractHashMapIteratorPrinter(object):
    """Abstract class for unordered collection iterators."""

    def _initialize(self, val, addr):
        # Subclasses set self.cast_type before calling this.
        self.val = val
        self.typename = _remove_generics(_prettify_typename(self.val.type))
        self.addr = addr
        if self.addr:
            self.node = self.addr.cast(self.cast_type).dereference()

    def _get_key_value(self):
        """Subclasses should override to return a list of values to yield."""
        raise NotImplementedError

    def to_string(self):
        if self.addr:
            return "%s " % self.typename
        return "%s = end()" % self.typename

    def children(self):
        if not self.addr:
            return iter(())
        return self

    def __iter__(self):
        for key_value in self._get_key_value():
            yield "", key_value
class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter):
    """Print a std::(multi)set iterator."""

    def __init__(self, val):
        # The node pointer type is the iterator's template argument.
        self.cast_type = val.type.template_argument(0)
        self._initialize(val, val["__node_"])

    def display_hint(self):
        return "array"

    def _get_key_value(self):
        return [self.node["__value_"]]
class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter):
    """Print a std::(multi)map iterator."""

    def __init__(self, val):
        # The map iterator wraps a hash iterator in __i_; the node pointer
        # type is nested one template level deeper.
        self.cast_type = val.type.template_argument(0).template_argument(0)
        self._initialize(val, val["__i_"]["__node_"])

    def display_hint(self):
        return "map"

    def _get_key_value(self):
        pair = self.node["__value_"]["__cc"]
        return [pair["first"], pair["second"]]
def _remove_std_prefix(typename):
match = re.match("^std::(.+)", typename)
return match.group(1) if match is not None else ""
class LibcxxPrettyPrinter(object):
    """PrettyPrinter object so gdb-commands like 'info pretty-printers' work."""

    def __init__(self, name):
        super(LibcxxPrettyPrinter, self).__init__()
        self.name = name
        self.enabled = True

        # Maps the unqualified, generics-free libc++ type name to its printer.
        self.lookup = {
            "basic_string": StdStringPrinter,
            "string": StdStringPrinter,
            "string_view": StdStringViewPrinter,
            "tuple": StdTuplePrinter,
            "unique_ptr": StdUniquePtrPrinter,
            "shared_ptr": StdSharedPointerPrinter,
            "weak_ptr": StdSharedPointerPrinter,
            "bitset": StdBitsetPrinter,
            "deque": StdDequePrinter,
            "list": StdListPrinter,
            "queue": StdQueueOrStackPrinter,
            "stack": StdQueueOrStackPrinter,
            "priority_queue": StdPriorityQueuePrinter,
            "map": StdMapPrinter,
            "multimap": StdMapPrinter,
            "set": StdSetPrinter,
            "multiset": StdSetPrinter,
            "vector": StdVectorPrinter,
            "__map_iterator": MapIteratorPrinter,
            "__map_const_iterator": MapIteratorPrinter,
            "__tree_iterator": SetIteratorPrinter,
            "__tree_const_iterator": SetIteratorPrinter,
            "fpos": StdFposPrinter,
            "unordered_set": StdUnorderedSetPrinter,
            "unordered_multiset": StdUnorderedSetPrinter,
            "unordered_map": StdUnorderedMapPrinter,
            "unordered_multimap": StdUnorderedMapPrinter,
            "__hash_map_iterator": StdUnorderedMapIteratorPrinter,
            "__hash_map_const_iterator": StdUnorderedMapIteratorPrinter,
            "__hash_iterator": StdUnorderedSetIteratorPrinter,
            "__hash_const_iterator": StdUnorderedSetIteratorPrinter,
        }

        self.subprinters = []
        for name, subprinter in self.lookup.items():
            # Subprinters and names are used only for the rarely used command "info
            # pretty" (and related), so the name of the first data structure it prints
            # is a reasonable choice.
            if subprinter not in self.subprinters:
                subprinter.name = name
                self.subprinters.append(subprinter)

    def __call__(self, val):
        """Return the pretty printer for a val, if the type is supported."""
        # Do not handle any type that is not a struct/class.
        if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT:
            return None

        # Don't attempt types known to be inside libstdcxx.
        typename = val.type.name or val.type.tag or str(val.type)
        match = re.match("^std::(__.*?)::", typename)
        if match is not None and match.group(1) in ["__cxx1998",
                                                    "__debug",
                                                    "__7",
                                                    "__g"]:
            return None

        # Handle any using declarations or other typedefs.
        typename = _prettify_typename(val.type)
        if not typename:
            return None
        without_generics = _remove_generics(typename)
        lookup_name = _remove_std_prefix(without_generics)
        if lookup_name in self.lookup:
            return self.lookup[lookup_name](val)
        return None
# Attribute name used to mark a progspace as already having our printers.
_libcxx_printer_name = "libcxx_pretty_printer"


# These are called for every binary object file, which could be thousands in
# certain pathological cases. Limit our pretty printers to the progspace.
def _register_libcxx_printers(event):
    """Attach the libc++ pretty-printers to the progspace of a new objfile."""
    progspace = event.new_objfile.progspace
    # It would be ideal to get the endianness at print time, but
    # gdb.execute clears gdb's internal wrap buffer, removing any values
    # already generated as part of a larger data structure, and there is
    # no python api to get the endianness. Mixed-endianness debugging
    # rare enough that this workaround should be adequate.
    # NOTE(review): _libcpp_big_endian is assigned as a function local here,
    # so the value is discarded -- presumably a module-level flag was
    # intended; confirm before relying on it.
    _libcpp_big_endian = "big endian" in gdb.execute("show endian",
                                                     to_string=True)
    if not getattr(progspace, _libcxx_printer_name, False):
        print("Loading libc++ pretty-printers.")
        gdb.printing.register_pretty_printer(
            progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
        setattr(progspace, _libcxx_printer_name, True)
def _unregister_libcxx_printers(event):
    """Remove the libc++ pretty-printers from event's progspace, if present."""
    progspace = event.progspace
    if not getattr(progspace, _libcxx_printer_name, False):
        return
    for printer in progspace.pretty_printers:
        if getattr(printer, "name", "none") == _libcxx_printer_name:
            progspace.pretty_printers.remove(printer)
            setattr(progspace, _libcxx_printer_name, False)
            break
def register_libcxx_printer_loader():
    """Register event handlers to load libc++ pretty-printers."""
    # Attach on new_objfile so printers are installed per-progspace, and
    # clean up when the progspace's objfiles are cleared.
    gdb.events.new_objfile.connect(_register_libcxx_printers)
    gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import argparse
import io
import libcxx.sym_check.extract
import libcxx.sym_check.util
import pprint
import sys
def OutputFile(file):
    """Return `file` as an open, writable text stream.

    Accepts either an already-open stream (returned unchanged) or a file
    name, which is opened for writing.
    """
    if isinstance(file, io.IOBase):
        return file
    assert isinstance(file, str), "Got object {} which is not a str".format(file)
    # newline='\n' keeps the output byte-identical across platforms.
    return open(file, 'w', newline='\n')
def main(argv):
    """Extract, filter, and write the sorted symbol list of a shared library."""
    parser = argparse.ArgumentParser(
        description='Extract a list of symbols from a shared library.')
    parser.add_argument('library', metavar='LIB', type=str,
                        help='The library to extract symbols from.')
    parser.add_argument('-o', '--output', dest='output', type=OutputFile, default=sys.stdout,
                        help='The output file to write the symbols to. It is overwritten if it already exists. '
                             'If no file is specified, the results are written to standard output.')
    args = parser.parse_args(argv)
    symbols = libcxx.sym_check.extract.extract_symbols(args.library)
    # Keep only the symbols that belong to the standard library.
    symbols, _ = libcxx.sym_check.util.filter_stdlib_symbols(symbols)
    # One pprint'ed symbol per line; the huge width keeps each on one line.
    lines = [pprint.pformat(sym, width=99999) for sym in symbols]
    args.output.writelines('\n'.join(sorted(lines)))


if __name__ == '__main__':
    main(sys.argv[1:])

View File

@@ -0,0 +1,326 @@
#!/usr/bin/env python
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===##
# The code is based on
# https://github.com/microsoft/STL/blob/main/tools/unicode_properties_parse/grapheme_break_property_data_gen.py
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from io import StringIO
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional
import re
@dataclass
class PropertyRange:
    """An inclusive range [lower, upper] of code points sharing one property."""
    lower: int = -1
    upper: int = -1
    prop: Optional[str] = None
@dataclass
class Entry:
    """One packed table entry.

    lower: first code point of the range.
    offset: upper - lower (i.e. range size minus one; at most 127 once split).
    prop: index of the property in the sorted property list.
    """
    lower: int = -1
    offset: int = -1
    prop: int = -1
# Matches 'XXXX[..YYYY] ; PropName' lines from the UCD data files.
LINE_REGEX = re.compile(
    r"^(?P<lower>[0-9A-F]{4,5})(?:\.\.(?P<upper>[0-9A-F]{4,5}))?\s*;\s*(?P<prop>\w+)"
)


def parsePropertyLine(inputLine: str) -> Optional[PropertyRange]:
    """Parse one UCD data line into a PropertyRange; None if it doesn't match."""
    match = LINE_REGEX.match(inputLine)
    if match is None:
        return None
    lower_str, upper_str, prop = match.group("lower", "upper", "prop")
    result = PropertyRange()
    result.prop = prop
    result.lower = int(lower_str, base=16)
    # Single code points have no '..upper' part: the range is one element.
    result.upper = result.lower if upper_str is None else int(upper_str, base=16)
    return result
def compactPropertyRanges(input: list[PropertyRange]) -> list[PropertyRange]:
    """
    Merges consecutive ranges with the same property to one range.

    Merging the ranges results in fewer ranges in the output table,
    reducing binary and improving lookup performance.
    """
    result = []
    for entry in input:
        if result:
            last = result[-1]
            # Same property and directly adjacent: extend the previous range.
            if last.prop == entry.prop and last.upper + 1 == entry.lower:
                last.upper = entry.upper
                continue
        result.append(entry)
    return result
PROP_VALUE_ENUMERATOR_TEMPLATE = "__{}"
PROP_VALUE_ENUM_TEMPLATE = """
enum class __property : uint8_t {{
// Values generated from the data files.
{enumerators},
// The properies below aren't stored in the "database".
// Text position properties.
__sot,
__eot,
// The code unit has none of above properties.
__none
}};
"""
DATA_ARRAY_TEMPLATE = """
/// The entries of the extended grapheme cluster bondary property table.
///
/// The data is generated from
/// - https://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt
/// - https://www.unicode.org/Public/UCD/latest/ucd/emoji/emoji-data.txt
///
/// The data has 3 values
/// - bits [0, 3] The property. One of the values generated form the datafiles
/// of \\ref __property
/// - bits [4, 10] The size of the range.
/// - bits [11, 31] The lower bound code point of the range. The upper bound of
/// the range is lower bound + size.
///
/// The 7 bits for the size allow a maximum range of 128 elements. Some ranges
/// in the Unicode tables are larger. They are stored in multiple consecutive
/// ranges in the data table. An alternative would be to store the sizes in a
/// separate 16-bit value. The original MSVC STL code had such an approach, but
/// this approach uses less space for the data and is about 4% faster in the
/// following benchmark.
/// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp
inline constexpr uint32_t __entries[{size}] = {{{entries}}};
/// Returns the extended grapheme cluster bondary property of a code point.
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __property __get_property(const char32_t __code_point) noexcept {{
// TODO FMT use std::ranges::upper_bound.
// The algorithm searches for the upper bound of the range and, when found,
// steps back one entry. This algorithm is used since the code point can be
// anywhere in the range. After a lower bound is found the next step is to
// compare whether the code unit is indeed in the range.
//
// Since the entry contains a code unit, size, and property the code point
// being sought needs to be adjusted. Just shifting the code point to the
// proper position doesn't work; suppose an entry has property 0, size 1,
// and lower bound 3. This results in the entry 0x1810.
// When searching for code point 3 it will search for 0x1800, find 0x1810
// and moves to the previous entry. Thus the lower bound value will never
// be found.
// The simple solution is to set the bits belonging to the property and
// size. Then the upper bound for code point 3 will return the entry after
// 0x1810. After moving to the previous entry the algorithm arrives at the
// correct entry.
ptrdiff_t __i = std::upper_bound(__entries, std::end(__entries), (__code_point << 11) | 0x7ffu) - __entries;
if (__i == 0)
return __property::__none;
--__i;
uint32_t __upper_bound = (__entries[__i] >> 11) + ((__entries[__i] >> 4) & 0x7f);
if (__code_point <= __upper_bound)
return static_cast<__property>(__entries[__i] & 0xf);
return __property::__none;
}}
"""
MSVC_FORMAT_UCD_TABLES_HPP_TEMPLATE = """
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// WARNING, this entire header is generated by
// utiles/generate_extended_grapheme_cluster_table.py
// DO NOT MODIFY!
// UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
//
// See Terms of Use <https://www.unicode.org/copyright.html>
// for definitions of Unicode Inc.'s Data Files and Software.
//
// NOTICE TO USER: Carefully read the following legal agreement.
// BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
// DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
// YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
// TERMS AND CONDITIONS OF THIS AGREEMENT.
// IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
// THE DATA FILES OR SOFTWARE.
//
// COPYRIGHT AND PERMISSION NOTICE
//
// Copyright (c) 1991-2022 Unicode, Inc. All rights reserved.
// Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of the Unicode data files and any associated documentation
// (the "Data Files") or Unicode software and any associated documentation
// (the "Software") to deal in the Data Files or Software
// without restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, and/or sell copies of
// the Data Files or Software, and to permit persons to whom the Data Files
// or Software are furnished to do so, provided that either
// (a) this copyright and permission notice appear with all copies
// of the Data Files or Software, or
// (b) this copyright and permission notice appear in associated
// Documentation.
//
// THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT OF THIRD PARTY RIGHTS.
// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
// NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
// DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
// DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THE DATA FILES OR SOFTWARE.
//
// Except as contained in this notice, the name of a copyright holder
// shall not be used in advertising or otherwise to promote the sale,
// use or other dealings in these Data Files or Software without prior
// written authorization of the copyright holder.
#ifndef _LIBCPP___FORMAT_EXTENDED_GRAPHEME_CLUSTER_TABLE_H
#define _LIBCPP___FORMAT_EXTENDED_GRAPHEME_CLUSTER_TABLE_H
#include <__algorithm/upper_bound.h>
#include <__config>
#include <__iterator/access.h>
#include <cstddef>
#include <cstdint>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
_LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER > 17
namespace __extended_grapheme_custer_property_boundary {{
{content}
}} // __extended_grapheme_custer_property_boundary
#endif //_LIBCPP_STD_VER > 17
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___FORMAT_EXTENDED_GRAPHEME_CLUSTER_TABLE_H
"""
def property_ranges_to_table(
    ranges: list[PropertyRange], props: list[str]
) -> list[Entry]:
    """Convert sorted, non-overlapping property ranges into packed entries.

    Ranges wider than 128 code points are split into consecutive
    128-element chunks, since an entry only has 7 bits for its size.
    """
    # The property index must fit in 4 bits of the packed entry.
    assert len(props) < 16
    result: list[Entry] = []
    previous_upper = -1
    for rng in sorted(ranges, key=lambda r: r.lower):
        # Validate overlapping ranges
        assert rng.lower > previous_upper
        previous_upper = rng.upper
        while True:
            entry = Entry(rng.lower, rng.upper - rng.lower, props.index(rng.prop))
            if entry.offset <= 127:
                result.append(entry)
                break
            entry.offset = 127
            result.append(entry)
            rng.lower += 128
    return result
# Format for one packed 32-bit table entry.
cpp_entrytemplate = "0x{:08x}"


def generate_cpp_data(prop_name: str, ranges: list[PropertyRange]) -> str:
    """Render the property enum and the packed data table as C++ source."""
    out = StringIO()
    prop_values = sorted(set(x.prop for x in ranges))
    table = property_ranges_to_table(ranges, prop_values)
    enumerator_values = [PROP_VALUE_ENUMERATOR_TEMPLATE.format(x) for x in prop_values]
    out.write(
        PROP_VALUE_ENUM_TEMPLATE.format(enumerators=",".join(enumerator_values))
    )
    # Pack each entry as: lower(21 bits) | offset(7 bits) | property(4 bits).
    packed = [
        cpp_entrytemplate.format(e.lower << 11 | e.offset << 4 | e.prop)
        for e in table
    ]
    out.write(
        DATA_ARRAY_TEMPLATE.format(
            prop_name=prop_name,
            size=len(table),
            entries=",".join(packed),
        )
    )
    return out.getvalue()
def generate_data_tables() -> str:
    """
    Generate Unicode data for inclusion into <format> from
    GraphemeBreakProperty.txt and emoji-data.txt.

    GraphemeBreakProperty.txt can be found at
    https://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt

    emoji-data.txt can be found at
    https://www.unicode.org/Public/UCD/latest/ucd/emoji/emoji-data.txt

    Both files are expected to be in the same directory as this script.
    """
    gbp_data_path = Path(__file__).absolute().with_name("GraphemeBreakProperty.txt")
    emoji_data_path = Path(__file__).absolute().with_name("emoji-data.txt")
    # Note: the dead `gbp_ranges = list()` pre-initializations were removed;
    # both names are unconditionally rebound below.
    with gbp_data_path.open(encoding="utf-8") as f:
        gbp_ranges = compactPropertyRanges(
            [x for line in f if (x := parsePropertyLine(line))]
        )
    with emoji_data_path.open(encoding="utf-8") as f:
        emoji_ranges = compactPropertyRanges(
            [x for line in f if (x := parsePropertyLine(line))]
        )
    # Fix: use extend() instead of a list comprehension evaluated purely for
    # its append() side effects (which also built a throw-away list of Nones).
    gbp_ranges.extend(
        x for x in emoji_ranges if x.prop == "Extended_Pictographic"
    )
    gpb_cpp_data = generate_cpp_data("Grapheme_Break", gbp_ranges)
    return "\n".join([gpb_cpp_data])
if __name__ == "__main__":
    # Emit the complete generated header to stdout; the caller redirects it
    # into the destination file.
    print(
        MSVC_FORMAT_UCD_TABLES_HPP_TEMPLATE.lstrip().format(
            content=generate_data_tables()
        )
    )

View File

@@ -0,0 +1,245 @@
#!/usr/bin/env python
# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===##
# The code is based on
# https://github.com/microsoft/STL/blob/main/tools/unicode_properties_parse/grapheme_break_test_data_gen.py
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional, TextIO
from array import array
@dataclass
class BreakTestItem:
    """One parsed data line of GraphemeBreakTest.txt (see parseBreakTestLine)."""

    # First code point of each extended grapheme cluster on the line.
    code_points: list[int] = field(default_factory=list)
    # The line's code points as C++ \UXXXXXXXX escape sequences.
    encoded: str = ""
    # Break offsets, measured in code units of the respective encoding.
    breaks_utf8: list[int] = field(default_factory=list)
    breaks_utf16: list[int] = field(default_factory=list)
    breaks_utf32: list[int] = field(default_factory=list)
class CommentLine:
    # Marker type for a comment-only line; appears unused in this file.
    pass


class EOF:
    # Marker type for end of input; appears unused in this file.
    pass
def parseBreakTestLine(input: TextIO) -> Optional[BreakTestItem]:
    """Parse one line of GraphemeBreakTest.txt, one character at a time.

    Returns a BreakTestItem for a data (or comment-only, then empty) line,
    or None once the end of the stream is reached.
    """
    result = BreakTestItem()
    code_point = -1
    # Running code-unit offsets for each encoding.
    utf8 = 0
    utf16 = 0
    utf32 = 0
    while True:
        c = input.read(1)
        if c == "\N{DIVISION SIGN}":
            # The line starts with a division sign, don't add it to the output.
            if code_point != -1:
                result.code_points.append(code_point)
            code_point = -1
            result.breaks_utf8.append(utf8)
            result.breaks_utf16.append(utf16)
            result.breaks_utf32.append(utf32)
            # Consume the separator following the sign.
            assert input.read(1).isspace()
            continue
        if c == "\N{MULTIPLICATION SIGN}":
            # No-break marker; only the following separator is consumed.
            assert input.read(1).isspace()
            continue
        if c.isalnum():
            # Accumulate the rest of the hexadecimal code point token.
            while next := input.read(1):
                if next.isalnum():
                    c += next
                else:
                    assert next.isspace()
                    break
            i = int(c, base=16)
            if code_point == -1:
                # Remember only the first code point after each break.
                code_point = i
            result.encoded += f"\\U{i:08x}"
            c = chr(i)
            utf8 += c.encode().__len__()
            # Since we only care about the number of code units the byte order
            # doesn't matter. The byte order is specified to avoid the BOM
            utf16 += int(c.encode("utf-16-le").__len__() / 2)
            utf32 += int(c.encode("utf-32-le").__len__() / 4)
            continue
        if c == "#":
            # Trailing comment: discard the remainder of the line.
            input.readline()
            return result
        if c == "\n":
            return result
        if c == "":
            # End of file.
            return None
        # Any other character means the input is malformed.
        assert False
# Skeleton of the generated C++ test header.  Two fixes relative to the
# original: the generating script lives under "utils/" (was "utiles/"), and
# the data_utf32 table was mislabeled "The data for UTF-8.".
cpp_template = """// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// WARNING, this entire header is generated by
// utils/generate_extended_grapheme_cluster_test.py
// DO NOT MODIFY!
// UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
//
// See Terms of Use <https://www.unicode.org/copyright.html>
// for definitions of Unicode Inc.'s Data Files and Software.
//
// NOTICE TO USER: Carefully read the following legal agreement.
// BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
// DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
// YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
// TERMS AND CONDITIONS OF THIS AGREEMENT.
// IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
// THE DATA FILES OR SOFTWARE.
//
// COPYRIGHT AND PERMISSION NOTICE
//
// Copyright (c) 1991-2022 Unicode, Inc. All rights reserved.
// Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of the Unicode data files and any associated documentation
// (the "Data Files") or Unicode software and any associated documentation
// (the "Software") to deal in the Data Files or Software
// without restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, and/or sell copies of
// the Data Files or Software, and to permit persons to whom the Data Files
// or Software are furnished to do so, provided that either
// (a) this copyright and permission notice appear with all copies
// of the Data Files or Software, or
// (b) this copyright and permission notice appear in associated
// Documentation.
//
// THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT OF THIRD PARTY RIGHTS.
// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
// NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
// DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
// DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THE DATA FILES OR SOFTWARE.
//
// Except as contained in this notice, the name of a copyright holder
// shall not be used in advertising or otherwise to promote the sale,
// use or other dealings in these Data Files or Software without prior
// written authorization of the copyright holder.
#ifndef LIBCXX_TEST_STD_UTILITIES_FORMAT_FORMAT_STRING_FORMAT_STRING_STD_EXTENDED_GRAPHEME_CLUSTER_H
#define LIBCXX_TEST_STD_UTILITIES_FORMAT_FORMAT_STRING_FORMAT_STRING_STD_EXTENDED_GRAPHEME_CLUSTER_H
#include <array>
#include <string_view>
#include <vector>
template <class CharT>
struct data {{
  /// The input to parse.
  std::basic_string_view<CharT> input;
  /// The first code point all extended grapheme clusters in the input.
  std::vector<char32_t> code_points;
  /// The offset of the last code units of the extended grapheme clusters in the input.
  ///
  /// The vector has the same number of entries as \\ref code_points.
  std::vector<size_t> breaks;
}};
/// The data for UTF-8.
std::array<data<char>, {0}> data_utf8 = {{{{ {1} }}}};
/// The data for UTF-16.
///
/// Note that most of the data for the UTF-16 and UTF-32 are identical. However
/// since the size of the code units differ the breaks can contain different
/// values.
std::array<data<wchar_t>, {0}> data_utf16 = {{{{ {2} }}}};
/// The data for UTF-32.
///
/// Note that most of the data for the UTF-16 and UTF-32 are identical. However
/// since the size of the code units differ the breaks can contain different
/// values.
std::array<data<wchar_t>, {0}> data_utf32 = {{{{ {3} }}}};
#endif // LIBCXX_TEST_STD_UTILITIES_FORMAT_FORMAT_STRING_FORMAT_STRING_STD_EXTENDED_GRAPHEME_CLUSTER_H
"""
cpp_test_data_line_template = "{{ {}, {{ {} }}, {{ {} }} }}"
def lineToCppDataLineUtf8(line: BreakTestItem) -> str:
    """Render one test line as a data<char> initializer (UTF-8 break offsets)."""
    code_points = ",".join(str(cp) for cp in line.code_points)
    breaks = ",".join(str(b) for b in line.breaks_utf8)
    return cpp_test_data_line_template.format(f'"{line.encoded}"', code_points, breaks)
def lineToCppDataLineUtf16(line: BreakTestItem) -> str:
    """Render one test line as a data<wchar_t> initializer (UTF-16 break offsets)."""
    code_points = ",".join(str(cp) for cp in line.code_points)
    breaks = ",".join(str(b) for b in line.breaks_utf16)
    return cpp_test_data_line_template.format(f'L"{line.encoded}"', code_points, breaks)
def lineToCppDataLineUtf32(line: BreakTestItem) -> str:
    """Render one test line as a data<wchar_t> initializer (UTF-32 break offsets)."""
    code_points = ",".join(str(cp) for cp in line.code_points)
    breaks = ",".join(str(b) for b in line.breaks_utf32)
    return cpp_test_data_line_template.format(f'L"{line.encoded}"', code_points, breaks)
"""
Generate test data from "GraphemeBreakTest.txt"
This file can be downloaded from:
https://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakTest.txt
This script looks for GraphemeBreakTest.txt in the same directory as this script.
"""
def generate_all() -> str:
    """Parse GraphemeBreakTest.txt (next to this script) and render the test header."""
    test_data_path = Path(__file__).absolute().with_name("GraphemeBreakTest.txt")
    items = []
    with open(test_data_path, mode="rt", encoding="utf-8") as file:
        while item := parseBreakTestLine(file):
            # Skip comment-only lines, which parse to an empty item.
            if item.encoded:
                items.append(item)
    return cpp_template.format(
        len(items),
        ",".join(lineToCppDataLineUtf8(i) for i in items),
        ",".join(lineToCppDataLineUtf16(i) for i in items),
        ",".join(lineToCppDataLineUtf32(i) for i in items),
    )
if __name__ == "__main__":
    # Write the generated header to stdout.
    print(generate_all())

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,207 @@
#!/usr/bin/env python
import os
def get_libcxx_paths():
    """Locate this script's name, the libc++ source root, and the inclusions test dir."""
    utils_path = os.path.dirname(os.path.abspath(__file__))
    assert os.path.exists(utils_path)
    script = os.path.basename(__file__)
    src_root = os.path.dirname(utils_path)
    inclusions_dir = os.path.join(src_root, 'test', 'libcxx', 'inclusions')
    assert os.path.exists(inclusions_dir)
    # Spot-check one known generated file to confirm we found the right dir.
    assert os.path.exists(
        os.path.join(inclusions_dir, 'algorithm.inclusions.compile.pass.cpp'))
    return script, src_root, inclusions_dir
script_name, source_root, test_path = get_libcxx_paths()
# This table was produced manually, by grepping the TeX source of the Standard's
# library clauses for the string "#include". Each header's synopsis contains
# explicit "#include" directives for its mandatory inclusions.
# For example, [algorithm.syn] contains "#include <initializer_list>".
#
# NOTE: every value list must be kept sorted; this is asserted below.
mandatory_inclusions = {
    "algorithm": ["initializer_list"],
    "array": ["compare", "initializer_list"],
    "bitset": ["iosfwd", "string"],
    "chrono": ["compare"],
    "cinttypes": ["cstdint"],
    "complex.h": ["complex"],
    "coroutine": ["compare"],
    "deque": ["compare", "initializer_list"],
    "filesystem": ["compare"],
    "forward_list": ["compare", "initializer_list"],
    "ios": ["iosfwd"],
    "iostream": ["ios", "istream", "ostream", "streambuf"],
    "iterator": ["compare", "concepts"],
    "list": ["compare", "initializer_list"],
    "map": ["compare", "initializer_list"],
    "memory": ["compare"],
    "optional": ["compare"],
    "queue": ["compare", "initializer_list"],
    "random": ["initializer_list"],
    "ranges": ["compare", "initializer_list", "iterator"],
    "regex": ["compare", "initializer_list"],
    "set": ["compare", "initializer_list"],
    "stack": ["compare", "initializer_list"],
    "string_view": ["compare"],
    "string": ["compare", "initializer_list"],
    # TODO "syncstream": ["ostream"],
    "system_error": ["compare"],
    "tgmath.h": ["cmath", "complex"],
    "thread": ["compare"],
    "tuple": ["compare"],
    "typeindex": ["compare"],
    "unordered_map": ["compare", "initializer_list"],
    "unordered_set": ["compare", "initializer_list"],
    "utility": ["compare", "initializer_list"],
    "valarray": ["initializer_list"],
    "variant": ["compare"],
    "vector": ["compare", "initializer_list"],
}
# First C++ standard version in which each header appeared.  Headers not
# listed here default to "03" (see the .get(..., "03") lookups below).
new_in_version = {
    "chrono": "11",
    "compare": "20",
    "concepts": "20",
    "coroutine": "20",
    "cuchar": "11",
    "filesystem": "17",
    "initializer_list": "11",
    "optional": "17",
    "ranges": "20",
    "string_view": "17",
    "syncstream": "20",
    "system_error": "11",
    "thread": "11",
    "tuple": "11",
    "uchar.h": "11",
    "unordered_map": "11",
    "unordered_set": "11",
    "variant": "17",
}

# Sanity check: every mandatory-inclusion list must be sorted.
assert all(v == sorted(v) for k, v in mandatory_inclusions.items())
# Map from each header to the Lit annotations that should be used for
# tests that include that header.
#
# For example, when threads are not supported, any test that includes
# <thread> should be marked as UNSUPPORTED, because including <thread>
# is a hard error in that case.
lit_markup = {
    "barrier": ["UNSUPPORTED: no-threads"],
    "filesystem": ["UNSUPPORTED: no-filesystem"],
    "format": ["UNSUPPORTED: libcpp-has-no-incomplete-format"],
    "iomanip": ["UNSUPPORTED: no-localization"],
    "ios": ["UNSUPPORTED: no-localization"],
    "iostream": ["UNSUPPORTED: no-localization"],
    "istream": ["UNSUPPORTED: no-localization"],
    "latch": ["UNSUPPORTED: no-threads"],
    "locale": ["UNSUPPORTED: no-localization"],
    "mutex": ["UNSUPPORTED: no-threads"],
    "ostream": ["UNSUPPORTED: no-localization"],
    "ranges": ["UNSUPPORTED: libcpp-has-no-incomplete-ranges"],
    "regex": ["UNSUPPORTED: no-localization"],
    "semaphore": ["UNSUPPORTED: no-threads"],
    "shared_mutex": ["UNSUPPORTED: no-threads"],
    "thread": ["UNSUPPORTED: no-threads"]
}
def get_std_ver_test(includee):
    """Return a 'TEST_STD_VER > NN && ' guard prefix, or '' for C++03 headers."""
    version = new_in_version.get(includee, "03")
    if version == "03":
        return ''
    known_versions = ["03", "11", "14", "17", "20"]
    # Guard against the highest known version strictly below `version`
    # (string comparison works because all entries are two characters).
    previous = max(v for v in known_versions if v < version)
    return 'TEST_STD_VER > {} && '.format(previous)
def get_unsupported_line(includee):
    """Map a header's introduction version to its UNSUPPORTED Lit markup lines."""
    markup_by_version = {
        "03": [],
        "11": ['UNSUPPORTED: c++03'],
        "14": ['UNSUPPORTED: c++03, c++11'],
        "17": ['UNSUPPORTED: c++03, c++11, c++14'],
        "20": ['UNSUPPORTED: c++03, c++11, c++14, c++17'],
        "2b": ['UNSUPPORTED: c++03, c++11, c++14, c++17, c++20'],
    }
    return markup_by_version[new_in_version.get(includee, "03")]
def get_libcpp_header_symbol(header_name):
    """Map a header name to the _LIBCPP_<NAME> macro it is expected to define."""
    return '_LIBCPP_{}'.format(header_name.upper().replace('.', '_'))
def get_includer_symbol_test(includer):
    """C++ snippet asserting that <includer> defines its own _LIBCPP_* macro."""
    symbol = get_libcpp_header_symbol(includer)
    message = "<{}> was expected to define {}".format(includer, symbol)
    return '\n'.join([
        '#if !defined({})'.format(symbol),
        '# error "{}"'.format(message),
        '#endif',
    ])
def get_ifdef(includer, includee):
    """C++ snippet asserting that <includer> transitively provides <includee>'s macro."""
    # The requirement only applies from whichever header is newer.
    version = max(new_in_version.get(h, "03") for h in (includer, includee))
    symbol = get_libcpp_header_symbol(includee)
    message = "<{}> should include <{}> in C++{} and later".format(
        includer, includee, version)
    return '\n'.join([
        '#if {}!defined({})'.format(get_std_ver_test(includee), symbol),
        '# error "{}"'.format(message),
        '#endif',
    ])
# Skeleton of one generated test file; placeholders are filled by produce_tests().
test_body_template = """
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
//
// clang-format off
{markup}
// <{header}>
// Test that <{header}> includes all the other headers it's supposed to.
#include <{header}>
#include "test_macros.h"
{test_includers_symbol}
{test_per_includee}
""".strip()
def produce_tests():
    """Write one <header>.inclusions.compile.pass.cpp test per mandatory_inclusions entry."""
    for includer, includees in mandatory_inclusions.items():
        markup_tags = get_unsupported_line(includer) + lit_markup.get(includer, [])
        if markup_tags:
            markup = '\n' + '\n'.join('// ' + tag for tag in markup_tags) + '\n'
        else:
            markup = ''
        test_body = test_body_template.format(
            script_name=script_name,
            header=includer,
            markup=markup,
            test_includers_symbol=get_includer_symbol_test(includer),
            test_per_includee='\n'.join(
                get_ifdef(includer, includee) for includee in includees
            ),
        )
        test_name = "{}.inclusions.compile.pass.cpp".format(includer)
        with open(os.path.join(test_path, test_name), 'w', newline='\n') as f:
            f.write(test_body + '\n')
if __name__ == '__main__':
    # Regenerate all inclusion tests in place.
    produce_tests()

View File

@@ -0,0 +1,151 @@
#!/usr/bin/env python
import contextlib
import glob
import io
import os
import pathlib
import re
# Preprocessor condition under which each header may be included at all.
# Headers absent from this map are usable unconditionally.
header_restrictions = {
    "barrier": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "future": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "latch": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "mutex": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "semaphore": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "shared_mutex": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "stdatomic.h": "__cplusplus > 202002L && !defined(_LIBCPP_HAS_NO_THREADS)",
    "thread": "!defined(_LIBCPP_HAS_NO_THREADS)",
    "filesystem": "!defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)",
    "clocale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "codecvt": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "fstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "iomanip": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "ios": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "iostream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "istream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "locale.h": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "locale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "ostream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "regex": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "sstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "streambuf": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "strstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
    "wctype.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
    "cwctype": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
    "cwchar": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
    "wchar.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
    "experimental/algorithm": "__cplusplus >= 201103L",
    "experimental/coroutine": "__cplusplus >= 201103L && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_COROUTINES)",
    "experimental/deque": "__cplusplus >= 201103L",
    "experimental/forward_list": "__cplusplus >= 201103L",
    "experimental/functional": "__cplusplus >= 201103L",
    "experimental/iterator": "__cplusplus >= 201103L",
    "experimental/list": "__cplusplus >= 201103L",
    "experimental/map": "__cplusplus >= 201103L",
    "experimental/memory_resource": "__cplusplus >= 201103L",
    "experimental/propagate_const": "__cplusplus >= 201103L",
    "experimental/regex": "!defined(_LIBCPP_HAS_NO_LOCALIZATION) && __cplusplus >= 201103L",
    "experimental/set": "__cplusplus >= 201103L",
    "experimental/simd": "__cplusplus >= 201103L",
    "experimental/span": "__cplusplus >= 201103L",
    "experimental/string": "__cplusplus >= 201103L",
    "experimental/type_traits": "__cplusplus >= 201103L",
    "experimental/unordered_map": "__cplusplus >= 201103L",
    "experimental/unordered_set": "__cplusplus >= 201103L",
    "experimental/utility": "__cplusplus >= 201103L",
    "experimental/vector": "__cplusplus >= 201103L",
}
# Detail ('__'-prefixed) headers that the module map nevertheless exposes.
private_headers_still_public_in_modules = [
    '__assert', '__bsd_locale_defaults.h', '__bsd_locale_fallbacks.h', '__config',
    '__config_site.in', '__debug', '__hash_table',
    '__threading_support', '__tree', '__undef_macros', '__verbose_abort'
]
def find_script(file):
    """Finds the script used to generate a file inside the file itself. The script is delimited by
    BEGIN-SCRIPT and END-SCRIPT markers.

    Raises RuntimeError when the markers are missing.
    """
    with open(file, 'r') as f:
        content = f.read()
    match = re.search(r'^BEGIN-SCRIPT$(.+)^END-SCRIPT$', content, flags=re.MULTILINE | re.DOTALL)
    if not match:
        # Bug fix: this message previously interpolated the undefined name
        # `test_file`, so the failure surfaced as a NameError instead of the
        # intended RuntimeError.
        raise RuntimeError("Was unable to find a script delimited with BEGIN-SCRIPT/END-SCRIPT markers in {}".format(file))
    return match.group(1)
def execute_script(script, variables):
    """Execute `script` with `variables` in its global scope and return
    everything it printed to stdout.
    """
    compiled = compile(script, 'fake-filename', 'exec')
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        exec(compiled, variables)
    return buffer.getvalue()
def generate_new_file(file, new_content):
    """Return the contents of `file` with the region between its two
    '// GENERATED-MARKER' lines replaced by `new_content`.
    """
    with open(file, 'r') as f:
        old_content = f.read()
    # Splitting on the captured marker yields exactly five pieces when the
    # marker occurs exactly twice.
    pieces = re.split(r'(// GENERATED-MARKER\n)', old_content, flags=re.MULTILINE | re.DOTALL)
    try:
        head, first_marker, _, second_marker, tail = pieces
    except ValueError:
        raise RuntimeError("Failed to split {} based on markers, please make sure the file has exactly two '// GENERATED-MARKER' occurrences".format(file))
    return head + first_marker + new_content + second_marker + tail
def produce(test_file, variables):
    """Re-run the script embedded in `test_file` and rewrite its generated region in place."""
    embedded_script = find_script(test_file)
    generated = execute_script(embedded_script, variables)
    updated = generate_new_file(test_file, generated)
    with open(test_file, 'w', newline='\n') as f:
        f.write(updated)
def is_header(file):
    """Returns whether the given file is a header (i.e. not a directory or the modulemap file)."""
    if file.is_dir():
        return False
    return file.name != 'module.modulemap.in'
def main():
    """Regenerate every marker-delimited libc++ test file from the header lists."""
    # Assumes this script sits three directory levels below the monorepo root
    # -- TODO confirm against the actual checkout layout.
    monorepo_root = pathlib.Path(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    include = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'include'))
    test = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'test'))
    assert(monorepo_root.exists())
    # Public headers: top-level, experimental/, and ext/, relative to include/.
    toplevel_headers = sorted(str(p.relative_to(include)) for p in include.glob('[a-z]*') if is_header(p))
    experimental_headers = sorted(str(p.relative_to(include)) for p in include.glob('experimental/[a-z]*') if is_header(p))
    extended_headers = sorted(str(p.relative_to(include)) for p in include.glob('ext/[a-z]*') if is_header(p))
    public_headers = toplevel_headers + experimental_headers + extended_headers
    # Private (detail) headers: anything whose relative path starts with '__'.
    private_headers = sorted(str(p.relative_to(include)) for p in include.rglob('*') if is_header(p) and str(p.relative_to(include)).startswith('__'))
    # These names are made visible to the scripts embedded in each test file.
    variables = {
        'toplevel_headers': toplevel_headers,
        'experimental_headers': experimental_headers,
        'extended_headers': extended_headers,
        'public_headers': public_headers,
        'private_headers': private_headers,
        'header_restrictions': header_restrictions,
        'private_headers_still_public_in_modules': private_headers_still_public_in_modules
    }
    # Each of these files embeds its own generator script (BEGIN/END-SCRIPT).
    produce(test.joinpath('libcxx/assertions/headers_declare_verbose_abort.sh.cpp'), variables)
    produce(test.joinpath('libcxx/clang_tidy.sh.cpp'), variables)
    produce(test.joinpath('libcxx/double_include.sh.cpp'), variables)
    produce(test.joinpath('libcxx/min_max_macros.compile.pass.cpp'), variables)
    produce(test.joinpath('libcxx/modules_include.sh.cpp'), variables)
    produce(test.joinpath('libcxx/nasty_macros.compile.pass.cpp'), variables)
    produce(test.joinpath('libcxx/no_assert_include.compile.pass.cpp'), variables)
    produce(test.joinpath('libcxx/private_headers.verify.cpp'), variables)
    produce(test.joinpath('libcxx/transitive_includes.sh.cpp'), variables)

View File

@@ -0,0 +1,224 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import argparse
import os
import re
import sys
def is_config_header(h):
    """True for the configuration headers that nearly everything includes."""
    basename = os.path.basename(h)
    return basename in ('__config', '__undef_macros', 'version')
def is_experimental_header(h):
    """True when the path lies in an experimental/ or ext/ directory."""
    if 'experimental/' in h:
        return True
    return 'ext/' in h
def is_support_header(h):
    """True when the path lies in the __support/ directory."""
    needle = '__support/'
    return needle in h
class FileEntry:
    """One node of the include graph: a header's direct includes and line counts."""
    def __init__(self, includes, individual_linecount):
        # Fully-qualified paths of the headers this file directly includes.
        self.includes = includes
        # Number of lines in this file alone.
        self.individual_linecount = individual_linecount
        self.cumulative_linecount = None  # documentation: this gets filled in later
        self.is_graph_root = None  # documentation: this gets filled in later
def list_all_roots_under(root):
    """Collect the public header files under `root`.

    Skips files inside '__'-prefixed directories, '__'-prefixed files, and
    files with an extension other than '.h'.
    """
    headers = []
    for dirpath, _, filenames in os.walk(root):
        # Renamed from the original's shadowed `root` loop variable.
        in_detail_dir = os.path.basename(dirpath).startswith('__')
        for fname in filenames:
            if in_detail_dir or fname.startswith('__'):
                continue
            if '.' in fname and not fname.endswith('.h'):
                continue
            headers.append(dirpath + '/' + fname)
    return headers
def build_file_entry(fname, options):
    """Parse `fname` and return a FileEntry holding its resolved includes
    and line count.

    #include <...> directives are resolved against options.search_dirs;
    #include "..." directives are resolved relative to the including file's
    own directory.
    """
    assert os.path.exists(fname)

    def locate_header_file(h, paths):
        # Return the first existing `p/h`; None (or raise) when not found.
        for p in paths:
            fullname = p + '/' + h
            if os.path.exists(fullname):
                return fullname
        if options.error_on_file_not_found:
            raise RuntimeError('Header not found: %s, included by %s' % (h, fname))
        return None

    local_includes = []
    system_includes = []
    linecount = 0
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            linecount += 1
            m = re.match(r'\s*#\s*include\s+"([^"]*)"', line)
            if m is not None:
                local_includes.append(m.group(1))
            m = re.match(r'\s*#\s*include\s+<([^>]*)>', line)
            if m is not None:
                system_includes.append(m.group(1))
    fully_qualified_includes = [
        locate_header_file(h, options.search_dirs)
        for h in system_includes
    ] + [
        # Bug fix: the directory must be wrapped in a list. Passing the bare
        # string made locate_header_file iterate over its CHARACTERS, so
        # quoted includes were effectively never resolved.
        locate_header_file(h, [os.path.dirname(fname)])
        for h in local_includes
    ]
    return FileEntry(
        # If file-not-found wasn't an error, then skip non-found files
        includes=[h for h in fully_qualified_includes if h is not None],
        individual_linecount=linecount,
    )
def transitive_closure_of_includes(graph, h1):
    """Return the set of headers reachable from h1, including h1 itself."""
    seen = set()
    pending = [h1]
    while pending:
        current = pending.pop()
        if current in seen:
            continue
        seen.add(current)
        pending.extend(graph[current].includes)
    return seen
def transitively_includes(graph, h1, h2):
    """True when h2 is reachable from h1 through the include graph (and h1 != h2)."""
    if h1 == h2:
        return False
    return h2 in transitive_closure_of_includes(graph, h1)
def build_graph(roots, options):
    """Breadth-first walk of the include graph starting at `roots`.

    Returns a dict mapping each reachable filename to its FileEntry, with
    is_graph_root and cumulative_linecount populated.
    """
    original_roots = list(roots)
    graph = {}
    frontier = list(roots)
    while frontier:
        next_frontier = []
        for fname in frontier:
            if fname in graph:
                continue
            entry = build_file_entry(fname, options)
            entry.is_graph_root = fname in original_roots
            graph[fname] = entry
            next_frontier.extend(entry.includes)
        frontier = next_frontier
    # Second pass: total line count of each file plus everything it pulls in.
    for fname, entry in graph.items():
        entry.cumulative_linecount = sum(
            graph[h].individual_linecount
            for h in transitive_closure_of_includes(graph, fname)
        )
    return graph
def get_friendly_id(fname):
    """Strip everything up to and including 'include/' from a header path.

    Raises ValueError when the path contains no 'include/' component.
    """
    # Fix: str.index raises ValueError on failure (it never returns -1), so
    # the original `assert i >= 0` was dead code written for str.find.
    prefix_end = fname.index('include/') + len('include/')
    return fname[prefix_end:]
def get_graphviz(graph, options):
    """Render the include graph as GraphViz 'strict digraph' source text."""
    def get_decorators(fname, entry):
        # Node attributes: bold for requested roots, optional count labels.
        result = ''
        if entry.is_graph_root:
            result += ' [style=bold]'
        if options.show_individual_line_counts and options.show_cumulative_line_counts:
            result += ' [label="%s\\n%d indiv, %d cumul"]' % (
                get_friendly_id(fname), entry.individual_linecount, entry.cumulative_linecount
            )
        elif options.show_individual_line_counts:
            result += ' [label="%s\\n%d indiv"]' % (get_friendly_id(fname), entry.individual_linecount)
        elif options.show_cumulative_line_counts:
            result += ' [label="%s\\n%d cumul"]' % (get_friendly_id(fname), entry.cumulative_linecount)
        return result
    result = ''
    result += 'strict digraph {\n'
    result += ' rankdir=LR;\n'
    result += ' layout=dot;\n\n'
    for fname, entry in graph.items():
        result += ' "%s"%s;\n' % (get_friendly_id(fname), get_decorators(fname, entry))
        for h in entry.includes:
            # Omit edges already implied by transitivity, unless asked to keep them.
            if any(transitively_includes(graph, i, h) for i in entry.includes) and not options.show_transitive_edges:
                continue
            result += ' "%s" -> "%s";\n' % (get_friendly_id(fname), get_friendly_id(h))
    result += '}\n'
    return result
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Produce a dependency graph of libc++ headers, in GraphViz dot format.\n' +
        'For example, ./graph_header_deps.py | dot -Tpng > graph.png',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--root', default=None, metavar='FILE', help='File or directory to be the root of the dependency graph')
    parser.add_argument('-I', dest='search_dirs', default=[], action='append', metavar='DIR', help='Path(s) to search for local includes')
    parser.add_argument('--show-transitive-edges', action='store_true', help='Show edges to headers that are transitively included anyway')
    parser.add_argument('--show-config-headers', action='store_true', help='Show universally included headers, such as __config')
    parser.add_argument('--show-experimental-headers', action='store_true', help='Show headers in the experimental/ and ext/ directories')
    parser.add_argument('--show-support-headers', action='store_true', help='Show headers in the __support/ directory')
    parser.add_argument('--show-individual-line-counts', action='store_true', help='Include an individual line count in each node')
    parser.add_argument('--show-cumulative-line-counts', action='store_true', help='Include a total line count in each node')
    parser.add_argument('--error-on-file-not-found', action='store_true', help="Don't ignore failure to open an #included file")
    options = parser.parse_args()
    # Default the root to libcxx/include (this script lives in utils/).
    if options.root is None:
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        options.root = os.path.join(curr_dir, '../include')
    # With no explicit -I, search the root directory itself.
    if options.search_dirs == [] and os.path.isdir(options.root):
        options.search_dirs = [options.root]
    options.root = os.path.abspath(options.root)
    options.search_dirs = [os.path.abspath(p) for p in options.search_dirs]
    # The root may be a single header or a whole directory tree.
    if os.path.isdir(options.root):
        roots = list_all_roots_under(options.root)
    elif os.path.isfile(options.root):
        roots = [options.root]
    else:
        raise RuntimeError('--root seems to be invalid')
    graph = build_graph(roots, options)
    # Eliminate certain kinds of "visual noise" headers, if asked for.
    def should_keep(fname):
        return all([
            options.show_config_headers or not is_config_header(fname),
            options.show_experimental_headers or not is_experimental_header(fname),
            options.show_support_headers or not is_support_header(fname),
        ])
    for fname in list(graph.keys()):
        if should_keep(fname):
            graph[fname].includes = [h for h in graph[fname].includes if should_keep(h)]
        else:
            del graph[fname]
    # Look for cycles.
    no_cycles_detected = True
    for fname, entry in graph.items():
        for h in entry.includes:
            if h == fname:
                sys.stderr.write('Cycle detected: %s includes itself\n' % (
                    get_friendly_id(fname)
                ))
                no_cycles_detected = False
            elif transitively_includes(graph, h, fname):
                sys.stderr.write('Cycle detected between %s and %s\n' % (
                    get_friendly_id(fname), get_friendly_id(h)
                ))
                no_cycles_detected = False
    # Refuse to emit a graph for a cyclic include structure.
    assert no_cycles_detected
    print(get_graphviz(graph, options))

View File

@@ -0,0 +1,16 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx python utilities"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
# Package version as a (major, minor, patch) tuple.
__versioninfo__ = (0, 1, 0)
# NOTE(review): the space separator yields "0 1 0dev"; a '.' separator
# ("0.1.0dev") looks intended -- confirm before changing.
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
# Nothing is exported via `from libcxx import *`.
__all__ = []

View File

@@ -0,0 +1,308 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import platform
import os
import libcxx.util
class CXXCompiler(object):
    """Wrapper describing a C++ compiler invocation used by the libc++ test suite."""
    # Compilation modes consumed by _basicCmd and related helpers.
    CM_Default = 0
    CM_PreProcess = 1
    CM_Compile = 2
    CM_Link = 3
    def __init__(self, config, path, flags=None, compile_flags=None, link_flags=None,
                 warning_flags=None, verify_supported=None,
                 verify_flags=None, use_verify=False,
                 modules_flags=None, use_modules=False,
                 use_ccache=False, use_warnings=False, compile_env=None,
                 cxx_type=None, cxx_version=None):
        """Record the compiler path, flag sets and feature toggles.

        All flag lists are defensively copied.  When cxx_type/cxx_version are
        not supplied, they are detected from the compiler's predefined macros.
        """
        self.libcxx_config = config
        self.source_lang = 'c++'
        self.path = path
        self.flags = list(flags or [])
        self.compile_flags = list(compile_flags or [])
        self.link_flags = list(link_flags or [])
        self.warning_flags = list(warning_flags or [])
        self.verify_supported = verify_supported
        self.use_verify = use_verify
        self.verify_flags = list(verify_flags or [])
        # -verify mode requires support plus explicit flags.
        assert not use_verify or verify_supported
        assert not use_verify or verify_flags is not None
        self.modules_flags = list(modules_flags or [])
        self.use_modules = use_modules
        assert not use_modules or modules_flags is not None
        self.use_ccache = use_ccache
        self.use_warnings = use_warnings
        # Copy the environment override dict, if one was given.
        if compile_env is not None:
            self.compile_env = dict(compile_env)
        else:
            self.compile_env = None
        self.type = cxx_type
        self.version = cxx_version
        # Probe the compiler (via dumpMacros) when type/version were not given.
        if self.type is None or self.version is None:
            self._initTypeAndVersion()
def isVerifySupported(self):
if self.verify_supported is None:
self.verify_supported = self.hasCompileFlag(['-Xclang',
'-verify-ignore-unexpected'])
if self.verify_supported:
self.verify_flags = [
'-Xclang', '-verify',
'-Xclang', '-verify-ignore-unexpected=note',
'-ferror-limit=1024'
]
return self.verify_supported
def useVerify(self, value=True):
self.use_verify = value
assert not self.use_verify or self.verify_flags is not None
def useModules(self, value=True):
self.use_modules = value
assert not self.use_modules or self.modules_flags is not None
def useCCache(self, value=True):
self.use_ccache = value
def useWarnings(self, value=True):
self.use_warnings = value
def _initTypeAndVersion(self):
# Get compiler type and version
macros = self.dumpMacros()
if macros is None:
return
compiler_type = None
major_ver = minor_ver = patchlevel = None
if '__clang__' in macros.keys():
compiler_type = 'clang'
# Treat apple's llvm fork differently.
if '__apple_build_version__' in macros.keys():
compiler_type = 'apple-clang'
major_ver = macros['__clang_major__']
minor_ver = macros['__clang_minor__']
patchlevel = macros['__clang_patchlevel__']
elif '__GNUC__' in macros.keys():
compiler_type = 'gcc'
major_ver = macros['__GNUC__']
minor_ver = macros['__GNUC_MINOR__']
patchlevel = macros['__GNUC_PATCHLEVEL__']
self.type = compiler_type
self.version = (major_ver, minor_ver, patchlevel)
def _basicCmd(self, source_files, out, mode=CM_Default, flags=[],
input_is_cxx=False):
cmd = []
if self.use_ccache \
and not mode == self.CM_Link \
and not mode == self.CM_PreProcess:
cmd += ['ccache']
cmd += [self.path]
if out is not None:
cmd += ['-o', out]
if input_is_cxx:
cmd += ['-x', self.source_lang]
if isinstance(source_files, list):
cmd += source_files
elif isinstance(source_files, str):
cmd += [source_files]
else:
raise TypeError('source_files must be a string or list')
if mode == self.CM_PreProcess:
cmd += ['-E']
elif mode == self.CM_Compile:
cmd += ['-c']
cmd += self.flags
if self.use_verify:
cmd += self.verify_flags
assert mode in [self.CM_Default, self.CM_Compile]
if self.use_modules:
cmd += self.modules_flags
if mode != self.CM_Link:
cmd += self.compile_flags
if self.use_warnings:
cmd += self.warning_flags
if mode != self.CM_PreProcess and mode != self.CM_Compile:
cmd += self.link_flags
cmd += flags
return cmd
def preprocessCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_PreProcess,
input_is_cxx=True)
def compileCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Compile,
input_is_cxx=True) + ['-c']
def linkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Link)
def compileLinkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags)
def preprocess(self, source_files, out=None, flags=[], cwd=None):
cmd = self.preprocessCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compile(self, source_files, out=None, flags=[], cwd=None):
cmd = self.compileCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def link(self, source_files, exec_path=None, flags=[], cwd=None):
cmd = self.linkCmd(source_files, exec_path, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
cs_cmd, cs_out, cs_err, cs_rc = self.codesign(exec_path, cwd)
if cs_rc != 0:
return cs_cmd, cs_out, cs_err, cs_rc
return cmd, out, err, rc
def compileLink(self, source_files, exec_path=None, flags=[],
cwd=None):
cmd = self.compileLinkCmd(source_files, exec_path, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
cs_cmd, cs_out, cs_err, cs_rc = self.codesign(exec_path, cwd)
if cs_rc != 0:
return cs_cmd, cs_out, cs_err, cs_rc
return cmd, out, err, rc
def codesign(self, exec_path, cwd=None):
null_op = [], '', '', 0
if not exec_path:
return null_op
codesign_ident = self.libcxx_config.get_lit_conf('llvm_codesign_identity', '')
if not codesign_ident:
return null_op
cmd = ['xcrun', 'codesign', '-s', codesign_ident, exec_path]
out, err, rc = libcxx.util.executeCommand(cmd, cwd=cwd)
return cmd, out, err, rc
def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
flags=[], cwd=None):
if not isinstance(source_file, str):
raise TypeError('This function only accepts a single input file')
if object_file is None:
# Create, use and delete a temporary object file if none is given.
with_fn = lambda: libcxx.util.guardedTempFilename(suffix='.o')
else:
# Otherwise wrap the filename in a context manager function.
with_fn = lambda: libcxx.util.nullContext(object_file)
with with_fn() as object_file:
cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
source_file, object_file, flags=flags, cwd=cwd)
if rc != 0:
return cc_cmd, cc_stdout, cc_stderr, rc
link_cmd, link_stdout, link_stderr, rc = self.link(
object_file, exec_path=out, flags=flags, cwd=cwd)
return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
cc_stderr + link_stderr, rc)
def dumpMacros(self, source_files=None, flags=[], cwd=None):
if source_files is None:
source_files = os.devnull
flags = ['-dM'] + flags
cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
if rc != 0:
return cmd, out, err, rc
parsed_macros = {}
lines = [l.strip() for l in out.split('\n') if l.strip()]
for l in lines:
assert l.startswith('#define ')
l = l[len('#define '):]
macro, _, value = l.partition(' ')
parsed_macros[macro] = value
return parsed_macros
def getTriple(self):
cmd = [self.path] + self.flags + ['-dumpmachine']
return libcxx.util.capture(cmd).strip()
def hasCompileFlag(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
# Add -Werror to ensure that an unrecognized flag causes a non-zero
# exit code. -Werror is supported on all known compiler types.
if self.type is not None:
flags += ['-Werror', '-fsyntax-only']
cmd, out, err, rc = self.compile(os.devnull, out=os.devnull,
flags=flags)
return rc == 0
def addFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.flags += flags
return True
else:
return False
def addCompileFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.compile_flags += flags
return True
else:
return False
def hasWarningFlag(self, flag):
"""
hasWarningFlag - Test if the compiler supports a given warning flag.
Unlike addCompileFlagIfSupported, this function detects when
"-Wno-<warning>" flags are unsupported. If flag is a
"-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
another error is triggered during compilation.
"""
assert isinstance(flag, str)
assert flag.startswith('-W')
if not flag.startswith('-Wno-'):
return self.hasCompileFlag(flag)
flags = ['-Werror', flag]
old_use_warnings = self.use_warnings
self.useWarnings(False)
cmd = self.compileCmd('-', os.devnull, flags)
self.useWarnings(old_use_warnings)
# Remove '-v' because it will cause the command line invocation
# to be printed as part of the error output.
# TODO(EricWF): Are there other flags we need to worry about?
if '-v' in cmd:
cmd.remove('-v')
out, err, rc = libcxx.util.executeCommand(
cmd, input=libcxx.util.to_bytes('#error\n'))
assert rc != 0
if flag in err:
return False
return True
def addWarningFlagIfSupported(self, flag):
if self.hasWarningFlag(flag):
if flag not in self.warning_flags:
self.warning_flags += [flag]
return True
return False

View File

@@ -0,0 +1,16 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx abi symbol checker"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = ['diff', 'extract', 'util']

View File

@@ -0,0 +1,102 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
diff - A set of functions for diff-ing two symbol lists.
"""
from libcxx.sym_check import util
def _symbol_difference(lhs, rhs):
lhs_names = set(((n['name'], n['type']) for n in lhs))
rhs_names = set(((n['name'], n['type']) for n in rhs))
diff_names = lhs_names - rhs_names
return [n for n in lhs if (n['name'], n['type']) in diff_names]
def _find_by_key(sym_list, k):
for sym in sym_list:
if sym['name'] == k:
return sym
return None
def added_symbols(old, new):
    """Symbols present in new but absent from old."""
    return _symbol_difference(new, old)
def removed_symbols(old, new):
    """Symbols present in old but absent from new."""
    return _symbol_difference(old, new)
def changed_symbols(old, new):
    """Return (old_sym, new_sym) pairs for symbols whose entry changed.

    A pair is reported when a symbol with the same name exists in both
    lists but the two dictionaries are not identical.
    """
    pairs = []
    for old_sym in old:
        if old_sym in new:
            continue  # identical entry still present: not a change
        counterpart = _find_by_key(new, old_sym['name'])
        if counterpart is None or counterpart in old:
            continue
        if old_sym != counterpart:
            pairs.append((old_sym, counterpart))
    return pairs
def diff(old, new):
    """Compute the (added, removed, changed) triple between symbol lists."""
    return (added_symbols(old, new),
            removed_symbols(old, new),
            changed_symbols(old, new))
def report_diff(added_syms, removed_syms, changed_syms, names_only=False,
                demangle=True):
    """Render a human-readable ABI diff report.

    Returns (report, abi_break, is_different): the report string, whether
    the diff constitutes an ABI break (removed — or, when not names_only,
    changed — symbols), and whether anything differs at all.
    """
    def maybe_demangle(name):
        # Demangling is best-effort and optional.
        return util.demangle_symbol(name) if demangle else name
    report = ''
    for sym in added_syms:
        report += 'Symbol added: %s\n' % maybe_demangle(sym['name'])
        if not names_only:
            report += ' %s\n\n' % sym
    if added_syms and names_only:
        report += '\n'
    for sym in removed_syms:
        # NOTE(review): casing differs from 'Symbol added:' above —
        # presumably intentional emphasis for breakage; confirm.
        report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name'])
        if not names_only:
            report += ' %s\n\n' % sym
    if removed_syms and names_only:
        report += '\n'
    if not names_only:
        for sym_pair in changed_syms:
            old_sym, new_sym = sym_pair
            old_str = '\n OLD SYMBOL: %s' % old_sym
            new_str = '\n NEW SYMBOL: %s' % new_sym
            report += ('SYMBOL CHANGED: %s%s%s\n\n' %
                       (maybe_demangle(old_sym['name']),
                        old_str, new_str))
    added = bool(len(added_syms) != 0)
    # Removed symbols always break ABI; changed symbols only count when the
    # full entries (not just names) were compared.
    abi_break = bool(len(removed_syms))
    if not names_only:
        abi_break = abi_break or len(changed_syms)
    if added or abi_break:
        report += 'Summary\n'
        report += ' Added: %d\n' % len(added_syms)
        report += ' Removed: %d\n' % len(removed_syms)
        if not names_only:
            report += ' Changed: %d\n' % len(changed_syms)
        if not abi_break:
            report += 'Symbols added.'
        else:
            report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!'
    else:
        report += 'Symbols match.'
    is_different = abi_break or bool(len(added_syms)) \
        or bool(len(changed_syms))
    return report, abi_break, is_different

View File

@@ -0,0 +1,294 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
extract - A set of function that extract symbol lists from shared libraries.
"""
import distutils.spawn
import os.path
from os import environ
import re
import subprocess
import sys
from libcxx.sym_check import util
extract_ignore_names = ['_init', '_fini']
class NMExtractor(object):
    """
    NMExtractor - Extract symbol lists from libraries using nm.
    """
    @staticmethod
    def find_tool():
        """
        Search for the nm executable and return the path.
        """
        return distutils.spawn.find_executable('nm')
    def __init__(self, static_lib):
        """
        Initialize the nm executable and flags that will be used to extract
        symbols from shared libraries.
        """
        self.nm_exe = self.find_tool()
        if self.nm_exe is None:
            # ERROR no NM found
            print("ERROR: Could not find nm")
            sys.exit(1)
        self.static_lib = static_lib
        # POSIX output format (-P) and external symbols only (-g).
        self.flags = ['-P', '-g']
        if sys.platform.startswith('aix'):
            # AIX nm demangles symbols by default, so suppress that.
            self.flags.append('-C')
    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.nm_exe] + self.flags + [lib]
        out = subprocess.check_output(cmd).decode()
        fmt_syms = (self._extract_sym(l)
                    for l in out.splitlines() if l.strip())
        # Cast symbol to string.
        final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
        # Make unique and sort strings.
        tmp_list = list(sorted(set(final_syms)))
        # Cast string back to symbol.
        return util.read_syms_from_list(tmp_list)
    def _extract_sym(self, sym_str):
        # Parse one line of nm -P output: "name type [value [size]]".
        # Returns None for lines too short to be symbols.
        bits = sym_str.split()
        # Everything we want has at least two columns.
        if len(bits) < 2:
            return None
        new_sym = {
            'name': bits[0],
            'type': bits[1],
            # 'U'/'u' marks an undefined (imported) symbol.
            'is_defined': (bits[1].lower() != 'u')
        }
        # Collapse the default-version marker '@@' to a plain '@'.
        new_sym['name'] = new_sym['name'].replace('@@', '@')
        new_sym = self._transform_sym_type(new_sym)
        # NM types which we want to save the size for.
        if new_sym['type'] == 'OBJECT' and len(bits) > 3:
            new_sym['size'] = int(bits[3], 16)
        return new_sym
    @staticmethod
    def _want_sym(sym):
        """
        Check that s is a valid symbol that we want to keep.
        """
        # len(sym) counts dict keys: a parsed symbol has at least
        # 'name' and 'type'.
        if sym is None or len(sym) < 2:
            return False
        if sym['name'] in extract_ignore_names:
            return False
        # Local text/bss/rodata/data/weak-undefined entries are private.
        bad_types = ['t', 'b', 'r', 'd', 'w']
        return (sym['type'] not in bad_types
                and sym['name'] not in ['__bss_start', '_end', '_edata'])
    @staticmethod
    def _transform_sym_type(sym):
        """
        Map the nm single letter output for type to either FUNC or OBJECT.
        If the type is not recognized it is left unchanged.
        """
        func_types = ['T', 'W']
        obj_types = ['B', 'D', 'R', 'V', 'S']
        if sym['type'] in func_types:
            sym['type'] = 'FUNC'
        elif sym['type'] in obj_types:
            sym['type'] = 'OBJECT'
        return sym
class ReadElfExtractor(object):
    """
    ReadElfExtractor - Extract symbol lists from libraries using readelf.
    """
    @staticmethod
    def find_tool():
        """
        Search for the readelf executable and return the path.
        """
        return distutils.spawn.find_executable('readelf')
    def __init__(self, static_lib):
        """
        Initialize the readelf executable and flags that will be used to
        extract symbols from shared libraries.
        """
        self.tool = self.find_tool()
        if self.tool is None:
            print("ERROR: Could not find readelf")
            sys.exit(1)
        # TODO: Support readelf for reading symbols from archives
        # Fix: use the two-argument assert so the message is actually
        # reported. The previous `assert not static_lib and "msg"` evaluated
        # to a bare False whenever static_lib was truthy, raising an
        # AssertionError with no message at all.
        assert not static_lib, "ReadElf does not yet support static libs"
        self.flags = ['--wide', '--symbols']
    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.tool] + self.flags + [lib]
        out = subprocess.check_output(cmd).decode()
        dyn_syms = self.get_dynsym_table(out)
        return self.process_syms(dyn_syms)
    def process_syms(self, sym_list):
        """Parse .dynsym table rows into symbol dicts.

        Rows with only 7 columns have no name field and are skipped, as are
        NOTYPE entries and names in extract_ignore_names. FUNC symbols drop
        their (meaningless) size.
        """
        new_syms = []
        for s in sym_list:
            parts = s.split()
            if not parts:
                continue
            assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9
            if len(parts) == 7:
                continue
            new_sym = {
                'name': parts[7],
                'size': int(parts[2]),
                'type': parts[3],
                'is_defined': (parts[6] != 'UND')
            }
            assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS']
            if new_sym['name'] in extract_ignore_names:
                continue
            if new_sym['type'] == 'NOTYPE':
                continue
            if new_sym['type'] == 'FUNC':
                del new_sym['size']
            new_syms += [new_sym]
        return new_syms
    def get_dynsym_table(self, out):
        """Slice the '.dynsym' section out of readelf output.

        Skips the two header lines and stops at the first blank line after
        the table starts (or end of output).
        """
        lines = out.splitlines()
        start = -1
        end = -1
        for i in range(len(lines)):
            if lines[i].startswith("Symbol table '.dynsym'"):
                start = i + 2
            if start != -1 and end == -1 and not lines[i].strip():
                end = i + 1
        assert start != -1
        if end == -1:
            end = len(lines)
        return lines[start:end]
class AIXDumpExtractor(object):
    """
    AIXDumpExtractor - Extract symbol lists from libraries using AIX dump.
    """
    @staticmethod
    def find_tool():
        """
        Search for the dump executable and return the path.
        """
        return distutils.spawn.find_executable('dump')
    def __init__(self, static_lib):
        """
        Initialize the dump executable and flags that will be used to
        extract symbols from shared libraries.
        """
        # TODO: Support dump for reading symbols from static libraries
        # Fix: use the two-argument assert so the message is actually
        # reported. The previous `assert not static_lib and "msg"` evaluated
        # to a bare False whenever static_lib was truthy, raising an
        # AssertionError with no message at all.
        assert not static_lib, "static libs not yet supported with dump"
        self.tool = self.find_tool()
        if self.tool is None:
            print("ERROR: Could not find dump")
            sys.exit(1)
        # -n: loader section symbols; -v: symbolic (verbose) output.
        self.flags = ['-n', '-v']
        # Honor the AIX OBJECT_MODE convention for 32/64-bit objects.
        object_mode = environ.get('OBJECT_MODE')
        if object_mode == '32':
            self.flags += ['-X32']
        elif object_mode == '64':
            self.flags += ['-X64']
        else:
            self.flags += ['-X32_64']
    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.tool] + self.flags + [lib]
        out = subprocess.check_output(cmd).decode()
        loader_syms = self.get_loader_symbol_table(out)
        return self.process_syms(loader_syms)
    def process_syms(self, sym_list):
        """Parse loader-section rows into symbol dicts.

        Rows with 7 columns carry no name and are skipped, as are names in
        extract_ignore_names.
        """
        new_syms = []
        for s in sym_list:
            parts = s.split()
            if not parts:
                continue
            assert len(parts) == 8 or len(parts) == 7
            if len(parts) == 7:
                continue
            new_sym = {
                'name': parts[7],
                # 'DS' (function descriptor) marks functions on AIX.
                'type': 'FUNC' if parts[4] == 'DS' else 'OBJECT',
                'is_defined': (parts[5] != 'EXTref'),
                'storage_mapping_class': parts[4],
                'import_export': parts[3]
            }
            if new_sym['name'] in extract_ignore_names:
                continue
            new_syms += [new_sym]
        return new_syms
    def get_loader_symbol_table(self, out):
        """Return only the '[N] ...' rows of dump output (the symbol rows)."""
        lines = out.splitlines()
        return filter(lambda n: re.match(r'^\[[0-9]+\]', n), lines)
    @staticmethod
    def is_shared_lib(lib):
        """
        Check for the shared object flag in XCOFF headers of the input file or
        library archive.
        """
        dump = AIXDumpExtractor.find_tool()
        if dump is None:
            print("ERROR: Could not find dump")
            sys.exit(1)
        cmd = [dump, '-X32_64', '-ov', lib]
        out = subprocess.check_output(cmd).decode()
        return out.find("SHROBJ") != -1
def is_static_library(lib_file):
    """
    Determine if a given library is static or shared.
    """
    if sys.platform.startswith('aix'):
        # An AIX library could be both, but for simplicity assume it isn't.
        return not AIXDumpExtractor.is_shared_lib(lib_file)
    # Everywhere else the '.a' extension is authoritative enough.
    return os.path.splitext(lib_file)[1] == '.a'
def extract_symbols(lib_file, static_lib=None):
    """
    Extract and return a list of symbols extracted from a static or dynamic
    library. The symbols are extracted using dump, nm or readelf. They are
    then filtered and formated. Finally the symbols are made unique.
    """
    if static_lib is None:
        static_lib = is_static_library(lib_file)
    if sys.platform.startswith('aix'):
        extractor_cls = AIXDumpExtractor
    elif ReadElfExtractor.find_tool() and not static_lib:
        extractor_cls = ReadElfExtractor
    else:
        extractor_cls = NMExtractor
    return extractor_cls(static_lib=static_lib).extract(lib_file)

View File

@@ -0,0 +1,39 @@
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
match - A set of functions for matching symbols in a list to a list of regexs
"""
import re
def find_and_report_matching(symbol_list, regex_list):
    """For each regex, report the matching symbols; return (count, report)."""
    found_count = 0
    pieces = []
    for regex_str in regex_list:
        pieces.append('Matching regex "%s":\n' % regex_str)
        matching_list = find_matching_symbols(symbol_list, regex_str)
        if not matching_list:
            pieces.append(' No matches found\n\n')
            continue
        found_count += len(matching_list)
        for m in matching_list:
            pieces.append(' MATCHES: %s\n' % m['name'])
        pieces.append('\n')
    return found_count, ''.join(pieces)
def find_matching_symbols(symbol_list, regex_str):
    """Return the symbols whose 'name' matches regex_str (anchored at start)."""
    pattern = re.compile(regex_str)
    return [sym for sym in symbol_list if pattern.match(sym['name'])]

View File

@@ -0,0 +1,279 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from pprint import pformat
import ast
import distutils.spawn
import re
import subprocess
import sys
def read_syms_from_list(slist):
    """
    Read a list of symbols from a list of strings.
    Each string is one symbol.
    """
    return list(map(ast.literal_eval, slist))
def read_syms_from_file(filename):
    """
    Read a list of symbols in from a file.
    """
    with open(filename, 'r') as f:
        contents = f.read()
    return read_syms_from_list(contents.splitlines())
def read_exclusions(filename):
    """Read exclusion patterns: one per line, blanks and '#' comments skipped."""
    with open(filename, 'r') as f:
        raw = f.read().splitlines()
    stripped = (line.strip() for line in raw)
    return [line for line in stripped if line and not line.startswith('#')]
def write_syms(sym_list, out=None, names_only=False, filter=None):
    """
    Write a list of symbols to the file named by out.

    :param sym_list: list of symbol dicts, each with at least a 'name' key.
    :param out: output filename; stdout is used when None.
    :param names_only: when True, write just the symbol names.
    :param filter: optional callable applied to the sorted list before output.
    """
    # Fix: sort a copy instead of sorting sym_list in place, so the
    # caller's list is not reordered as a hidden side effect.
    out_list = sorted(sym_list, key=lambda x: x['name'])
    if filter is not None:
        out_list = filter(out_list)
    if names_only:
        out_list = [sym['name'] for sym in out_list]
    out_str = ''
    for sym in out_list:
        # Use pformat for consistent ordering of keys.
        out_str += pformat(sym, width=100000) + '\n'
    if out is None:
        sys.stdout.write(out_str)
    else:
        with open(out, 'w') as f:
            f.write(out_str)
# Resolved once at import time; demangling degrades to a no-op when
# c++filt is not installed.
_cppfilt_exe = distutils.spawn.find_executable('c++filt')
def demangle_symbol(symbol):
    # Best-effort demangling: on any failure (missing tool or a non-zero
    # exit) the mangled name is returned unchanged.
    if _cppfilt_exe is None:
        return symbol
    result = subprocess.run([_cppfilt_exe], input=symbol.encode(), capture_output=True)
    if result.returncode != 0:
        return symbol
    # NOTE(review): c++filt output keeps its trailing newline; callers seem
    # to embed this directly into reports — confirm before stripping.
    return result.stdout.decode()
def is_elf(filename):
    """True if filename starts with the 4-byte ELF magic number."""
    with open(filename, 'rb') as f:
        return f.read(4) == b'\x7fELF'
def is_mach_o(filename):
    """True if filename starts with any known Mach-O or fat-binary magic."""
    known_magics = (
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca',  # FAT_CIGAM
    )
    with open(filename, 'rb') as f:
        return f.read(4) in known_magics
def is_xcoff_or_big_ar(filename):
    """True if filename is an XCOFF object/shared object or an AIX "big"
    archive (magic '<bigaf>')."""
    with open(filename, 'rb') as f:
        magic_bytes = f.read(7)
    # Fix: the previous code compared a 4-byte slice against the 3-byte
    # literals b'\x01DF' / b'\x01F7' (i.e. 0x01 'D' 'F'), which could never
    # be equal, so XCOFF files were never recognized. The actual XCOFF
    # magic numbers are the 2-byte big-endian values 0x01DF (XCOFF32) and
    # 0x01F7 (XCOFF64).
    return magic_bytes[:2] in [
        b'\x01\xdf',  # XCOFF32
        b'\x01\xf7'   # XCOFF64
    ] or magic_bytes == b'<bigaf>'
def is_library_file(filename):
    """Dispatch to the platform-appropriate binary-format probe."""
    if sys.platform == 'darwin':
        return is_mach_o(filename)
    if sys.platform.startswith('aix'):
        return is_xcoff_or_big_ar(filename)
    return is_elf(filename)
def extract_or_load(filename):
    """Extract symbols from a binary, or parse filename as a symbol list."""
    import libcxx.sym_check.extract
    if not is_library_file(filename):
        return read_syms_from_file(filename)
    return libcxx.sym_check.extract.extract_symbols(filename)
def adjust_mangled_name(name):
    """Strip the extra leading underscore from Darwin-style '__Z' names."""
    return name[1:] if name.startswith('__Z') else name
# Mangled names of the replaceable global allocation functions: operator
# new / new[] and operator delete / delete[] (including sized deletes).
new_delete_std_symbols = [
    '_Znam',
    '_Znwm',
    '_ZdaPv',
    '_ZdaPvm',
    '_ZdlPv',
    '_ZdlPvm'
]
# Runtime-support symbols owned by the C++ ABI library rather than user
# code: __dynamic_cast, the personality routine, and the RTTI typeinfo
# (_ZTI*) and typeinfo-name (_ZTS*) records for the builtin types and
# their pointer/const-pointer variants.
cxxabi_symbols = [
    '___dynamic_cast',
    '___gxx_personality_v0',
    '_ZTIDi',
    '_ZTIDn',
    '_ZTIDs',
    '_ZTIPDi',
    '_ZTIPDn',
    '_ZTIPDs',
    '_ZTIPKDi',
    '_ZTIPKDn',
    '_ZTIPKDs',
    '_ZTIPKa',
    '_ZTIPKb',
    '_ZTIPKc',
    '_ZTIPKd',
    '_ZTIPKe',
    '_ZTIPKf',
    '_ZTIPKh',
    '_ZTIPKi',
    '_ZTIPKj',
    '_ZTIPKl',
    '_ZTIPKm',
    '_ZTIPKs',
    '_ZTIPKt',
    '_ZTIPKv',
    '_ZTIPKw',
    '_ZTIPKx',
    '_ZTIPKy',
    '_ZTIPa',
    '_ZTIPb',
    '_ZTIPc',
    '_ZTIPd',
    '_ZTIPe',
    '_ZTIPf',
    '_ZTIPh',
    '_ZTIPi',
    '_ZTIPj',
    '_ZTIPl',
    '_ZTIPm',
    '_ZTIPs',
    '_ZTIPt',
    '_ZTIPv',
    '_ZTIPw',
    '_ZTIPx',
    '_ZTIPy',
    '_ZTIa',
    '_ZTIb',
    '_ZTIc',
    '_ZTId',
    '_ZTIe',
    '_ZTIf',
    '_ZTIh',
    '_ZTIi',
    '_ZTIj',
    '_ZTIl',
    '_ZTIm',
    '_ZTIs',
    '_ZTIt',
    '_ZTIv',
    '_ZTIw',
    '_ZTIx',
    '_ZTIy',
    '_ZTSDi',
    '_ZTSDn',
    '_ZTSDs',
    '_ZTSPDi',
    '_ZTSPDn',
    '_ZTSPDs',
    '_ZTSPKDi',
    '_ZTSPKDn',
    '_ZTSPKDs',
    '_ZTSPKa',
    '_ZTSPKb',
    '_ZTSPKc',
    '_ZTSPKd',
    '_ZTSPKe',
    '_ZTSPKf',
    '_ZTSPKh',
    '_ZTSPKi',
    '_ZTSPKj',
    '_ZTSPKl',
    '_ZTSPKm',
    '_ZTSPKs',
    '_ZTSPKt',
    '_ZTSPKv',
    '_ZTSPKw',
    '_ZTSPKx',
    '_ZTSPKy',
    '_ZTSPa',
    '_ZTSPb',
    '_ZTSPc',
    '_ZTSPd',
    '_ZTSPe',
    '_ZTSPf',
    '_ZTSPh',
    '_ZTSPi',
    '_ZTSPj',
    '_ZTSPl',
    '_ZTSPm',
    '_ZTSPs',
    '_ZTSPt',
    '_ZTSPv',
    '_ZTSPw',
    '_ZTSPx',
    '_ZTSPy',
    '_ZTSa',
    '_ZTSb',
    '_ZTSc',
    '_ZTSd',
    '_ZTSe',
    '_ZTSf',
    '_ZTSh',
    '_ZTSi',
    '_ZTSj',
    '_ZTSl',
    '_ZTSm',
    '_ZTSs',
    '_ZTSt',
    '_ZTSv',
    '_ZTSw',
    '_ZTSx',
    '_ZTSy'
]
def is_stdlib_symbol_name(name, sym):
    """Heuristically decide whether a symbol belongs to the C++ standard
    library / runtime rather than to user code."""
    name = adjust_mangled_name(name)
    if re.search("@GLIBC|@GCC", name):
        # Only when symbol is defined do we consider it ours
        return sym['is_defined']
    if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
        return True
    return (name in new_delete_std_symbols
            or name in cxxabi_symbols
            or name.startswith('_Z'))
def filter_stdlib_symbols(syms):
    """Partition syms into (stdlib_symbols, other_symbols)."""
    stdlib_symbols = []
    other_symbols = []
    for sym in syms:
        canon_name = adjust_mangled_name(sym['name'])
        bucket = (stdlib_symbols if is_stdlib_symbol_name(canon_name, sym)
                  else other_symbols)
        bucket.append(sym)
    return stdlib_symbols, other_symbols

View File

@@ -0,0 +1,461 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import os
import pkgutil
import pipes
import platform
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
    """Load the lit site configuration named by --param=<param_name> or by
    the environment variable <env_name>, guarding against recursive loads."""
    site_cfg = lit_config.params.get(param_name,
                                     os.environ.get(env_name))
    if not site_cfg:
        # We haven't loaded the site specific configuration (the user is
        # probably trying to run on a test file directly, and either the site
        # configuration hasn't been created by the build system, or we are in an
        # out-of-tree build situation).
        lit_config.warning('No site specific configuration file found!'
                           ' Running the tests in the default configuration.')
    elif not os.path.isfile(site_cfg):
        lit_config.fatal(
            "Specified site configuration file does not exist: '%s'" %
            site_cfg)
    else:
        lit_config.note('using site specific configuration at %s' % site_cfg)
        real_load = lit_config.load_config
        # Null out the load_config function so that lit.site.cfg doesn't
        # recursively load a config even if it tries.
        # TODO: This is one hell of a hack. Fix it.
        def prevent_reload_fn(*args, **kwargs):
            pass
        lit_config.load_config = prevent_reload_fn
        real_load(config, site_cfg)
        lit_config.load_config = real_load
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
    """Convert a numeric macro token such as '201703L' or '1U' to an int."""
    return int(token.rstrip('UuLl'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.target_info.is_windows() and not self.target_info.is_mingw():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.target_info = make_target_info(self)
self.executor = self.get_lit_conf('executor')
self.configure_cxx()
self.configure_src_root()
self.configure_obj_root()
self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
self.configure_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_coverage()
self.configure_substitutions()
libcxx.test.newconfig.configure(
libcxx.test.params.DEFAULT_PARAMETERS,
libcxx.test.features.DEFAULT_FEATURES,
self.config,
self.lit_config
)
self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx).startswith('clang-cl')
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = []
link_flags = _prefixed_env_list('LIB', '-L')
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
    def configure_obj_root(self):
        """
        Determine the libc++ object (build) root directory.

        When 'libcxx_obj_root' is not given explicitly, probe well-known
        subdirectories of the project build root used by different build
        layouts, falling back to the project root itself.
        """
        self.project_obj_root = self.get_lit_conf('project_obj_root')
        self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
        if not self.libcxx_obj_root and self.project_obj_root is not None:
            # Candidate layouts: standalone, in-tree 'projects', and 'runtimes'.
            possible_roots = [
                os.path.join(self.project_obj_root, 'libcxx'),
                os.path.join(self.project_obj_root, 'projects', 'libcxx'),
                os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
            ]
            for possible_root in possible_roots:
                if os.path.isdir(possible_root):
                    self.libcxx_obj_root = possible_root
                    break
            else:
                # for/else: none of the candidate directories exist on disk.
                self.libcxx_obj_root = self.project_obj_root
    def configure_compile_flags(self):
        """
        Populate self.cxx.compile_flags: defaults first, then the
        'compile_flags' parameter, Windows CRT defines, and finally
        user-supplied 'test_compiler_flags' (last, so they can override).
        """
        self.configure_default_compile_flags()
        # Configure extra flags
        compile_flags_str = self.get_lit_conf('compile_flags', '')
        self.cxx.compile_flags += shlex.split(compile_flags_str)
        if self.target_info.is_windows():
            self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
            # Don't warn about using common but nonstandard unprefixed functions
            # like chdir, fileno.
            self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
            # Build the tests in the same configuration as libcxx itself,
            # to avoid mismatches if linked statically.
            self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
            # Required so that tests using min/max don't fail on Windows,
            # and so that those tests don't have to be changed to tolerate
            # this insanity.
            self.cxx.compile_flags += ['-DNOMINMAX']
        additional_flags = self.get_lit_conf('test_compiler_flags')
        if additional_flags:
            self.cxx.compile_flags += shlex.split(additional_flags)
    def configure_default_compile_flags(self):
        """
        Set up the baseline flags used by every test compilation: include
        paths, target-specific flags, sysroot/toolchain selection, support
        headers, and optional PSTL include paths.
        """
        # Configure include paths
        self.configure_compile_flags_header_includes()
        self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
        self.target_info.add_cxx_flags(self.cxx.flags)
        # Use verbose output for better errors
        self.cxx.flags += ['-v']
        sysroot = self.get_lit_conf('sysroot')
        if sysroot:
            self.cxx.flags += ['--sysroot=' + sysroot]
        gcc_toolchain = self.get_lit_conf('gcc_toolchain')
        if gcc_toolchain:
            self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because for
        # the Windows build of libc++, the forced inclusion of a header requires
        # that _DEBUG is defined. Incorrect ordering will result in -target
        # being elided.
        if self.target_info.is_windows() and self.debug_build:
            self.cxx.compile_flags += ['-D_DEBUG']
        # Add includes for support headers used in the tests.
        support_path = os.path.join(self.libcxx_src_root, 'test/support')
        self.cxx.compile_flags += ['-I' + support_path]
        # Add includes for the PSTL headers
        pstl_src_root = self.get_lit_conf('pstl_src_root')
        pstl_obj_root = self.get_lit_conf('pstl_obj_root')
        if pstl_src_root is not None and pstl_obj_root is not None:
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
            self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
(path, version) = os.path.split(cxx_headers)
(path, cxx) = os.path.split(path)
triple = self.get_lit_conf('target_triple', None)
if triple is not None:
cxx_target_headers = os.path.join(path, triple, cxx, version)
if os.path.isdir(cxx_target_headers):
self.cxx.compile_flags += ['-I', cxx_target_headers]
self.cxx.compile_flags += ['-I', cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
    def configure_link_flags(self):
        """
        Populate self.cxx.link_flags based on the standard library under test
        ('libc++', 'libstdc++', 'msvc' or 'cxx_default'), then append any
        user-provided 'link_flags'.
        """
        # Configure library path
        self.configure_link_flags_cxx_library_path()
        self.configure_link_flags_abi_library_path()
        # Configure libraries
        if self.cxx_stdlib_under_test == 'libc++':
            # Suppress the default C++ runtime so the tests link exactly the
            # library being tested.
            if self.target_info.is_mingw():
                self.cxx.link_flags += ['-nostdlib++']
            else:
                self.cxx.link_flags += ['-nodefaultlibs']
            # FIXME: Handle MSVCRT as part of the ABI library handling.
            if self.target_info.is_windows() and not self.target_info.is_mingw():
                self.cxx.link_flags += ['-nostdlib']
            self.configure_link_flags_cxx_library()
            self.configure_link_flags_abi_library()
            self.configure_extra_library_flags()
        elif self.cxx_stdlib_under_test == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
        elif self.cxx_stdlib_under_test == 'msvc':
            # FIXME: Correctly setup debug/release flags here.
            pass
        elif self.cxx_stdlib_under_test == 'cxx_default':
            self.cxx.link_flags += ['-pthread']
        else:
            self.lit_config.fatal('invalid stdlib under test')
        # User-supplied linker flags come last.
        link_flags_str = self.get_lit_conf('link_flags', '')
        self.cxx.link_flags += shlex.split(link_flags_str)
    def configure_link_flags_cxx_library_path(self):
        """
        Add library search paths (-L) and run-time lookup paths (-rpath, or
        PATH entries on Windows) for the C++ library, plus user-provided
        'test_linker_flags'.
        """
        if self.cxx_library_root:
            self.cxx.link_flags += ['-L' + self.cxx_library_root]
            # Windows finds shared libraries through PATH at run time.
            if self.target_info.is_windows() and self.link_shared:
                self.add_path(self.cxx.compile_env, self.cxx_library_root)
        if self.cxx_runtime_root:
            if not self.target_info.is_windows():
                self.cxx.link_flags += ['-Wl,-rpath,' +
                                        self.cxx_runtime_root]
            elif self.target_info.is_windows() and self.link_shared:
                self.add_path(self.exec_env, self.cxx_runtime_root)
        additional_flags = self.get_lit_conf('test_linker_flags')
        if additional_flags:
            self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if self.abi_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
else:
self.add_path(self.exec_env, self.abi_runtime_root)
def configure_link_flags_cxx_library(self):
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
assert os.path.exists(abs_path) and \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
    def configure_link_flags_abi_library(self):
        """
        Link the tests against the configured C++ ABI library.

        The 'cxx_abi' Lit parameter selects between libc++abi (default),
        libstdc++, libsupc++, system-libcxxabi, libcxxrt, vcruntime, or none.
        """
        cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
        if cxx_abi == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++']
        elif cxx_abi == 'libsupc++':
            self.cxx.link_flags += ['-lsupc++']
        elif cxx_abi == 'libcxxabi':
            # If the C++ library requires explicitly linking to libc++abi, or
            # if we're testing libc++abi itself (the test configs are shared),
            # then link it.
            testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
            if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
                libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
                if libcxxabi_shared:
                    self.cxx.link_flags += ['-lc++abi']
                else:
                    # Static libc++abi: link the archive by absolute path when
                    # its location is known, otherwise fall back to -lc++abi.
                    if self.abi_library_root:
                        libname = self.make_static_lib_name('c++abi')
                        abs_path = os.path.join(self.abi_library_root, libname)
                        self.cxx.link_flags += [abs_path]
                    else:
                        self.cxx.link_flags += ['-lc++abi']
        elif cxx_abi == 'system-libcxxabi':
            self.cxx.link_flags += ['-lc++abi']
        elif cxx_abi == 'libcxxrt':
            self.cxx.link_flags += ['-lcxxrt']
        elif cxx_abi == 'vcruntime':
            debug_suffix = 'd' if self.debug_build else ''
            # This matches the set of libraries linked in the toplevel
            # libcxx CMakeLists.txt if building targeting msvc.
            self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
                                    ['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
            # The compiler normally links in oldnames.lib too, but we've
            # specified -nostdlib above, so we need to specify it manually.
            self.cxx.link_flags += ['-loldnames']
        elif cxx_abi == 'none':
            if self.target_info.is_windows():
                debug_suffix = 'd' if self.debug_build else ''
                self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
        else:
            self.lit_config.fatal(
                'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def quote(self, s):
if platform.system() == 'Windows':
return lit.TestRunner.quote_windows_command([s])
return pipes.quote(s)
    def configure_substitutions(self):
        """
        Register the Lit substitutions used by the test format:
        %{cxx}, %{flags}, %{compile_flags}, %{link_flags} and %{exec}.
        """
        sub = self.config.substitutions
        sub.append(('%{cxx}', self.quote(self.cxx.path)))
        # Module and warning flags are only included when enabled.
        flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
        compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
        sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
        sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
        sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
        codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
        env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
        # %{exec} runs the test binary via self.executor, which may execute
        # on a remote host depending on the configured executor.
        exec_args = [
            '--execdir %T',
            '--codesign_identity "{}"'.format(codesign_ident),
            '--env {}'.format(env_vars)
        ]
        sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
def configure_env(self):
self.config.environment = dict(os.environ)
    def add_path(self, dest_env, new_path):
        # Delegate to the target info, which knows the platform's conventions
        # for PATH-like environment variables.
        self.target_info.add_path(dest_env, new_path)

View File

@@ -0,0 +1,720 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import pickle
import pipes
import platform
import re
import shutil
import tempfile
import libcxx.test.format
import lit
import lit.LitConfig
import lit.Test
import lit.TestRunner
import lit.util
class ConfigurationError(Exception):
    """Base class for errors raised while configuring the test suite."""
class ConfigurationCompilationError(ConfigurationError):
    """Raised when a configuration-probing program fails to compile."""
class ConfigurationRuntimeError(ConfigurationError):
    """Raised when a configuration-probing program fails at run time."""
def _memoizeExpensiveOperation(extractCacheKey):
    """
    Allows memoizing a very expensive operation.

    `extractCacheKey` is called with the same arguments as the memoized
    function and must return a picklable value identifying the call.

    We pickle the cache key to make sure we store an immutable representation
    of it. If we stored an object and the object was referenced elsewhere, it
    could be changed from under our feet, which would break the cache.

    We also store the cache for a given function persistently across invocations
    of Lit. This dramatically speeds up the configuration of the test suite when
    invoking Lit repeatedly, which is important for developer workflow. However,
    with the current implementation that does not synchronize updates to the
    persistent cache, this also means that one should not call a memoized
    operation from multiple threads. This should normally not be a problem
    since Lit configuration is single-threaded.
    """
    def decorator(function):
        def f(config, *args, **kwargs):
            # One cache file per memoized function, under the test exec root.
            cacheRoot = os.path.join(config.test_exec_root, '__config_cache__')
            persistentCache = os.path.join(cacheRoot, function.__name__)
            if not os.path.exists(cacheRoot):
                os.makedirs(cacheRoot)
            cache = {}
            # Load a cache from a previous Lit invocation if there is one.
            if os.path.exists(persistentCache):
                with open(persistentCache, 'rb') as cacheFile:
                    cache = pickle.load(cacheFile)
            cacheKey = pickle.dumps(extractCacheKey(config, *args, **kwargs))
            if cacheKey not in cache:
                cache[cacheKey] = function(config, *args, **kwargs)
                # Update the persistent cache so it knows about the new key
                with open(persistentCache, 'wb') as cacheFile:
                    pickle.dump(cache, cacheFile)
            return cache[cacheKey]
        return f
    return decorator
def _executeScriptInternal(test, commands):
    """
    Runs the given list of commands through Lit's internal shell for `test`.

    Returns (stdout, stderr, exitCode, timeoutInfo)

    TODO: This really should be easier to access from Lit itself
    """
    parsedCommands = libcxx.test.format.parseScript(test, preamble=commands)
    # Build a throwaway LitConfig: we only execute a script here, so most
    # settings just need benign defaults.
    litConfig = lit.LitConfig.LitConfig(
        progname='lit',
        path=[],
        quiet=False,
        useValgrind=False,
        valgrindLeakCheck=False,
        valgrindArgs=[],
        noExecute=False,
        debug=False,
        isWindows=platform.system() == 'Windows',
        order='smart',
        params={})
    _, tmpBase = libcxx.test.format._getTempPaths(test)
    execDir = os.path.dirname(test.getExecPath())
    res = lit.TestRunner.executeScriptInternal(test, litConfig, tmpBase, parsedCommands, execDir)
    if isinstance(res, lit.Test.Result): # Handle failure to parse the Lit test
        res = ('', res.output, 127, None)
    (out, err, exitCode, timeoutInfo) = res
    # TODO: As a temporary workaround until https://reviews.llvm.org/D81892 lands, manually
    # split any stderr output that is included in stdout. It shouldn't be there, but
    # the Lit internal shell conflates stderr and stdout.
    conflatedErrorOutput = re.search("(# command stderr:.+$)", out, flags=re.DOTALL)
    if conflatedErrorOutput:
        # Move the "# command stderr:" tail of stdout over to stderr.
        conflatedErrorOutput = conflatedErrorOutput.group(0)
        out = out[:-len(conflatedErrorOutput)]
        err += conflatedErrorOutput
    return (out, err, exitCode, timeoutInfo)
def _makeConfigTest(config):
    """
    Create a dummy Lit test that can be used to probe the configuration
    (e.g. compile a snippet or run a small program).

    The returned object is a context manager: entering it creates the test's
    temporary directory, and exiting removes that directory along with the
    dummy source file.
    """
    # Make sure the support directories exist, which is needed to create
    # the temporary file %t below.
    sourceRoot = os.path.join(config.test_exec_root, '__config_src__')
    execRoot = os.path.join(config.test_exec_root, '__config_exec__')
    for supportDir in (sourceRoot, execRoot):
        if not os.path.exists(supportDir):
            os.makedirs(supportDir)
    # Create a dummy test suite and single dummy test inside it. As part of
    # the Lit configuration, automatically do the equivalent of 'mkdir %T'
    # and 'rm -r %T' to avoid cluttering the build directory.
    suite = lit.Test.TestSuite('__config__', sourceRoot, execRoot, config)
    tmp = tempfile.NamedTemporaryFile(dir=sourceRoot, delete=False, suffix='.cpp')
    tmp.close()
    pathInSuite = [os.path.relpath(tmp.name, sourceRoot)]
    class TestWrapper(lit.Test.Test):
        def __enter__(self):
            # Equivalent of 'mkdir %T' for this dummy test.
            testDir, _ = libcxx.test.format._getTempPaths(self)
            os.makedirs(testDir)
            return self
        def __exit__(self, *args):
            # Equivalent of 'rm -r %T', plus removal of the dummy source.
            testDir, _ = libcxx.test.format._getTempPaths(self)
            shutil.rmtree(testDir)
            os.remove(tmp.name)
    return TestWrapper(suite, pathInSuite, config)
@_memoizeExpensiveOperation(lambda c, s, f=[]: (c.substitutions, c.environment, s, f))
def sourceBuilds(config, source, additionalFlags=[]):
    """
    Return whether the program in the given string builds successfully.

    This is done by compiling and linking a program that consists of the given
    source with the %{cxx} substitution, and seeing whether that succeeds. If
    any additional flags are passed, they are appended to the compiler invocation.

    Note: the mutable default for `additionalFlags` is never mutated here, so
    sharing it across calls is harmless.
    """
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as sourceFile:
            sourceFile.write(source)
        # The doubled braces survive .format(), yielding the %{build} substitution.
        _, _, exitCode, _ = _executeScriptInternal(test, ['%{{build}} {}'.format(' '.join(additionalFlags))])
        return exitCode == 0
@_memoizeExpensiveOperation(lambda c, p, args=None: (c.substitutions, c.environment, p, args))
def programOutput(config, program, args=None):
    """
    Compiles a program for the test target, run it on the test target and return
    the output.

    Raises ConfigurationCompilationError when the build fails and
    ConfigurationRuntimeError when the program exits with a non-zero status.

    Note that execution of the program is done through the %{exec} substitution,
    which means that the program may be run on a remote host depending on what
    %{exec} does.
    """
    if args is None:
        args = []
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as source:
            source.write(program)
        _, err, exitCode, _ = _executeScriptInternal(test, ['%{build}'])
        if exitCode != 0:
            raise ConfigurationCompilationError("Failed to build program, stderr is:\n{}".format(err))
        out, err, exitCode, _ = _executeScriptInternal(test, ["%{{run}} {}".format(' '.join(args))])
        if exitCode != 0:
            raise ConfigurationRuntimeError("Failed to run program, stderr is:\n{}".format(err))
        # Lit's internal shell wraps the program's stdout in a
        # "# command output:" section; extract just the payload.
        actualOut = re.search("# command output:\n(.+)\n$", out, flags=re.DOTALL)
        actualOut = actualOut.group(1) if actualOut else ""
        return actualOut
@_memoizeExpensiveOperation(lambda c, p, args=None: (c.substitutions, c.environment, p, args))
def programSucceeds(config, program, args=None):
    """
    Compiles a program for the test target, run it on the test target and return
    whether it completed successfully.

    Note that execution of the program is done through the %{exec} substitution,
    which means that the program may be run on a remote host depending on what
    %{exec} does.
    """
    try:
        programOutput(config, program, args)
        return True
    except ConfigurationRuntimeError:
        return False
@_memoizeExpensiveOperation(lambda c, f: (c.substitutions, c.environment, f))
def hasCompileFlag(config, flag):
    """
    Return whether the compiler in the configuration supports a given compiler flag.

    This is done by executing the %{cxx} substitution with the given flag and
    checking whether that succeeds.
    """
    with _makeConfigTest(config) as test:
        # Compile the empty input (os.devnull) in syntax-only mode; -Werror
        # turns "unknown/unsupported flag" warnings into a non-zero exit.
        # The previously-captured stdout/stderr/timeout values were unused.
        _, _, exitCode, _ = _executeScriptInternal(test, [
            "%{{cxx}} -xc++ {} -Werror -fsyntax-only %{{flags}} %{{compile_flags}} {}".format(os.devnull, flag)
        ])
        return exitCode == 0
@_memoizeExpensiveOperation(lambda c, s: (c.substitutions, c.environment, s))
def runScriptExitCode(config, script):
    """
    Runs the given script as a Lit test, and returns the exit code of the execution.

    The script must be a list of commands, each of which being something that
    could appear on the right-hand-side of a `RUN:` keyword.
    """
    with _makeConfigTest(config) as test:
        # _executeScriptInternal returns (stdout, stderr, exitCode, timeoutInfo);
        # only the exit code is of interest here.
        result = _executeScriptInternal(test, script)
        return result[2]
@_memoizeExpensiveOperation(lambda c, s: (c.substitutions, c.environment, s))
def commandOutput(config, command):
    """
    Runs the given script as a Lit test, and returns the output.

    If the exit code isn't 0, a ConfigurationRuntimeError is raised.

    The script must be a list of commands, each of which being something that
    could appear on the right-hand-side of a `RUN:` keyword.
    """
    with _makeConfigTest(config) as test:
        out, err, exitCode, _ = _executeScriptInternal(test, command)
        if exitCode != 0:
            # Include stderr in the exception so failures are diagnosable;
            # the bare ConfigurationRuntimeError() previously raised here
            # carried no information. Callers catching the type are unaffected.
            raise ConfigurationRuntimeError("Failed to run command, stderr is:\n{}".format(err))
        return out
@_memoizeExpensiveOperation(lambda c, l: (c.substitutions, c.environment, l))
def hasAnyLocale(config, locales):
    """
    Return whether the runtime execution environment supports a given locale.
    Different systems may use different names for a locale, so this function checks
    whether any of the passed locale names is supported by setlocale() and returns
    true if one of them works.

    This is done by executing a program that tries to set the given locale using
    %{exec} -- this means that the command may be executed on a remote host
    depending on the %{exec} substitution.
    """
    # The probe exits 0 as soon as setlocale() accepts one of the candidate
    # names, and 1 when none match (or when localization is disabled).
    program = """
    #include <stddef.h>
    #if defined(_LIBCPP_HAS_NO_LOCALIZATION)
      int main(int, char**) { return 1; }
    #else
    #include <locale.h>
    int main(int argc, char** argv) {
      for (int i = 1; i < argc; i++) {
        if (::setlocale(LC_ALL, argv[i]) != NULL) {
          return 0;
        }
      }
      return 1;
    }
    #endif
    """
    return programSucceeds(config, program, args=[pipes.quote(l) for l in locales])
@_memoizeExpensiveOperation(lambda c, flags='': (c.substitutions, c.environment, flags))
def compilerMacros(config, flags=''):
    """
    Return a dictionary of predefined compiler macros.

    The keys are strings representing macros, and the values are strings
    representing what each macro is defined to.

    If the optional `flags` argument (a string) is provided, these flags will
    be added to the compiler invocation when generating the macros.
    """
    with _makeConfigTest(config) as test:
        # Include <__config_site> when available so libc++'s configuration
        # macros appear in the preprocessor output as well.
        with open(test.getSourcePath(), 'w') as sourceFile:
            sourceFile.write("""
            #if __has_include(<__config_site>)
            # include <__config_site>
            #endif
            """)
        # %s is Lit's substitution for the test source; -dM -E dumps macros.
        unparsedOutput, err, exitCode, _ = _executeScriptInternal(test, [
            "%{{cxx}} %s -dM -E %{{flags}} %{{compile_flags}} {}".format(flags)
        ])
        if exitCode != 0:
            raise ConfigurationCompilationError("Failed to retrieve compiler macros, stderr is:\n{}".format(err))
        parsedMacros = dict()
        # Each relevant line looks like '#define NAME VALUE' (VALUE may be empty).
        defines = (l.strip() for l in unparsedOutput.split('\n') if l.startswith('#define '))
        for line in defines:
            line = line[len('#define '):]
            macro, _, value = line.partition(' ')
            parsedMacros[macro] = value
        return parsedMacros
def featureTestMacros(config, flags=''):
    """
    Return a dictionary of feature test macros.

    The keys are strings representing feature test macros, and the values are
    integers representing the value of the macro.
    """
    result = {}
    for macro, value in compilerMacros(config, flags).items():
        # Feature-test macros all start with __cpp_; strip any integer
        # suffix letters (L/l/U/u) before converting the value.
        if macro.startswith('__cpp_'):
            result[macro] = int(value.rstrip('LlUu'))
    return result
def _appendToSubstitution(substitutions, key, value):
return [(k, v + ' ' + value) if k == key else (k, v) for (k, v) in substitutions]
def _prependToSubstitution(substitutions, key, value):
return [(k, value + ' ' + v) if k == key else (k, v) for (k, v) in substitutions]
class ConfigAction(object):
    """
    Interface for an action that can be performed on a Lit TestingConfig object.

    Examples of such actions are adding or modifying substitutions, Lit features,
    etc. This base class only defines the interface of such actions; it is meant
    to be subclassed appropriately to create new actions.
    """

    def applyTo(self, config):
        """
        Applies the action to the given configuration.

        This should modify the configuration object in place, and return nothing.

        If applying the action to the configuration would yield an invalid
        configuration, and it is possible to diagnose it here, this method
        should produce an error. For example, it should be an error to modify
        a substitution in a way that we know for sure is invalid (e.g. adding
        a compiler flag when we know the compiler doesn't support it). Failure
        to do so early may lead to difficult-to-diagnose issues down the road.
        """
        pass

    def pretty(self, config, litParams):
        """
        Returns a short and human-readable string describing what this action does.

        This is used for logging purposes when running the test suite, so it should
        be kept concise.
        """
        pass
class AddFeature(ConfigAction):
    """
    This action defines the given Lit feature when running the test suite.

    The name of the feature can be a string or a callable, in which case it is
    called with the configuration to produce the feature name (as a string).
    """

    def __init__(self, name):
        self._name = name

    def _getName(self, config):
        # Resolve the name lazily so callables see the final configuration.
        if callable(self._name):
            name = self._name(config)
        else:
            name = self._name
        if not isinstance(name, str):
            raise ValueError("Lit feature did not resolve to a string (got {})".format(name))
        return name

    def applyTo(self, config):
        config.available_features.add(self._getName(config))

    def pretty(self, config, litParams):
        return 'add Lit feature {}'.format(self._getName(config))
class AddFlag(ConfigAction):
    """
    This action adds the given flag to the %{flags} substitution.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string). The flag must be
    supported by the compiler; otherwise applying the action is an error.
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        assert hasCompileFlag(config, flag), "Trying to enable flag {}, which is not supported".format(flag)
        config.substitutions = _appendToSubstitution(config.substitutions, '%{flags}', flag)

    def pretty(self, config, litParams):
        return 'add {} to %{{flags}}'.format(self._getFlag(config))
class AddFlagIfSupported(ConfigAction):
    """
    This action adds the given flag to the %{flags} substitution, only if
    the compiler supports the flag.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string).
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        # Unsupported flags are silently skipped -- that is the difference
        # from AddFlag, which asserts support.
        if hasCompileFlag(config, flag):
            config.substitutions = _appendToSubstitution(config.substitutions, '%{flags}', flag)

    def pretty(self, config, litParams):
        return 'add {} to %{{flags}}'.format(self._getFlag(config))
class AddCompileFlag(ConfigAction):
    """
    This action adds the given flag to the %{compile_flags} substitution.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string). The flag must be
    supported by the compiler; otherwise applying the action is an error.
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        assert hasCompileFlag(config, flag), "Trying to enable compile flag {}, which is not supported".format(flag)
        config.substitutions = _appendToSubstitution(config.substitutions, '%{compile_flags}', flag)

    def pretty(self, config, litParams):
        return 'add {} to %{{compile_flags}}'.format(self._getFlag(config))
class AddLinkFlag(ConfigAction):
    """
    This action appends the given flag to the %{link_flags} substitution.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string). The flag must be
    supported by the compiler; otherwise applying the action is an error.
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        assert hasCompileFlag(config, flag), "Trying to enable link flag {}, which is not supported".format(flag)
        config.substitutions = _appendToSubstitution(config.substitutions, '%{link_flags}', flag)

    def pretty(self, config, litParams):
        return 'append {} to %{{link_flags}}'.format(self._getFlag(config))
class PrependLinkFlag(ConfigAction):
    """
    This action prepends the given flag to the %{link_flags} substitution.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string). The flag must be
    supported by the compiler; otherwise applying the action is an error.
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        assert hasCompileFlag(config, flag), "Trying to enable link flag {}, which is not supported".format(flag)
        config.substitutions = _prependToSubstitution(config.substitutions, '%{link_flags}', flag)

    def pretty(self, config, litParams):
        return 'prepend {} to %{{link_flags}}'.format(self._getFlag(config))
class AddOptionalWarningFlag(ConfigAction):
    """
    This action adds the given warning flag to the %{compile_flags} substitution,
    if it is supported by the compiler.

    The flag can be a string or a callable, in which case it is called with the
    configuration to produce the actual flag (as a string).
    """

    def __init__(self, flag):
        self._flag = flag

    def _getFlag(self, config):
        # Resolve the flag lazily so callables see the final configuration.
        return self._flag(config) if callable(self._flag) else self._flag

    def applyTo(self, config):
        flag = self._getFlag(config)
        # Use -Werror to make sure we see an error about the flag being unsupported.
        if hasCompileFlag(config, '-Werror ' + flag):
            config.substitutions = _appendToSubstitution(config.substitutions, '%{compile_flags}', flag)

    def pretty(self, config, litParams):
        return 'add {} to %{{compile_flags}}'.format(self._getFlag(config))
class AddSubstitution(ConfigAction):
    """
    This action adds the given substitution to the Lit configuration.

    The substitution can be a string or a callable, in which case it is called
    with the configuration to produce the actual substitution (as a string).
    """

    def __init__(self, key, substitution):
        self._key = key
        self._substitution = substitution

    def _getSub(self, config):
        # Resolve the substitution lazily so callables see the final configuration.
        if callable(self._substitution):
            return self._substitution(config)
        return self._substitution

    def applyTo(self, config):
        config.substitutions.append((self._key, self._getSub(config)))

    def pretty(self, config, litParams):
        return 'add substitution {} = {}'.format(self._key, self._getSub(config))
class Feature(object):
    """
    Represents a Lit available feature that is enabled whenever it is supported.

    A feature like this informs the test suite about a capability of the compiler,
    platform, etc. Unlike Parameters, it does not make sense to explicitly
    control whether a Feature is enabled -- it should be enabled whenever it
    is supported.
    """

    def __init__(self, name, actions=None, when=lambda _: True):
        """
        Create a Lit feature for consumption by a test suite.

        - name
            The name of the feature. This is what will end up in Lit's available
            features if the feature is enabled. This can be either a string or a
            callable, in which case it is passed the TestingConfig and should
            generate a string representing the name of the feature.

        - actions
            An optional list of ConfigActions to apply when the feature is supported.
            An AddFeature action is always created regardless of any actions supplied
            here -- these actions are meant to perform more than setting a corresponding
            Lit feature (e.g. adding compiler flags). If 'actions' is a callable, it
            is called with the current configuration object to generate the actual
            list of actions.

        - when
            A callable that gets passed a TestingConfig and should return a
            boolean representing whether the feature is supported in that
            configuration. For example, this can use `hasCompileFlag` to
            check whether the compiler supports the flag that the feature
            represents. If omitted, the feature will always be considered
            supported.
        """
        self._name = name
        self._actions = actions if actions is not None else []
        self._isSupported = when

    def _getName(self, config):
        # Resolve the name lazily so callables see the final configuration.
        if callable(self._name):
            name = self._name(config)
        else:
            name = self._name
        if not isinstance(name, str):
            raise ValueError("Feature did not resolve to a name that's a string, got {}".format(name))
        return name

    def getActions(self, config):
        """
        Return the list of actions associated to this feature.

        If the feature is not supported, an empty list is returned.
        If the feature is supported, an `AddFeature` action is automatically added
        to the returned list of actions, in addition to any actions provided on
        construction.
        """
        if not self._isSupported(config):
            return []
        if callable(self._actions):
            actions = self._actions(config)
        else:
            actions = self._actions
        return [AddFeature(self._getName(config))] + actions

    def pretty(self, config):
        """
        Returns the Feature's name.
        """
        return self._getName(config)
def _str_to_bool(s):
"""
Convert a string value to a boolean.
True values are "y", "yes", "t", "true", "on" and "1", regardless of capitalization.
False values are "n", "no", "f", "false", "off" and "0", regardless of capitalization.
"""
trueVals = ["y", "yes", "t", "true", "on", "1"]
falseVals = ["n", "no", "f", "false", "off", "0"]
lower = s.lower()
if lower in trueVals:
return True
elif lower in falseVals:
return False
else:
raise ValueError("Got string '{}', which isn't a valid boolean".format(s))
def _parse_parameter(s, type):
if type is bool and isinstance(s, str):
return _str_to_bool(s)
elif type is list and isinstance(s, str):
return [x.strip() for x in s.split(',') if x.strip()]
return type(s)
class Parameter(object):
    """
    Represents a parameter of a Lit test suite.

    Parameters allow customizing the behavior of test suites in a user
    controllable way. A Parameter's value can be set in two ways. First, by
    passing `--param <KEY>=<VALUE>` when running Lit (or equivalently by
    setting `litConfig.params[KEY] = VALUE` somewhere in the Lit
    configuration files), which sets the parameter globally for all test
    suites being run. Second, by setting `config.KEY = VALUE` somewhere in
    the Lit configuration files, which sets the parameter only for the test
    suite(s) that use that `config` object.

    Parameters can have multiple possible values, and an optional default
    used when they are left unspecified. They can also carry any number of
    ConfigActions, which are performed on the TestingConfig when the
    parameter is enabled. Depending on those actions, it may be an error to
    enable the Parameter if some action is not supported in the given
    configuration -- for example, requesting the C++23 standard when the
    compiler doesn't support `-std=c++23`.
    """
    def __init__(self, name, type, help, actions, choices=None, default=None):
        """
        Create a Lit parameter to customize the behavior of a test suite.

        - name
            The name used to set the parameter on the command-line, via
            `--param <name>=<value>` when running Lit. Must be non-empty.

        - choices
            An optional non-empty iterable of allowed values. When provided,
            it is an error for the parameter to take a value outside this
            set, whether given explicitly or through a default value.

        - type
            A callable used to parse the value given on the command-line.
            As a special case, `bool` also accepts boolean-like strings and
            `list` parses a comma-delimited string into a list of substrings.

        - help
            A string explaining the parameter, for documentation purposes.
            TODO: We should be able to surface those from the Lit command-line.

        - actions
            A callable receiving the parsed parameter value (either the one
            from the command-line or the default) and returning the list of
            ConfigActions to perform. All of them must be supported in the
            given configuration.

        - default
            An optional value used when none is provided on the command-line.
            A callable default is invoked with the TestingConfig to compute
            the value. Either way, the resulting value must be among the
            'choices' when those are provided.
        """
        self._name = name
        if len(self._name) == 0:
            raise ValueError("Parameter name must not be the empty string")

        if choices is None:
            self._choices = None
        else:
            self._choices = list(choices)  # should be finite
            if len(self._choices) == 0:
                raise ValueError("Parameter '{}' must be given at least one possible value".format(self._name))

        self._parse = lambda x: _parse_parameter(x, type)
        self._help = help
        self._actions = actions
        self._default = default

    def _getValue(self, config, litParams):
        """
        Return the (pretty, parsed) value of the parameter given the
        configuration objects. Values from the Lit-level params take
        precedence over attributes set on the TestingConfig.
        """
        param = getattr(config, self.name, None)
        param = litParams.get(self.name, param)
        if param is None and self._default is None:
            raise ValueError("Parameter {} doesn't have a default value, but it was not specified in the Lit parameters or in the Lit config".format(self.name))

        if param is not None:
            pretty, value = param, self._parse(param)
        else:
            # Defaults may be callables computed from the TestingConfig.
            value = self._default(config) if callable(self._default) else self._default
            pretty = '{} (default)'.format(value)

        if self._choices and value not in self._choices:
            raise ValueError("Got value '{}' for parameter '{}', which is not in the provided set of possible choices: {}".format(value, self.name, self._choices))
        return (pretty, value)

    @property
    def name(self):
        """
        Return the name of the parameter.

        This is the name that can be used to set the parameter on the
        command-line when running Lit.
        """
        return self._name

    def getActions(self, config, litParams):
        """
        Return the list of actions associated to this value of the parameter.
        """
        _, value = self._getValue(config, litParams)
        return self._actions(value)

    def pretty(self, config, litParams):
        """
        Return a pretty representation of the parameter's name and value.
        """
        pretty, _ = self._getValue(config, litParams)
        return "{}={}".format(self.name, pretty)

View File

@@ -0,0 +1,301 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
import platform
import re
import shutil
import subprocess
import sys
def _isClang(cfg):
    # Open-source Clang defines __clang__ but not __apple_build_version__.
    return '__clang__' in compilerMacros(cfg) and '__apple_build_version__' not in compilerMacros(cfg)

def _isAppleClang(cfg):
    return '__apple_build_version__' in compilerMacros(cfg)

def _isGCC(cfg):
    # Clang also defines __GNUC__, so exclude it explicitly.
    return '__GNUC__' in compilerMacros(cfg) and '__clang__' not in compilerMacros(cfg)

def _isMSVC(cfg):
    return '_MSC_VER' in compilerMacros(cfg)

def _msvcVersion(cfg):
    # _MSC_VER encodes major and minor version, e.g. 1934 -> (19, 34).
    version = int(compilerMacros(cfg)['_MSC_VER'])
    return (version // 100, version % 100)
def _hasSuitableClangTidy(cfg):
    """
    Return whether a clang-tidy of version >= 13 is available.

    The version is extracted as the first run of digits in the output of
    `clang-tidy --version`. If clang-tidy can't be run, or its version can't
    be determined from that output, it is considered unsuitable.
    """
    try:
        versionOutput = commandOutput(cfg, ['clang-tidy --version'])
        match = re.search('[0-9]+', versionOutput)
        # Guard against output without any digit: re.search() returns None in
        # that case, and calling .group() on it would raise AttributeError,
        # which the previous code did not catch.
        return match is not None and int(match.group()) >= 13
    except ConfigurationRuntimeError:
        return False
DEFAULT_FEATURES = [
Feature(name='fcoroutines-ts',
when=lambda cfg: hasCompileFlag(cfg, '-fcoroutines-ts') and
featureTestMacros(cfg, flags='-fcoroutines-ts').get('__cpp_coroutines', 0) >= 201703,
actions=[AddCompileFlag('-fcoroutines-ts')]),
Feature(name='thread-safety',
when=lambda cfg: hasCompileFlag(cfg, '-Werror=thread-safety'),
actions=[AddCompileFlag('-Werror=thread-safety')]),
Feature(name='diagnose-if-support',
when=lambda cfg: hasCompileFlag(cfg, '-Wuser-defined-warnings'),
actions=[AddCompileFlag('-Wuser-defined-warnings')]),
Feature(name='has-fblocks', when=lambda cfg: hasCompileFlag(cfg, '-fblocks')),
Feature(name='-fsized-deallocation', when=lambda cfg: hasCompileFlag(cfg, '-fsized-deallocation')),
Feature(name='-faligned-allocation', when=lambda cfg: hasCompileFlag(cfg, '-faligned-allocation')),
Feature(name='fdelayed-template-parsing', when=lambda cfg: hasCompileFlag(cfg, '-fdelayed-template-parsing')),
Feature(name='libcpp-no-coroutines', when=lambda cfg: featureTestMacros(cfg).get('__cpp_impl_coroutine', 0) < 201902),
Feature(name='has-fobjc-arc', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc') and
sys.platform.lower().strip() == 'darwin'), # TODO: this doesn't handle cross-compiling to Apple platforms.
Feature(name='objective-c++', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc')),
Feature(name='verify-support', when=lambda cfg: hasCompileFlag(cfg, '-Xclang -verify-ignore-unexpected')),
Feature(name='non-lockfree-atomics',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { (void)x.load(); return 0; }
""")),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(name='is-lockfree-runtime-function',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { return x.is_lock_free(); }
""")),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
# a shared library in a shell test by using the '-shared' compiler flag.
#
# Note: To implement this check properly, we need to make sure that we use something inside the
# compiled library, not only in the headers. It should be safe to assume that all implementations
# define `operator new` in the compiled library.
Feature(name='cant-build-shared-library',
when=lambda cfg: not sourceBuilds(cfg, """
void f() { new int(3); }
""", ['-shared'])),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.20348.0):
# https://developercommunity.visualstudio.com/t/utf-8-locales-break-ctype-functions-for-wchar-type/1653678
Feature(name='win32-broken-utf8-wchar-ctype',
when=lambda cfg: not '_LIBCPP_HAS_NO_LOCALIZATION' in compilerMacros(cfg) and '_WIN32' in compilerMacros(cfg) and not programSucceeds(cfg, """
#include <locale.h>
#include <wctype.h>
int main(int, char**) {
setlocale(LC_ALL, "en_US.UTF-8");
return towlower(L'\\xDA') != L'\\xFA';
}
""")),
# Check for a Windows UCRT bug (fixed in UCRT/Windows 10.0.19041.0).
# https://developercommunity.visualstudio.com/t/printf-formatting-with-g-outputs-too/1660837
Feature(name='win32-broken-printf-g-precision',
when=lambda cfg: not '_LIBCPP_HAS_NO_LOCALIZATION' in compilerMacros(cfg) and '_WIN32' in compilerMacros(cfg) and not programSucceeds(cfg, """
#include <stdio.h>
#include <string.h>
int main(int, char**) {
char buf[100];
snprintf(buf, sizeof(buf), "%#.*g", 0, 0.0);
return strcmp(buf, "0.");
}
""")),
# Check for Glibc < 2.27, where the ru_RU.UTF-8 locale had
# mon_decimal_point == ".", which our tests don't handle.
Feature(name='glibc-old-ru_RU-decimal-point',
when=lambda cfg: not '_LIBCPP_HAS_NO_LOCALIZATION' in compilerMacros(cfg) and not programSucceeds(cfg, """
#include <locale.h>
#include <string.h>
int main(int, char**) {
setlocale(LC_ALL, "ru_RU.UTF-8");
return strcmp(localeconv()->mon_decimal_point, ",");
}
""")),
Feature(name='has-unix-headers',
when=lambda cfg: sourceBuilds(cfg, """
#include <unistd.h>
#include <sys/wait.h>
int main(int, char**) {
return 0;
}
""")),
# Whether Bash can run on the executor.
# This is not always the case, for example when running on embedded systems.
#
# For the corner case of bash existing, but it being missing in the path
# set in %{exec} as "--env PATH=one-single-dir", the executor does find
# and executes bash, but bash then can't find any other common shell
# utilities. Test executing "bash -c 'bash --version'" to see if bash
# manages to find binaries to execute.
Feature(name='executor-has-no-bash',
when=lambda cfg: runScriptExitCode(cfg, ['%{exec} bash -c \'bash --version\'']) != 0),
Feature(name='has-clang-tidy',
when=_hasSuitableClangTidy),
Feature(name='apple-clang', when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name='clang', when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isClang),
# Note: Due to a GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104760), we must disable deprecation warnings
# on GCC or spurious diagnostics are issued.
#
# TODO:
# - Enable -Wplacement-new with GCC.
# - Enable -Wclass-memaccess with GCC.
Feature(name='gcc', when=_isGCC,
actions=[AddCompileFlag('-D_LIBCPP_DISABLE_DEPRECATION_WARNINGS'),
AddCompileFlag('-Wno-placement-new'),
AddCompileFlag('-Wno-class-memaccess')]),
Feature(name=lambda cfg: 'gcc-{__GNUC__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name='msvc', when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}.{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
]
# Deduce and add the test features that are implied by the #defines in
# the <__config_site> header.
#
# For each macro of the form `_LIBCPP_XXX_YYY_ZZZ` defined below that
# is defined after including <__config_site>, add a Lit feature called
# `libcpp-xxx-yyy-zzz`. When a macro is defined to a specific value
# (e.g. `_LIBCPP_ABI_VERSION=2`), the feature is `libcpp-xxx-yyy-zzz=<value>`.
#
# Note that features that are more strongly tied to libc++ are named libcpp-foo,
# while features that are more general in nature are not prefixed with 'libcpp-'.
macros = {
    '_LIBCPP_HAS_NO_MONOTONIC_CLOCK': 'no-monotonic-clock',
    '_LIBCPP_HAS_NO_THREADS': 'no-threads',
    '_LIBCPP_HAS_THREAD_API_EXTERNAL': 'libcpp-has-thread-api-external',
    '_LIBCPP_HAS_THREAD_API_PTHREAD': 'libcpp-has-thread-api-pthread',
    '_LIBCPP_NO_VCRUNTIME': 'libcpp-no-vcruntime',
    '_LIBCPP_ABI_VERSION': 'libcpp-abi-version',
    '_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY': 'no-filesystem',
    '_LIBCPP_HAS_NO_RANDOM_DEVICE': 'no-random-device',
    '_LIBCPP_HAS_NO_LOCALIZATION': 'no-localization',
    '_LIBCPP_HAS_NO_WIDE_CHARACTERS': 'no-wide-characters',
    '_LIBCPP_HAS_NO_UNICODE': 'libcpp-has-no-unicode',
    '_LIBCPP_ENABLE_DEBUG_MODE': 'libcpp-has-debug-mode',
}
for macro, feature in macros.items():
    # Note: the `m=macro, f=feature` default arguments capture the current
    # loop values. Referring to `macro`/`feature` directly inside the lambdas
    # would bind late and only ever see the values of the last iteration.
    DEFAULT_FEATURES.append(
        Feature(name=lambda cfg, m=macro, f=feature: f + ('={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''),
                when=lambda cfg, m=macro: m in compilerMacros(cfg))
    )
# Mapping from canonical locale names (used in the tests) to possible locale
# names on various systems. Each locale is considered supported if any of the
# alternative names is supported.
locales = {
    'en_US.UTF-8': ['en_US.UTF-8', 'en_US.utf8', 'English_United States.1252'],
    'fr_FR.UTF-8': ['fr_FR.UTF-8', 'fr_FR.utf8', 'French_France.1252'],
    'ja_JP.UTF-8': ['ja_JP.UTF-8', 'ja_JP.utf8', 'Japanese_Japan.923'],
    'ru_RU.UTF-8': ['ru_RU.UTF-8', 'ru_RU.utf8', 'Russian_Russia.1251'],
    'zh_CN.UTF-8': ['zh_CN.UTF-8', 'zh_CN.utf8', 'Chinese_China.936'],
    'fr_CA.ISO8859-1': ['fr_CA.ISO8859-1', 'French_Canada.1252'],
    'cs_CZ.ISO8859-2': ['cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250']
}
for locale, alts in locales.items():
    # Note: Using alts directly in the lambda body here will bind it to the value at the
    # end of the loop. Assigning it to a default argument works around this issue.
    DEFAULT_FEATURES.append(Feature(name='locale.{}'.format(locale),
                                    when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts)))
# Add features representing the platform name: darwin, linux, windows, etc...
DEFAULT_FEATURES += [
Feature(name='darwin', when=lambda cfg: '__APPLE__' in compilerMacros(cfg)),
Feature(name='windows', when=lambda cfg: '_WIN32' in compilerMacros(cfg)),
Feature(name='windows-dll', when=lambda cfg: '_WIN32' in compilerMacros(cfg) and programSucceeds(cfg, """
#include <iostream>
#include <windows.h>
#include <winnt.h>
int main(int, char**) {
// Get a pointer to a data member that gets linked from the C++
// library. This must be a data member (functions can get
// thunk inside the calling executable), and must not be
// something that is defined inline in headers.
void *ptr = &std::cout;
// Get a handle to the current main executable.
void *exe = GetModuleHandle(NULL);
// The handle points at the PE image header. Navigate through
// the header structure to find the size of the PE image (the
// executable).
PIMAGE_DOS_HEADER dosheader = (PIMAGE_DOS_HEADER)exe;
PIMAGE_NT_HEADERS ntheader = (PIMAGE_NT_HEADERS)((BYTE *)dosheader + dosheader->e_lfanew);
PIMAGE_OPTIONAL_HEADER peheader = &ntheader->OptionalHeader;
void *exeend = (BYTE*)exe + peheader->SizeOfImage;
// Check if the tested pointer - the data symbol from the
// C++ library - is located within the exe.
if (ptr >= exe && ptr <= exeend)
return 1;
// Return success if it was outside of the executable, i.e.
// loaded from a DLL.
return 0;
}
"""), actions=[AddCompileFlag('-DTEST_WINDOWS_DLL')]),
Feature(name='linux', when=lambda cfg: '__linux__' in compilerMacros(cfg)),
Feature(name='netbsd', when=lambda cfg: '__NetBSD__' in compilerMacros(cfg)),
Feature(name='freebsd', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg))
]
# Add features representing the build host platform name.
# The build host could differ from the target platform for cross-compilation.
#
# Note: this section uses `platform.system()`, which previously worked only
# because the `platform` module leaked through `from libcxx.test.dsl import *`.
# The module is now imported explicitly at the top of this file.
DEFAULT_FEATURES += [
    Feature(name='buildhost={}'.format(sys.platform.lower().strip())),
    # sys.platform can often be represented by a "sub-system", such as 'win32', 'cygwin', 'mingw', freebsd13 & etc.
    # We define a consolidated feature on a few platforms using platform.system(),
    # which reports the general OS name instead of the sub-system.
    Feature(name='buildhost=windows', when=lambda cfg: platform.system().lower().startswith('windows')),
    Feature(name='buildhost=freebsd', when=lambda cfg: platform.system().lower().startswith('freebsd')),
    Feature(name='buildhost=aix', when=lambda cfg: platform.system().lower().startswith('aix'))
]
# Detect whether GDB is on the system, has Python scripting and supports
# adding breakpoint commands. If so add a substitution to access it.
def check_gdb(cfg):
    """
    Return whether a GDB with Python scripting and breakpoint-command support
    is available on the host.

    Breakpoint commands settable from Python were added in GDB 8.3, so this
    effectively requires GDB >= 8.3 built with Python support. The `cfg`
    argument is unused; it is accepted so this function can be used directly
    as a Feature's `when` predicate.
    """
    gdb_path = shutil.which('gdb')
    if gdb_path is None:
        return False

    # Check that we can set breakpoint commands, which was added in 8.3.
    # Using the quit command here means that gdb itself exits, not just
    # the "python <...>" command.
    test_src = """\
try:
  gdb.Breakpoint(\"main\").commands=\"foo\"
except AttributeError:
  gdb.execute(\"quit 1\")
gdb.execute(\"quit\")"""

    try:
        stdout = subprocess.check_output(
            [gdb_path, "-ex", "python " + test_src, "--batch"],
            stderr=subprocess.DEVNULL, universal_newlines=True)
    except subprocess.CalledProcessError:
        # We can't set breakpoint commands
        return False

    # Check we actually ran the Python: a GDB built without Python support
    # prints this message instead of executing the script.
    return "Python scripting is not supported" not in stdout
# If a suitable GDB was detected, expose its path to the tests through the
# %{gdb} substitution so debugging-support tests can invoke it.
DEFAULT_FEATURES += [
    Feature(name='host-has-gdb-with-python',
            when=check_gdb,
            actions=[AddSubstitution('%{gdb}', lambda cfg: shutil.which('gdb'))]
    )
]

View File

@@ -0,0 +1,278 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import lit
import lit.formats
import os
import pipes
import re
import shutil
def _getTempPaths(test):
    """
    Return the values to use for the %T and %t substitutions, respectively.

    The difference between this and Lit's default behavior is that we
    guarantee that %T is a path unique to the test being run.
    """
    baseTmpDir, _ = lit.TestRunner.getTempPaths(test)
    testName = os.path.split(test.getExecPath())[1]
    uniqueTmpDir = os.path.join(baseTmpDir, testName + '.dir')
    return uniqueTmpDir, os.path.join(uniqueTmpDir, 't')
def _checkBaseSubstitutions(substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
assert s in substitutions, "Required substitution {} was not provided".format(s)
def parseScript(test, preamble):
    """
    Extract the script from a test, with substitutions applied.

    Returns a list of commands ready to be executed.

    - test
        The lit.Test to parse.

    - preamble
        A list of commands to perform before any command in the test.
        These commands can contain unexpanded substitutions, but they
        must not be of the form 'RUN:' -- they must be proper commands
        once substituted.
    """
    # Get the default substitutions
    tmpDir, tmpBase = _getTempPaths(test)
    substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir, tmpBase)

    # Check base substitutions and add the %{build} and %{run} convenience substitutions
    _checkBaseSubstitutions(substitutions)
    substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
    substitutions.append(('%{run}', '%{exec} %t.exe'))

    # Parse the test file, including custom directives.
    # Note: the parsers append parsed values into the lists created here
    # (via initial_value), so these must be created before the parsers.
    additionalCompileFlags = []
    fileDependencies = []
    parsers = [
        lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
                                                   lit.TestRunner.ParserKind.LIST,
                                                   initial_value=fileDependencies),
        lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
                                                   lit.TestRunner.ParserKind.LIST,
                                                   initial_value=additionalCompileFlags)
    ]

    scriptInTest = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
                                                            require_script=not preamble)
    # parseIntegratedTestScript returns a lit.Test.Result on failure (e.g. an
    # unsupported test); propagate it to the caller as-is.
    if isinstance(scriptInTest, lit.Test.Result):
        return scriptInTest

    script = []

    # For each file dependency in FILE_DEPENDENCIES, inject a command to copy
    # that file to the execution directory. Execute the copy from %S to allow
    # relative paths from the test directory.
    for dep in fileDependencies:
        script += ['%dbg(SETUP) cd %S && cp {} %T'.format(dep)]
    script += preamble
    script += scriptInTest

    # Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
    substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
                     else (s, x) for (s, x) in substitutions]

    # Perform substitutions in the script itself.
    script = lit.TestRunner.applySubstitutions(script, substitutions,
                                               recursion_limit=test.config.recursiveExpansionLimit)

    return script
class CxxStandardLibraryTest(lit.formats.TestFormat):
    """
    Lit test format for the C++ Standard Library conformance test suite.

    This test format is based on top of the ShTest format -- it basically
    creates a shell script performing the right operations (compile/link/run)
    based on the extension of the test file it encounters. It supports files
    with the following extensions:

    FOO.pass.cpp            - Compiles, links and runs successfully
    FOO.pass.mm             - Same as .pass.cpp, but for Objective-C++

    FOO.compile.pass.cpp    - Compiles successfully, link and run not attempted
    FOO.compile.fail.cpp    - Does not compile successfully

    FOO.link.pass.cpp       - Compiles and links successfully, run not attempted
    FOO.link.fail.cpp       - Compiles successfully, but fails to link

    FOO.sh.<anything>       - A builtin Lit Shell test

    FOO.verify.cpp          - Compiles with clang-verify. This type of test is
                              automatically marked as UNSUPPORTED if the compiler
                              does not support Clang-verify.

    FOO.fail.cpp            - Compiled with clang-verify if clang-verify is
                              supported, and equivalent to a .compile.fail.cpp
                              test otherwise. This is supported only for backwards
                              compatibility with the test suite.

    Substitution requirements
    ===============================
    The test format operates by assuming that each test's configuration provides
    the following substitutions, which it will reuse in the shell scripts it
    constructs:
        %{cxx}           - A command that can be used to invoke the compiler
        %{compile_flags} - Flags to use when compiling a test case
        %{link_flags}    - Flags to use when linking a test case
        %{flags}         - Flags to use either when compiling or linking a test case
        %{exec}          - A command to prefix the execution of executables

    Note that when building an executable (as opposed to only compiling a source
    file), all three of %{flags}, %{compile_flags} and %{link_flags} will be used
    in the same command line. In other words, the test format doesn't perform
    separate compilation and linking steps in this case.

    Additional supported directives
    ===============================
    In addition to everything that's supported in Lit ShTests, this test format
    also understands the following directives inside test files:

        // FILE_DEPENDENCIES: file, directory, /path/to/file

            This directive expresses that the test requires the provided files
            or directories in order to run. An example is a test that requires
            some test input stored in a data file. When a test file contains
            such a directive, this test format will collect them and copy them
            to the directory represented by %T. The intent is that %T contains
            all the inputs necessary to run the test, such that e.g. execution
            on a remote host can be done by simply copying %T to the host.

        // ADDITIONAL_COMPILE_FLAGS: flag1, flag2, flag3

            This directive will cause the provided flags to be added to the
            %{compile_flags} substitution for the test that contains it. This
            allows adding special compilation flags without having to use a
            .sh.cpp test, which would be more powerful but perhaps overkill.

    Additional provided substitutions and features
    ==============================================
    The test format will define the following substitutions for use inside tests:

        %{build}
            Expands to a command-line that builds the current source
            file with the %{flags}, %{compile_flags} and %{link_flags}
            substitutions, and that produces an executable named %t.exe.

        %{run}
            Equivalent to `%{exec} %t.exe`. This is intended to be used
            in conjunction with the %{build} substitution.
    """
    def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
        # Regexes matching the file suffixes this format knows how to handle.
        SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$',
                              '[.]compile[.]pass[.]cpp$', '[.]compile[.]fail[.]cpp$',
                              '[.]link[.]pass[.]cpp$', '[.]link[.]fail[.]cpp$',
                              '[.]sh[.][^.]+$',
                              '[.]verify[.]cpp$',
                              '[.]fail[.]cpp$']
        sourcePath = testSuite.getSourcePath(pathInSuite)
        for filename in os.listdir(sourcePath):
            # Ignore dot files and excluded tests.
            if filename.startswith('.') or filename in localConfig.excludes:
                continue

            filepath = os.path.join(sourcePath, filename)
            if not os.path.isdir(filepath):
                if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
                    yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)

    def execute(self, test, litConfig):
        # Dispatch on the test's filename suffix, translating each kind of
        # test into the list of shell steps that implements its semantics.
        VERIFY_FLAGS = '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'
        supportsVerify = 'verify-support' in test.config.available_features
        filename = test.path_in_suite[-1]

        if re.search('[.]sh[.][^.]+$', filename):
            steps = [ ] # The steps are already in the script
            return self._executeShTest(test, litConfig, steps)
        elif filename.endswith('.compile.pass.cpp'):
            steps = [
                "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
            ]
            return self._executeShTest(test, litConfig, steps)
        elif filename.endswith('.compile.fail.cpp'):
            steps = [
                "%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
            ]
            return self._executeShTest(test, litConfig, steps)
        elif filename.endswith('.link.pass.cpp'):
            steps = [
                "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
            ]
            return self._executeShTest(test, litConfig, steps)
        elif filename.endswith('.link.fail.cpp'):
            steps = [
                "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
                "%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
            ]
            return self._executeShTest(test, litConfig, steps)
        elif filename.endswith('.verify.cpp'):
            if not supportsVerify:
                return lit.Test.Result(lit.Test.UNSUPPORTED,
                    "Test {} requires support for Clang-verify, which isn't supported by the compiler".format(test.getFullName()))
            steps = [
                # Note: Use -Wno-error to make sure all diagnostics are not treated as errors,
                #       which doesn't make sense for clang-verify tests.
                "%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
            ]
            return self._executeShTest(test, litConfig, steps)
        # Make sure to check these ones last, since they will match other
        # suffixes above too.
        elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
            steps = [
                "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
                "%dbg(EXECUTED AS) %{exec} %t.exe"
            ]
            return self._executeShTest(test, litConfig, steps)
        # This is like a .verify.cpp test when clang-verify is supported,
        # otherwise it's like a .compile.fail.cpp test. This is only provided
        # for backwards compatibility with the test suite.
        elif filename.endswith('.fail.cpp'):
            if supportsVerify:
                steps = [
                    "%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
                ]
            else:
                steps = [
                    "%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
                ]
            return self._executeShTest(test, litConfig, steps)
        else:
            return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))

    # Utility function to add compile flags in lit.local.cfg files.
    def addCompileFlags(self, config, *flags):
        string = ' '.join(flags)
        config.substitutions = [(s, x + ' ' + string) if s == '%{compile_flags}' else (s, x) for (s, x) in config.substitutions]

    def _executeShTest(self, test, litConfig, steps):
        # Run the given preamble steps plus the test's own script through
        # Lit's internal shell, using the per-test temporary paths.
        if test.config.unsupported:
            return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')

        script = parseScript(test, steps)
        # parseScript returns a Result on parse failure; forward it.
        if isinstance(script, lit.Test.Result):
            return script

        if litConfig.noExecute:
            return lit.Test.Result(lit.Test.XFAIL if test.isExpectedToFail() else lit.Test.PASS)
        else:
            _, tmpBase = _getTempPaths(test)
            useExternalSh = False
            return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)

View File

@@ -0,0 +1,121 @@
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
# True on Windows-style hosts, where test executables carry an '.exe' suffix.
kIsWindows = sys.platform in ('win32', 'cygwin')
class GoogleBenchmark(TestFormat):
    """
    Lit test format for Google Benchmark executables.

    Benchmark binaries are discovered under the configured sub-directories,
    and every benchmark listed by a binary is exposed as an individual Lit
    test.
    """
    def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
        # Copy to avoid sharing the mutable default across instances.
        self.benchmark_args = list(benchmark_args)
        self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}

    def getBenchmarkTests(self, path, litConfig, localConfig):
        """getBenchmarkTests(path) - [name]

        Return the benchmark names listed by a benchmark executable.

        Args:
          path: String path to a benchmark executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""
        # TODO: allow splitting tests according to the "benchmark family" so
        # the output for a single family of tests all belongs to the same test
        # target.
        list_test_cmd = [path, '--benchmark_list_tests']
        try:
            output = subprocess.check_output(list_test_cmd,
                                             env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-benchmarks in %r: %s. Process output: %s"
                % (path, exc, exc.output))
            # Fix: raising StopIteration inside a generator is a RuntimeError
            # since Python 3.7 (PEP 479); a plain `return` is the correct way
            # to end the iteration early.
            return

        nested_tests = []
        for ln in output.splitlines(False):  # Don't keep newlines.
            ln = lit.util.to_string(ln)
            if not ln.strip():
                continue

            # Benchmark names are printed as an indented tree, two spaces of
            # indentation per nesting level.
            index = 0
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index*2:]
            if ln.endswith('.'):
                nested_tests.append(ln)
            elif any([name.startswith('DISABLED_')
                      for name in nested_tests + [ln]]):
                # Gtest will internally skip these tests. No need to launch a
                # child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        """Yield one Lit test per benchmark found in each benchmark binary."""
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
                for testname in testnames:
                    testPath = path_in_suite + (subdir, fn, testname)
                    yield lit.Test.Test(testSuite, testPath, localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
        """Run one benchmark and report PASS/FAIL based on its output."""
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--benchmark_filter=%s$' % testName] + self.benchmark_args

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
                    'Reached timeout of {} seconds'.format(
                        litConfig.maxIndividualTestTime)
                    )

        if exitCode:
            return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err

        passing_test_line = testName
        if passing_test_line not in out:
            msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg

        return lit.Test.PASS, err + out

View File

@@ -0,0 +1,49 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
def _getSubstitution(substitution, config):
for (orig, replacement) in config.substitutions:
if orig == substitution:
return replacement
raise ValueError('Substitution {} is not in the config.'.format(substitution))
def configure(parameters, features, config, lit_config):
    """Apply `parameters` and implicitly-detected `features` to `config`.

    Parameters are applied before features because what a parameter
    enables can influence which features are detected. Afterwards the
    basic substitutions and all available features are logged.
    """
    note = lambda s: lit_config.note("({}) {}".format(config.name, s))

    # Start from the ambient environment of the lit invocation.
    config.environment = dict(os.environ)

    # Apply the actions supplied by parameters to the configuration first, since
    # parameters are things that we request explicitly and which might influence
    # what features are implicitly made available next.
    for param in parameters:
        actions = param.getActions(config, lit_config.params)
        for action in actions:
            action.applyTo(config)
            if lit_config.debug:
                note("Applied '{}' as a result of parameter '{}'".format(
                    action.pretty(config, lit_config.params),
                    param.pretty(config, lit_config.params)))

    # Then, apply the automatically-detected features.
    for feature in features:
        actions = feature.getActions(config)
        for action in actions:
            action.applyTo(config)
            if lit_config.debug:
                note("Applied '{}' as a result of implicitly detected feature '{}'".format(
                    action.pretty(config, lit_config.params),
                    feature.pretty(config)))

    # Print the basic substitutions
    for sub in ('%{cxx}', '%{flags}', '%{compile_flags}', '%{link_flags}', '%{exec}'):
        note("Using {} substitution: '{}'".format(sub, _getSubstitution(sub, config)))

    # Print all available features
    note("All available features: {}".format(', '.join(config.available_features)))

View File

@@ -0,0 +1,240 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
from libcxx.test.features import _isMSVC
import re
# Warning flags used when compiling the test suite with warnings enabled
# (see the 'enable_warnings' parameter). Unsupported flags are silently
# dropped via AddOptionalWarningFlag, so GCC-only and Clang-only spellings
# can coexist in this list.
_warningFlags = [
    '-Werror',
    '-Wall',
    '-Wextra',
    '-Wshadow',
    '-Wundef',
    '-Wno-unused-command-line-argument',
    '-Wno-attributes',
    '-Wno-pessimizing-move',
    '-Wno-c++11-extensions',
    '-Wno-noexcept-type',
    '-Wno-aligned-allocation-unavailable',
    '-Wno-atomic-alignment',

    # GCC warns about places where we might want to add sized allocation/deallocation
    # functions, but we know better what we're doing/testing in the test suite.
    '-Wno-sized-deallocation',

    # Turn off warnings about user-defined literals with reserved suffixes. Those are
    # just noise since we are testing the Standard Library itself.
    '-Wno-literal-suffix', # GCC
    '-Wno-user-defined-literals', # Clang

    # GCC warns about this when TEST_IS_CONSTANT_EVALUATED is used on a non-constexpr
    # function. (This mostely happens in C++11 mode.)
    # TODO(mordante) investigate a solution for this issue.
    '-Wno-tautological-compare',

    # -Wstringop-overread seems to be a bit buggy currently
    '-Wno-stringop-overread',

    # These warnings should be enabled in order to support the MSVC
    # team using the test suite; They enable the warnings below and
    # expect the test suite to be clean.
    '-Wsign-compare',
    '-Wunused-variable',
    '-Wunused-parameter',
    '-Wunreachable-code',
    '-Wno-unused-local-typedef',
]

# All language standards the suite knows about, oldest first; the 'std'
# parameter defaults to the newest one the compiler accepts.
_allStandards = ['c++03', 'c++11', 'c++14', 'c++17', 'c++20', 'c++2b']
def getStdFlag(cfg, std):
    """Return the -std=<...> flag the compiler accepts for `std`, or None.

    Falls back to the pre-ratification spelling of the standard
    (e.g. c++1z for c++17) when the final name is not supported.
    """
    fallbacks = {
        'c++11': 'c++0x',
        'c++14': 'c++1y',
        'c++17': 'c++1z',
        'c++20': 'c++2a',
    }
    candidates = ['-std=' + std]
    if std in fallbacks:
        candidates.append('-std=' + fallbacks[std])
    for flag in candidates:
        if hasCompileFlag(cfg, flag):
            return flag
    return None
# Parameters accepted by the test suite via `--param X=Y` on the lit command
# line. Each Parameter maps its value to a list of actions (features,
# compile/link flags, substitutions) applied to the test configuration.
DEFAULT_PARAMETERS = [
    Parameter(name='target_triple', type=str,
              help="The target triple to compile the test suite for. This must be "
                   "compatible with the target that the tests will be run on.",
              actions=lambda triple: filter(None, [
                  AddFeature('target={}'.format(triple)),
                  AddFlagIfSupported('--target={}'.format(triple)),
                  AddSubstitution('%{triple}', triple)
              ])),

    Parameter(name='std', choices=_allStandards, type=str,
              help="The version of the standard to compile the test suite with.",
              # Default to the newest standard the compiler supports.
              default=lambda cfg: next(s for s in reversed(_allStandards) if getStdFlag(cfg, s)),
              actions=lambda std: [
                  AddFeature(std),
                  AddCompileFlag(lambda cfg: getStdFlag(cfg, std)),
              ]),

    Parameter(name='enable_modules', choices=[True, False], type=bool, default=False,
              help="Whether to build the test suite with Clang modules enabled.",
              actions=lambda modules: [
                  AddFeature('modules-build'),
                  AddCompileFlag('-fmodules'),
                  AddCompileFlag('-fcxx-modules'), # AppleClang disregards -fmodules entirely when compiling C++. This enables modules for C++.
              ] if modules else []),

    Parameter(name='enable_exceptions', choices=[True, False], type=bool, default=True,
              help="Whether to enable exceptions when compiling the test suite.",
              actions=lambda exceptions: [] if exceptions else [
                  AddFeature('no-exceptions'),
                  AddCompileFlag('-fno-exceptions')
              ]),

    Parameter(name='enable_rtti', choices=[True, False], type=bool, default=True,
              help="Whether to enable RTTI when compiling the test suite.",
              actions=lambda rtti: [] if rtti else [
                  AddFeature('no-rtti'),
                  AddCompileFlag('-fno-rtti')
              ]),

    Parameter(name='stdlib', choices=['llvm-libc++', 'apple-libc++', 'libstdc++', 'msvc'], type=str, default='llvm-libc++',
              help="""The C++ Standard Library implementation being tested.

                   Note that this parameter can also be used to encode different 'flavors' of the same
                   standard library, such as libc++ as shipped by a different vendor, if it has different
                   properties worth testing.

                   The Standard libraries currently supported are:
                   - llvm-libc++: The 'upstream' libc++ as shipped with LLVM.
                   - apple-libc++: libc++ as shipped by Apple. This is basically like the LLVM one, but
                                   there are a few differences like installation paths and the use of
                                   universal dylibs.
                   - libstdc++: The GNU C++ library typically shipped with GCC.
                   - msvc: The Microsoft implementation of the C++ Standard Library.
                  """,
              actions=lambda stdlib: filter(None, [
                  AddFeature('stdlib={}'.format(stdlib)),
                  # Also add an umbrella feature 'stdlib=libc++' for all flavors of libc++, to simplify
                  # the test suite.
                  # NOTE: the pattern must be a raw string -- '\+' is an invalid
                  # escape sequence in a regular string literal (a warning on
                  # recent Python versions).
                  AddFeature('stdlib=libc++') if re.match(r'.+-libc\+\+', stdlib) else None
              ])),

    Parameter(name='enable_warnings', choices=[True, False], type=bool, default=True,
              help="Whether to enable warnings when compiling the test suite.",
              actions=lambda warnings: [] if not warnings else
                # Unsupported warning flags are dropped silently.
                [AddOptionalWarningFlag(w) for w in _warningFlags] +
                [AddCompileFlag('-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER')]
              ),

    Parameter(name='use_sanitizer', choices=['', 'Address', 'Undefined', 'Memory', 'MemoryWithOrigins', 'Thread', 'DataFlow', 'Leaks'], type=str, default='',
              help="An optional sanitizer to enable when building and running the test suite.",
              actions=lambda sanitizer: filter(None, [
                  AddFlag('-g -fno-omit-frame-pointer') if sanitizer else None,

                  AddFlag('-fsanitize=undefined -fno-sanitize=float-divide-by-zero -fno-sanitize-recover=all') if sanitizer == 'Undefined' else None,
                  AddFeature('ubsan')                                                                          if sanitizer == 'Undefined' else None,

                  AddFlag('-fsanitize=address') if sanitizer == 'Address' else None,
                  AddFeature('asan')            if sanitizer == 'Address' else None,

                  AddFlag('-fsanitize=memory')               if sanitizer in ['Memory', 'MemoryWithOrigins'] else None,
                  AddFeature('msan')                         if sanitizer in ['Memory', 'MemoryWithOrigins'] else None,
                  AddFlag('-fsanitize-memory-track-origins') if sanitizer == 'MemoryWithOrigins' else None,

                  AddFlag('-fsanitize=thread') if sanitizer == 'Thread' else None,
                  AddFeature('tsan')           if sanitizer == 'Thread' else None,

                  AddFlag('-fsanitize=dataflow') if sanitizer == 'DataFlow' else None,
                  AddFlag('-fsanitize=leaks')    if sanitizer == 'Leaks' else None,

                  AddFeature('sanitizer-new-delete') if sanitizer in ['Address', 'Memory', 'MemoryWithOrigins', 'Thread'] else None,
              ])),

    Parameter(name='enable_experimental', choices=[True, False], type=bool, default=True,
              help="Whether to enable tests for experimental C++ Library features.",
              actions=lambda experimental: [
                  # When linking in MSVC mode via the Clang driver, a -l<foo>
                  # maps to <foo>.lib, so we need to use -llibc++experimental here
                  # to make it link against the static libc++experimental.lib.
                  # We can't check for the feature 'msvc' in available_features
                  # as those features are added after processing parameters.
                  AddFeature('c++experimental'),
                  PrependLinkFlag(lambda cfg: '-llibc++experimental' if _isMSVC(cfg) else '-lc++experimental'),
                  AddCompileFlag('-D_LIBCPP_ENABLE_EXPERIMENTAL'),
              ] if experimental else [
                  AddFeature('libcpp-has-no-incomplete-format'),
                  AddFeature('libcpp-has-no-incomplete-ranges')
              ]),

    Parameter(name='long_tests', choices=[True, False], type=bool, default=True,
              help="Whether to enable tests that take longer to run. This can be useful when running on a very slow device.",
              actions=lambda enabled: [] if not enabled else [
                  AddFeature('long_tests')
              ]),

    Parameter(name='enable_assertions', choices=[True, False], type=bool, default=False,
              help="Whether to enable assertions when compiling the test suite. This is only meaningful when "
                   "running the tests against libc++.",
              actions=lambda assertions: [
                  AddCompileFlag('-D_LIBCPP_ENABLE_ASSERTIONS=1'),
                  AddFeature('libcpp-has-assertions')
              ] if assertions else []),

    Parameter(name='additional_features', type=list, default=[],
              help="A comma-delimited list of additional features that will be enabled when running the tests. "
                   "This should be used sparingly since specifying ad-hoc features manually is error-prone and "
                   "brittle in the long run as changes are made to the test suite.",
              actions=lambda features: [AddFeature(f) for f in features]),

    Parameter(name='enable_transitive_includes', choices=[True, False], type=bool, default=True,
              help="Whether to enable backwards-compatibility transitive includes when running the tests. This "
                   "is provided to ensure that the trimmed-down version of libc++ does not bit-rot in between "
                   "points at which we bulk-remove transitive includes.",
              actions=lambda enabled: [] if enabled else [
                  AddFeature('transitive-includes-disabled'),
                  AddCompileFlag('-D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES')
              ]),
]
DEFAULT_PARAMETERS += [
Parameter(name='use_system_cxx_lib', choices=[True, False], type=bool, default=False,
help="""
Whether the test suite is being *run* against the library shipped on the
target triple in use, as opposed to the trunk library.
When vendor-specific availability annotations are enabled, we add the
'use_system_cxx_lib' Lit feature to allow writing XFAIL or UNSUPPORTED
markup for tests that are known to fail on a particular triple.
That feature can be used to XFAIL a test that fails when deployed on (or is
compiled for) an older system. For example, if the test exhibits a bug in the
libc on a particular system version, or if the test uses a symbol that is not
available on an older version of the dylib, it can be marked as XFAIL with
the above feature.
It is sometimes useful to check that a test fails specifically when compiled
for a given deployment target. For example, this is the case when testing
availability markup, where we want to make sure that using the annotated
facility on a deployment target that doesn't support it will fail at compile
time, not at runtime. This can be achieved by creating a `.compile.pass.cpp`
and XFAILing it for the right deployment target. If the test doesn't fail at
compile-time like it's supposed to, the test will XPASS. Another option is to
create a `.verify.cpp` test that checks for the right errors, and mark that
test as requiring `use_system_cxx_lib && <target>`.
""",
actions=lambda useSystem: [
AddFeature('use_system_cxx_lib')
] if useSystem else [
# If we're testing upstream libc++, disable availability markup,
# which is not relevant for non-shipped flavors of libc++.
AddCompileFlag('-D_LIBCPP_DISABLE_AVAILABILITY')
])
]

View File

@@ -0,0 +1,171 @@
#===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
import importlib
import lit.util
import os
import platform
import re
import subprocess
import sys
from libcxx.util import executeCommand
class DefaultTargetInfo(object):
    """Base description of the platform the test suite targets.

    Subclasses override the platform predicates and the add_*_flags
    hooks to inject platform-specific compile and link flags.
    """

    def __init__(self, full_config):
        self.full_config = full_config
        self.executor = None

    def is_windows(self):
        return False

    def is_zos(self):
        return False

    def is_mingw(self):
        return False

    def add_cxx_flags(self, flags):
        pass

    def add_cxx_compile_flags(self, flags):
        pass

    def add_cxx_link_flags(self, flags):
        pass

    def allow_cxxabi_link(self):
        return True

    def add_path(self, dest_env, new_path):
        """Prepend `new_path` to PATH in `dest_env` (no-op when empty)."""
        if not new_path:
            return
        if 'PATH' not in dest_env:
            dest_env['PATH'] = new_path
        else:
            # Use the platform's PATH separator.
            separator = ';' if self.is_windows() else ':'
            dest_env['PATH'] = new_path + separator + dest_env['PATH']
class DarwinLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local macOS host."""
    def __init__(self, full_config):
        super(DarwinLocalTI, self).__init__(full_config)

    def add_cxx_flags(self, flags):
        # Ask Xcode for the active SDK so headers and libraries are found.
        out, err, exit_code = executeCommand(['xcrun', '--show-sdk-path'])
        if exit_code != 0:
            self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
        # On failure we only warn and proceed without -isysroot.
        if exit_code == 0 and out:
            sdk_path = out.strip()
            self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
            assert isinstance(sdk_path, str)
            flags += ["-isysroot", sdk_path]

    def add_cxx_link_flags(self, flags):
        flags += ['-lSystem']

    def allow_cxxabi_link(self):
        # Don't link libc++abi explicitly on OS X because the symbols
        # should be available in libc++ directly.
        return False
class FreeBSDLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local FreeBSD host."""
    def __init__(self, full_config):
        super(FreeBSDLocalTI, self).__init__(full_config)

    def add_cxx_link_flags(self, flags):
        # FreeBSD uses libcxxrt as the C++ ABI library.
        flags.extend(['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt'])
class NetBSDLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local NetBSD host."""
    def __init__(self, full_config):
        super(NetBSDLocalTI, self).__init__(full_config)

    def add_cxx_link_flags(self, flags):
        flags.extend(['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
                      '-lunwind'])
class LinuxLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local Linux host."""
    def __init__(self, full_config):
        super(LinuxLocalTI, self).__init__(full_config)

    def add_cxx_compile_flags(self, flags):
        # Request the C99 format/limit/constant macros from the C headers;
        # some C libraries only define them when explicitly asked.
        flags += ['-D__STDC_FORMAT_MACROS',
                  '-D__STDC_LIMIT_MACROS',
                  '-D__STDC_CONSTANT_MACROS']

    def add_cxx_link_flags(self, flags):
        # NOTE: the order of the libraries below matters to the linker;
        # do not reorder these appends.
        enable_threads = ('no-threads' not in
                          self.full_config.config.available_features)
        llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
        shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
        flags += ['-lm']
        if not llvm_unwinder:
            flags += ['-lgcc_s', '-lgcc']
        if enable_threads:
            flags += ['-lpthread']
            # librt is only needed when linking libc++ statically.
            if not shared_libcxx:
                flags += ['-lrt']
        flags += ['-lc']
        # Pick the unwinder: LLVM's libunwind (needs -ldl) or GCC's.
        # NOTE(review): -lgcc_s / -lgcc may be added a second time here on
        # purpose (link order), mirroring the upstream file -- confirm
        # before simplifying.
        if llvm_unwinder:
            flags += ['-lunwind', '-ldl']
        else:
            flags += ['-lgcc_s']
        builtins_lib = self.full_config.get_lit_conf('builtins_library')
        if builtins_lib:
            flags += [builtins_lib]
        else:
            flags += ['-lgcc']
        has_libatomic = self.full_config.get_lit_bool('has_libatomic', False)
        if has_libatomic:
            flags += ['-latomic']
        san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
        if san:
            # The libraries and their order are taken from the
            # linkSanitizerRuntimeDeps function in
            # clang/lib/Driver/Tools.cpp
            flags += ['-lpthread', '-lrt', '-lm', '-ldl']
class LinuxRemoteTI(LinuxLocalTI):
    """Target info for a remote Linux target; identical flags to local Linux."""
    def __init__(self, full_config):
        super(LinuxRemoteTI, self).__init__(full_config)
class WindowsLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local Windows host."""
    def __init__(self, full_config):
        super(WindowsLocalTI, self).__init__(full_config)

    def is_windows(self):
        return True
class ZOSLocalTI(DefaultTargetInfo):
    """Target info for building/running on the local z/OS host."""
    def __init__(self, full_config):
        super(ZOSLocalTI, self).__init__(full_config)

    def is_zos(self):
        return True
class MingwLocalTI(WindowsLocalTI):
    """Target info for a MinGW toolchain on the local Windows host."""
    def __init__(self, full_config):
        super(MingwLocalTI, self).__init__(full_config)

    def is_mingw(self):
        return True
def make_target_info(full_config):
    """Instantiate the TargetInfo describing the platform under test.

    Honors an explicit 'target_info' lit configuration value (a dotted
    'module.Class' path) when one is provided; otherwise selects a class
    based on the host platform, defaulting to DefaultTargetInfo.
    """
    default = "libcxx.test.target_info.LocalTI"
    info_str = full_config.get_lit_conf('target_info', default)
    if info_str != default:
        # Load the user-specified class dynamically.
        mod_path, _, info = info_str.rpartition('.')
        target_info = getattr(importlib.import_module(mod_path), info)(full_config)
        full_config.lit_config.note("inferred target_info as: %r" % info_str)
        return target_info
    by_system = {
        'Darwin':  DarwinLocalTI,
        'FreeBSD': FreeBSDLocalTI,
        'NetBSD':  NetBSDLocalTI,
        'Linux':   LinuxLocalTI,
        'Windows': WindowsLocalTI,
        'OS/390':  ZOSLocalTI,
    }
    cls = by_system.get(platform.system(), DefaultTargetInfo)
    return cls(full_config)

View File

@@ -0,0 +1,298 @@
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from contextlib import contextmanager
import errno
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading
# FIXME: Most of these functions are cribbed from LIT
def to_bytes(str):
    """Return the argument as UTF-8 encoded bytes; bytes pass through."""
    # The parameter shadows the builtin `str` name; kept for interface
    # compatibility with the historical lit helper.
    return str if isinstance(str, bytes) else str.encode('utf-8')
def to_string(bytes):
    """Return str input unchanged.

    NOTE(review): for non-str input this returns *bytes* (UTF-8 encoding
    anything that isn't already bytes) -- the historical lit behavior.
    Callers that need actual decoding use convert_string instead.
    """
    if isinstance(bytes, str):
        return bytes
    # Inlined to_bytes(): bytes pass through, anything else is encoded.
    # (type(b'') is used because the parameter shadows the builtin name.)
    if isinstance(bytes, type(b'')):
        return bytes
    return bytes.encode('utf-8')
def convert_string(bytes):
    """Best-effort conversion of the argument to text.

    UTF-8 decodes byte strings; values that are not bytes, or that do not
    decode, are rendered with str().
    """
    try:
        decoded = bytes.decode('utf-8')
    except AttributeError:  # 'str' object has no attribute 'decode'.
        return str(bytes)
    except UnicodeError:
        return str(bytes)
    return decoded
def cleanFile(filename):
    """Remove `filename` if possible; a missing/unremovable file is ignored."""
    try:
        os.remove(filename)
    except OSError:
        # Best-effort cleanup: the file may already be gone.
        pass
@contextmanager
def guardedTempFilename(suffix='', prefix='', dir=None):
    """Create and yield a temporary filename within a with-statement.

    The file is created empty and removed on scope exit. The removal is
    performed in a ``finally`` block so the file is cleaned up even when
    the with-body raises (the previous version leaked it on exceptions).
    """
    handle, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    os.close(handle)
    try:
        yield name
    finally:
        # Best-effort removal (inlined cleanFile behavior).
        try:
            os.remove(name)
        except OSError:
            pass
@contextmanager
def guardedFilename(name):
    """Yield `name` within a with-statement and remove the file on exit.

    The removal runs in a ``finally`` block so cleanup happens even when
    the with-body raises (the previous version leaked the file then).
    """
    try:
        yield name
    finally:
        # Best-effort removal (inlined cleanFile behavior).
        try:
            os.remove(name)
        except OSError:
            pass
@contextmanager
def nullContext(value):
    """Yield `value` unchanged; no action is taken on scope exit."""
    yield value
def makeReport(cmd, out, err, rc):
    """Format a human-readable report of a command invocation.

    Empty stdout/stderr sections are omitted; the report always ends
    with a blank line.
    """
    parts = ["Command: %s\n" % cmd, "Exit Code: %d\n" % rc]
    if out:
        parts.append("Standard Output:\n--\n%s--\n" % out)
    if err:
        parts.append("Standard Error:\n--\n%s--\n" % err)
    parts.append('\n')
    return ''.join(parts)
def capture(args, env=None):
    """capture(command) - Run the given command (or argv list) in a shell and
    return the standard output. Raises a CalledProcessError if the command
    exits with a non-zero status."""
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)
    raw_out, raw_err = proc.communicate()
    out = convert_string(raw_out)
    err = convert_string(raw_err)
    if proc.returncode != 0:
        # Attach both streams to the error so callers can diagnose failures.
        raise subprocess.CalledProcessError(cmd=args,
                                            returncode=proc.returncode,
                                            output="{}\n{}".format(out, err))
    return out
def which(command, paths = None):
    """which(command, [paths]) - Look up the given command in the paths string
    (or the PATH environment variable, if unspecified)."""
    if paths is None:
        paths = os.environ.get('PATH', '')

    # A path to an existing file wins outright.
    if os.path.isfile(command):
        return command

    if not paths:
        paths = os.defpath

    # On ';'-separated path systems honor PATHEXT; on Cygwin, PATHEXT may
    # exist but must not be used (os.pathsep is ':' there, so it is not).
    if os.pathsep == ';':
        extensions = os.environ.get('PATHEXT', '').split(';')
    else:
        extensions = ['']

    for directory in paths.split(os.pathsep):
        for extension in extensions:
            candidate = os.path.join(directory, command + extension)
            if os.path.exists(candidate) and not os.path.isdir(candidate):
                return candidate
    return None
def checkToolsPath(dir, tools):
    """Return True iff every name in `tools` exists inside directory `dir`."""
    return all(os.path.exists(os.path.join(dir, tool)) for tool in tools)
def whichTools(tools, paths):
    """Return the first directory in `paths` containing all `tools`, or None."""
    return next((p for p in paths.split(os.pathsep)
                 if checkToolsPath(p, tools)), None)
def mkdir_p(path):
    """mkdir_p(path) - Make the "path" directory, if it does not exist; this
    will also make directories for any missing parent directories."""
    # Preserve historical behavior: empty paths and already-existing paths
    # (even if they are files, not directories) are silently accepted.
    if not path or os.path.exists(path):
        return
    # os.makedirs with exist_ok=True replaces the previous hand-rolled
    # recursion, including its guard against the EEXIST race when another
    # process creates a directory concurrently.
    os.makedirs(path, exist_ok=True)
class ExecuteCommandTimeoutException(Exception):
    """Raised by executeCommand when the timeout expires.

    Carries whatever output was gathered before the process was killed,
    along with the exit code of the killed process.
    """
    def __init__(self, msg, out, err, exitCode):
        # Validate the payload types up front so bad callers fail loudly.
        for value, expected in ((msg, str), (out, str), (err, str), (exitCode, int)):
            assert isinstance(value, expected)
        self.msg = msg
        self.out = out
        self.err = err
        self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')

def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
    """
    Execute command ``command`` (list of arguments or string)
    with
    * working directory ``cwd`` (str), use None to use the current
    working directory
    * environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use string to pass
    no input.
    * Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.

    Returns a tuple (out, err, exitCode) where
    * ``out`` (str) is the standard output of running the command
    * ``err`` (str) is the standard error of running the command
    * ``exitCode`` (int) is the exitCode of running the command

    If the timeout is hit an ``ExecuteCommandTimeoutException``
    is raised.
    """
    if input is not None:
        input = to_bytes(input)
    p = subprocess.Popen(command, cwd=cwd,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env, close_fds=kUseCloseFDs)
    timerObject = None
    # `hitTimeOut` is flipped from the timer thread; communicate() then
    # returns because the process was killed, and we raise below.
    hitTimeOut = False
    try:
        if timeout > 0:
            def killProcess():
                # We may be invoking a shell so we need to kill the
                # process and all its children.
                nonlocal hitTimeOut
                hitTimeOut = True
                killProcessAndChildren(p.pid)

            timerObject = threading.Timer(timeout, killProcess)
            timerObject.start()

        out, err = p.communicate(input=input)
        exitCode = p.wait()
    finally:
        # Always cancel the timer so a fast command isn't killed later.
        if timerObject != None:
            timerObject.cancel()

    # Ensure the resulting output is always of string type.
    out = convert_string(out)
    err = convert_string(err)

    if hitTimeOut:
        raise ExecuteCommandTimeoutException(
            msg='Reached timeout of {} seconds'.format(timeout),
            out=out,
            err=err,
            exitCode=exitCode
        )

    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt

    return out, err, exitCode
def killProcessAndChildren(pid):
    """
    This function kills a process with ``pid`` and all its
    running children (recursively). It is currently implemented
    using the psutil module which provides a simple platform
    neutral implementation.

    TODO: Reimplement this without using psutil so we can
          remove our dependency on it.
    """
    if platform.system() == 'AIX':
        # AIX has no psutil wheel; fall back to the shell. `-L{pid}` lists
        # the process and its descendants.
        subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
    else:
        # Imported lazily so platforms that never hit a timeout don't need
        # psutil installed.
        import psutil
        try:
            psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions
            try:
                # psutil >= 2.x
                children_iterator = psutilProc.children(recursive=True)
            except AttributeError:
                # psutil 1.x
                children_iterator = psutilProc.get_children(recursive=True)
            for child in children_iterator:
                try:
                    child.kill()
                except psutil.NoSuchProcess:
                    # The child exited on its own; nothing to do.
                    pass
            psutilProc.kill()
        except psutil.NoSuchProcess:
            pass
def executeCommandVerbose(cmd, *args, **kwargs):
    """
    Execute a command and print its output on failure.
    """
    out, err, exitCode = executeCommand(cmd, *args, **kwargs)
    if exitCode != 0:
        failure_report = makeReport(cmd, out, err, exitCode) + "\n\nFailed!"
        sys.stderr.write('%s\n' % failure_report)
    return out, err, exitCode
def executeCommandOrDie(cmd, *args, **kwargs):
    """
    Execute a command; on failure, print its output and exit the current
    process with the command's exit code.
    """
    out, err, exitCode = executeCommand(cmd, *args, **kwargs)
    if exitCode != 0:
        report = makeReport(cmd, out, err, exitCode)
        report += "\n\nFailed!"
        sys.stderr.write('%s\n' % report)
        # Unlike executeCommandVerbose, a failure here is fatal.
        sys.exit(exitCode)
    return out, err, exitCode

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""run.py is a utility for running a program.
It can perform code signing, forward arguments to the program, and return the
program's error code.
"""
import argparse
import os
import platform
import subprocess
import sys
def main():
    """Run a test command in its execution directory.

    Parses the wrapper arguments, optionally codesigns test-executables,
    builds a scrubbed environment from --env (plus a few pass-through
    variables on Windows), and returns the command's exit code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--execdir', type=str, required=True)
    parser.add_argument('--codesign_identity', type=str, required=False, default=None)
    parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
    parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
    args = parser.parse_args()
    commandLine = args.command

    # HACK:
    # If an argument is a file that ends in `.tmp.exe`, assume it is the name
    # of an executable generated by a test file. We call these test-executables
    # below. This allows us to do custom processing like codesigning test-executables.
    # It's also possible for there to be no such executable, for example in the case
    # of a .sh.cpp test.
    isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)

    # Do any necessary codesigning of test-executables found in the command line.
    if args.codesign_identity:
        for exe in filter(isTestExe, commandLine):
            subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})

    # Extract environment variables into a dictionary
    env = {k : v for (k, v) in map(lambda s: s.split('=', 1), args.env)}
    if platform.system() == 'Windows':
        # Pass some extra variables through on Windows:
        # COMSPEC is needed for running subprocesses via std::system().
        if 'COMSPEC' in os.environ:
            env['COMSPEC'] = os.environ.get('COMSPEC')
        # TEMP is needed for placing temp files in a sensible directory.
        if 'TEMP' in os.environ:
            env['TEMP'] = os.environ.get('TEMP')

    # Run the command line with the given environment in the execution directory.
    return subprocess.call(commandLine, cwd=args.execdir, env=env, shell=False)

if __name__ == '__main__':
    # Propagate the wrapped command's exit code to our caller.
    exit(main())

View File

@@ -0,0 +1,125 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
Runs an executable on a remote host.
This is meant to be used as an executor when running the C++ Standard Library
conformance test suite.
"""
import argparse
import os
import posixpath
import shlex
import subprocess
import sys
import tarfile
import tempfile
from shlex import quote as cmd_quote
def ssh(args, command):
    """Build the ssh argv that runs `command` on args.host."""
    argv = ['ssh', '-oBatchMode=yes']
    extra = args.extra_ssh_args
    if extra is not None:
        argv += shlex.split(extra)
    argv.append(args.host)
    argv.append(command)
    return argv
def scp(args, src, dst):
    """Build the scp argv that copies local `src` to `dst` on args.host."""
    argv = ['scp', '-q', '-oBatchMode=yes']
    extra = args.extra_scp_args
    if extra is not None:
        argv += shlex.split(extra)
    argv.append(src)
    argv.append('{}:{}'.format(args.host, dst))
    return argv
def main():
    """Run a test command on a remote host over SSH.

    Tars up the execution directory, copies it to a fresh temp directory
    on the remote host, fixes permissions, runs the (path-rewritten)
    command there with the requested environment, and cleans up the
    remote temp directory afterwards. Returns the remote exit code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, required=True)
    parser.add_argument('--execdir', type=str, required=True)
    parser.add_argument('--tempdir', type=str, required=False, default='/tmp')
    parser.add_argument('--extra-ssh-args', type=str, required=False)
    parser.add_argument('--extra-scp-args', type=str, required=False)
    parser.add_argument('--codesign_identity', type=str, required=False, default=None)
    parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
    parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
    args = parser.parse_args()
    commandLine = args.command

    # Create a temporary directory where the test will be run.
    # That is effectively the value of %T on the remote host.
    tmp = subprocess.check_output(ssh(args, 'mktemp -d {}/libcxx.XXXXXXXXXX'.format(args.tempdir)), universal_newlines=True).strip()

    # HACK:
    # If an argument is a file that ends in `.tmp.exe`, assume it is the name
    # of an executable generated by a test file. We call these test-executables
    # below. This allows us to do custom processing like codesigning test-executables
    # and changing their path when running on the remote host. It's also possible
    # for there to be no such executable, for example in the case of a .sh.cpp
    # test.
    isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)
    pathOnRemote = lambda file: posixpath.join(tmp, os.path.basename(file))

    try:
        # Do any necessary codesigning of test-executables found in the command line.
        if args.codesign_identity:
            for exe in filter(isTestExe, commandLine):
                subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})

        # tar up the execution directory (which contains everything that's needed
        # to run the test), and copy the tarball over to the remote host.
        try:
            tmpTar = tempfile.NamedTemporaryFile(suffix='.tar', delete=False)
            with tarfile.open(fileobj=tmpTar, mode='w') as tarball:
                tarball.add(args.execdir, arcname=os.path.basename(args.execdir))

            # Make sure we close the file before we scp it, because accessing
            # the temporary file while still open doesn't work on Windows.
            tmpTar.close()
            remoteTarball = pathOnRemote(tmpTar.name)
            subprocess.check_call(scp(args, tmpTar.name, remoteTarball))
        finally:
            # Make sure we close the file in case an exception happens before
            # we've closed it above -- otherwise close() is idempotent.
            tmpTar.close()
            os.remove(tmpTar.name)

        # Untar the dependencies in the temporary directory and remove the tarball.
        remoteCommands = [
            'tar -xf {} -C {} --strip-components 1'.format(remoteTarball, tmp),
            'rm {}'.format(remoteTarball)
        ]

        # Make sure all test-executables in the remote command line have 'execute'
        # permissions on the remote host. The host that compiled the test-executable
        # might not have a notion of 'executable' permissions.
        for exe in map(pathOnRemote, filter(isTestExe, commandLine)):
            remoteCommands.append('chmod +x {}'.format(exe))

        # Execute the command through SSH in the temporary directory, with the
        # correct environment. We tweak the command line to run it on the remote
        # host by transforming the path of test-executables to their path in the
        # temporary directory on the remote host.
        commandLine = (pathOnRemote(x) if isTestExe(x) else x for x in commandLine)
        remoteCommands.append('cd {}'.format(tmp))
        if args.env:
            remoteCommands.append('export {}'.format(cmd_quote(' '.join(args.env))))
        remoteCommands.append(subprocess.list2cmdline(commandLine))

        # Finally, SSH to the remote host and execute all the commands.
        rc = subprocess.call(ssh(args, ' && '.join(remoteCommands)))
        return rc

    finally:
        # Make sure the temporary directory is removed when we're done.
        subprocess.check_call(ssh(args, 'rm -r {}'.format(tmp)))

if __name__ == '__main__':
    # Propagate the remote command's exit code to our caller.
    exit(main())

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""
from argparse import ArgumentParser
import sys
from libcxx.sym_check import diff, util
def main():
    """Compare two symbol lists and report the differences.

    Parses command-line arguments, extracts (or loads) the old and new
    symbol lists, diffs them, and writes a report to stdout or to the
    file given via -o/--output.

    Exits via sys.exit with status 1 when the diff constitutes an ABI
    break, or — with --strict — when any symbols differ at all;
    otherwise exits with status 0.
    """
    # NOTE: the previous description ("Extract a list of symbols from a
    # shared library.") was copy-pasted from a different utility; this
    # tool diffs two symbol lists (see the module docstring).
    parser = ArgumentParser(
        description='Compare two symbol lists and output the differences.')
    parser.add_argument(
        '--names-only', dest='names_only',
        help='Only print symbol names',
        action='store_true', default=False)
    parser.add_argument(
        '--removed-only', dest='removed_only',
        help='Only print removed symbols',
        action='store_true', default=False)
    parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
                        help="Filter all symbols not related to the stdlib",
                        action='store_true', default=False)
    parser.add_argument('--strict', dest='strict',
                        help="Exit with a non-zero status if any symbols "
                             "differ",
                        action='store_true', default=False)
    parser.add_argument(
        '-o', '--output', dest='output',
        help='The output file. stdout is used if not given',
        type=str, action='store', default=None)
    parser.add_argument(
        '--demangle', dest='demangle', action='store_true', default=False)
    parser.add_argument(
        'old_syms', metavar='old-syms', type=str,
        help='The file containing the old symbol list or a library')
    parser.add_argument(
        'new_syms', metavar='new-syms', type=str,
        help='The file containing the new symbol list or a library')
    args = parser.parse_args()

    # Each argument may be either a pre-extracted symbol list or a binary
    # from which util extracts the symbols itself.
    old_syms_list = util.extract_or_load(args.old_syms)
    new_syms_list = util.extract_or_load(args.new_syms)

    if args.only_stdlib:
        # Keep only stdlib-related symbols; the discarded remainder
        # (second tuple element) is intentionally ignored.
        old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
        new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)

    added, removed, changed = diff.diff(old_syms_list, new_syms_list)
    if args.removed_only:
        # Suppress additions from the report; removals (and changes) are
        # what matter when auditing for symbols that disappeared.
        added = {}
    report, is_break, is_different = diff.report_diff(
        added, removed, changed, names_only=args.names_only,
        demangle=args.demangle)

    if args.output is None:
        print(report)
    else:
        with open(args.output, 'w') as f:
            f.write(report + '\n')

    # An ABI break always fails; --strict also fails on any difference.
    exit_code = 1 if is_break or (args.strict and is_different) else 0
    sys.exit(exit_code)
# Script entry point. main() terminates the process itself via sys.exit,
# so no return value needs to be forwarded here.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,19 @@
# all guard variables
_ZGVNSt3__
# all vtables
_ZTV
# all VTT
_ZTT
# all non-virtual thunks
_ZTh
# all virtual thunks
_ZTv
# typeinfo for std::__1::__types
# There are no std::__types
_ZTINSt3__1[0-9][0-9]*__
# typeinfo name for std::__1::__types
_ZTSNSt3__1[0-9][0-9]*__
# anything using __hidden_allocator
.*__hidden_allocator
# anything using __sso_allocator
.*__sso_allocator

View File

@@ -0,0 +1,19 @@
# all guard variables
__ZGVNSt3__
# all vtables
__ZTV
# all VTT
__ZTT
# all non-virtual thunks
__ZTh
# all virtual thunks
__ZTv
# typeinfo for std::__1::__types
# There are no std::__types
__ZTINSt3__1[0-9][0-9]*__
# typeinfo name for std::__1::__types
__ZTSNSt3__1[0-9][0-9]*__
# anything using __hidden_allocator
.*__hidden_allocator
# anything using __sso_allocator
.*__sso_allocator