Make `process_obligations`' computation of `completed` optional.
It's only used in tests.
This reduces instruction counts on several benchmarks by 0.5--1%.
path = src/llvm
url = https://github.com/rust-lang/llvm.git
branch = master
-[submodule "src/jemalloc"]
- path = src/jemalloc
- url = https://github.com/rust-lang/jemalloc.git
[submodule "src/rust-installer"]
path = src/tools/rust-installer
url = https://github.com/rust-lang/rust-installer.git
path = src/tools/clang
url = https://github.com/rust-lang-nursery/clang.git
branch = rust-release-80-v1
-
\ No newline at end of file
+
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler --enable-lldb"
+ RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler --enable-lldb --set rust.jemalloc"
SRC=.
DEPLOY_ALT=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
# version that we're using, 8.2, cannot compile LLVM for OSX 10.7.
- env: >
RUST_CHECK_TARGET=check
- RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers --enable-profiler"
+ RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers --enable-profiler --set rust.jemalloc"
SRC=.
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
MACOSX_DEPLOYMENT_TARGET=10.8
- env: >
RUST_CHECK_TARGET=check
- RUST_CONFIGURE_ARGS=--build=i686-apple-darwin
+ RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --set rust.jemalloc"
SRC=.
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
MACOSX_DEPLOYMENT_TARGET=10.8
# OSX 10.7 and `xcode7` is the latest Xcode able to compile LLVM for 10.7.
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-full-tools --enable-profiler --enable-lldb"
+ RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-full-tools --enable-profiler --enable-lldb --set rust.jemalloc"
SRC=.
DEPLOY=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-full-tools --enable-sanitizers --enable-profiler --enable-lldb"
+ RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-full-tools --enable-sanitizers --enable-profiler --enable-lldb --set rust.jemalloc"
SRC=.
DEPLOY=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
. src/ci/docker/x86_64-gnu-tools/repo.sh;
commit_toolstate_change "$MESSAGE_FILE" "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "$MESSAGE_FILE" "$TOOLSTATE_REPO_ACCESS_TOKEN";
-env:
- global:
- - SCCACHE_BUCKET=rust-lang-ci-sccache2
- - SCCACHE_REGION=us-west-1
- - AWS_ACCESS_KEY_ID=AKIAJAMV3QAMMA6AXHFQ
- # AWS_SECRET_ACCESS_KEY=...
- - secure: "j96XxTVOSUf4s4r4htIxn/fvIa5DWbMgLqWl7r8z2QfgUwscmkMXAwXuFNc7s7bGTpV/+CgDiMFFM6BAFLGKutytIF6oA02s9b+usQYnM0th7YQ2AIgm9GtMTJCJp4AoyfFmh8F2faUICBZlfVLUJ34udHEe35vOklix+0k4WDo="
- # TOOLSTATE_REPO_ACCESS_TOKEN=...
- - secure: "ESfcXqv4N2VMhqi2iIyw6da9VrsA78I4iR1asouCaq4hzTTrkB4WNRrfURy6xg72gQ4nMhtRJbB0/2jmc9Cu1+g2CzXtyiL223aJ5CKrXdcvbitopQSDfp07dMWm+UED+hNFEanpErKAeU/6FM3A+J+60PMk8MCF1h9tqNRISJw="
-
before_install:
- # We'll use the AWS cli to download/upload cached docker layers, so install
- # that here.
- - if [ "$TRAVIS_OS_NAME" = linux ]; then
- pip install --user awscli;
- export PATH=$PATH:$HOME/.local/bin;
- fi
+ # We'll use the AWS cli to download/upload cached docker layers as well as
+ # push our deployments, so download that here.
+ - pip install --user awscli; export PATH=$PATH:$HOME/.local/bin:$HOME/Library/Python/2.7/bin/
- mkdir -p $HOME/rustsrc
# FIXME(#46924): these two commands are required to enable IPv6,
# they shouldn't exist, please revert once more official solutions appeared.
echo "#### Build successful; Disk usage after running script:";
df -h;
du . | sort -nr | head -n100
+ - >
+ if [ "$DEPLOY$DEPLOY_ALT" == "1" ]; then
+ mkdir -p deploy/$TRAVIS_COMMIT;
+ if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+ rm -rf build/dist/doc &&
+ cp -r build/dist/* deploy/$TRAVIS_COMMIT;
+ else
+ rm -rf obj/build/dist/doc &&
+ cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT;
+ fi;
+ ls -la deploy/$TRAVIS_COMMIT;
+ deploy_dir=rustc-builds;
+ if [ "$DEPLOY_ALT" == "1" ]; then
+ deploy_dir=rustc-builds-alt;
+ fi;
+ travis_retry aws s3 cp --no-progress --recursive --acl public-read ./deploy s3://rust-lang-ci2/$deploy_dir
+ fi
after_failure:
- >
notifications:
email: false
-
-before_deploy:
- - mkdir -p deploy/$TRAVIS_COMMIT
- - >
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
- rm -rf build/dist/doc &&
- cp -r build/dist/* deploy/$TRAVIS_COMMIT;
- else
- rm -rf obj/build/dist/doc &&
- cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT;
- fi
- - ls -la deploy/$TRAVIS_COMMIT
-
-deploy:
- - provider: s3
- bucket: rust-lang-ci2
- skip_cleanup: true
- local_dir: deploy
- upload_dir: rustc-builds
- acl: public_read
- region: us-west-1
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
- on:
- branch: auto
- condition: $DEPLOY = 1
-
- # this is the same as the above deployment provider except that it uploads to
- # a slightly different directory and has a different trigger
- - provider: s3
- bucket: rust-lang-ci2
- skip_cleanup: true
- local_dir: deploy
- upload_dir: rustc-builds-alt
- acl: public_read
- region: us-west-1
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
- on:
- branch: auto
- condition: $DEPLOY_ALT = 1
-
- # These two providers are the same as the two above, except deploy on the
- # try branch. Travis does not appear to provide a way to use "or" in these
- # conditions.
- - provider: s3
- bucket: rust-lang-ci2
- skip_cleanup: true
- local_dir: deploy
- upload_dir: rustc-builds
- acl: public_read
- region: us-west-1
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
- on:
- branch: try
- condition: $DEPLOY = 1
-
- - provider: s3
- bucket: rust-lang-ci2
- skip_cleanup: true
- local_dir: deploy
- upload_dir: rustc-builds-alt
- acl: public_read
- region: us-west-1
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: "kUGd3t7JcVWFESgIlzvsM8viZgCA9Encs3creW0xLJaLSeI1iVjlJK4h/2/nO6y224AFrh/GUfsNr4/4AlxPuYb8OU5oC5Lv+Ff2JiRDYtuNpyQSKAQp+bRYytWMtrmhja91h118Mbm90cUfcLPwkdiINgJNTXhPKg5Cqu3VYn0="
- on:
- branch: try
- condition: $DEPLOY_ALT = 1
environment:
- SCCACHE_BUCKET: rust-lang-ci-sccache2
- SCCACHE_REGION: us-west-1
- AWS_ACCESS_KEY_ID: AKIAJAMV3QAMMA6AXHFQ
- AWS_SECRET_ACCESS_KEY:
- secure: 7Y+JiquYedOAgnUU26uL0DPzrxmTtR+qIwG6rNKSuWDffqU3vVZxbGXim9QpTO80
SCCACHE_DIGEST: f808afabb4a4eb1d7112bcb3fa6be03b61e93412890c88e177c667eb37f46353d7ec294e559b16f9f4b5e894f2185fe7670a0df15fd064889ecbd80f0c34166c
- TOOLSTATE_REPO_ACCESS_TOKEN:
- secure: gKGlVktr7iuqCoYSxHxDE9ltLOKU0nYDEuQxvWbNxUIW7ri5ppn8L06jQzN0GGzN
# By default schannel checks revocation of certificates unlike some other SSL
# backends, but we've historically had problems on CI where a revocation
# 32/64 bit MSVC and GNU deployment
- RUST_CONFIGURE_ARGS: >
--build=x86_64-pc-windows-msvc
+ --target=x86_64-pc-windows-msvc,aarch64-pc-windows-msvc
--enable-full-tools
--enable-profiler
SCRIPT: python x.py dist
DIST_REQUIRE_ALL_TOOLS: 1
DEPLOY: 1
CI_JOB_NAME: dist-x86_64-msvc
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 Preview
- RUST_CONFIGURE_ARGS: >
--build=i686-pc-windows-msvc
--target=i586-pc-windows-msvc
# Note that the LLVM installer is an NSIS installer
#
# Original downloaded here came from
- # http://releases.llvm.org/6.0.0/LLVM-6.0.0-win64.exe
- - if NOT defined MINGW_URL appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/LLVM-6.0.0-win64.exe
- - if NOT defined MINGW_URL .\LLVM-6.0.0-win64.exe /S /NCRC /D=C:\clang-rust
+ # http://releases.llvm.org/7.0.0/LLVM-7.0.0-win64.exe
+ - if NOT defined MINGW_URL appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/LLVM-7.0.0-win64.exe
+ - if NOT defined MINGW_URL .\LLVM-7.0.0-win64.exe /S /NCRC /D=C:\clang-rust
- if NOT defined MINGW_URL set RUST_CONFIGURE_ARGS=%RUST_CONFIGURE_ARGS% --set llvm.clang-cl=C:\clang-rust\bin\clang-cl.exe
# Here we do a pretty heinous thing which is to mangle the MinGW installation
deploy:
- provider: S3
- skip_cleanup: true
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: tQWIE+DJHjXaV4np/3YeETkEmXngtIuIgAO/LYKQaUshGLgN8cBCFGG3cHx5lKLt
+ access_key_id: $(AWS_ACCESS_KEY_ID)
+ secret_access_key: $(AWS_SECRET_ACCESS_KEY)
bucket: rust-lang-ci2
set_public: true
region: us-west-1
# This provider is the same as the one above except that it has a slightly
# different upload directory and a slightly different trigger
- provider: S3
- skip_cleanup: true
- access_key_id: AKIAJVBODR3IA4O72THQ
- secret_access_key:
- secure: tQWIE+DJHjXaV4np/3YeETkEmXngtIuIgAO/LYKQaUshGLgN8cBCFGG3cHx5lKLt
+ access_key_id: $(AWS_ACCESS_KEY_ID)
+ secret_access_key: $(AWS_SECRET_ACCESS_KEY)
bucket: rust-lang-ci2
set_public: true
region: us-west-1
# Adding debuginfo makes them several times larger.
#debuginfo-tools = false
-# Whether or not jemalloc is built and enabled
-#use-jemalloc = true
-
-# Whether or not jemalloc is built with its debug option set
-#debug-jemalloc = false
-
# Whether or not `panic!`s generate backtraces (RUST_BACKTRACE)
#backtrace = true
# generally only set for releases
#remap-debuginfo = false
+# Link the compiler against `jemalloc`, where on Linux and OSX it should
+# override the default allocator for rustc and LLVM.
+#jemalloc = false
+
# =============================================================================
# Options for specific targets
#
# not, you can specify an explicit file name for it.
#llvm-filecheck = "/path/to/FileCheck"
-# Path to the custom jemalloc static library to link into the standard library
-# by default. This is only used if jemalloc is still enabled above
-#jemalloc = "/path/to/jemalloc/libjemalloc_pic.a"
-
# If this target is for Android, this option will be required to specify where
# the NDK for the target lives. This is used to find the C compiler to link and
# build native code.
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "alloc_jemalloc"
-version = "0.0.0"
-dependencies = [
- "build_helper 0.1.0",
- "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.0.0",
- "core 0.0.0",
- "libc 0.0.0",
-]
-
[[package]]
name = "alloc_system"
version = "0.0.0"
[[package]]
name = "cargo"
-version = "0.32.0"
+version = "0.33.0"
dependencies = [
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bytesize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crates-io 0.20.0",
+ "crates-io 0.21.0",
"crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy-mini-macro-test 0.2.0",
"clippy_dev 0.0.1",
"clippy_lints 0.0.212",
- "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "compiletest_rs"
-version = "0.3.13"
+version = "0.3.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "crates-io"
-version = "0.20.0"
+version = "0.21.0"
dependencies = [
"curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "fs_extra"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "fst"
version = "0.3.0"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "jemalloc-sys"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "jobserver"
version = "0.1.11"
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"cargo_metadata 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"vergen 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
name = "rls"
version = "0.130.5"
dependencies = [
- "cargo 0.32.0",
+ "cargo 0.33.0",
"cargo_metadata 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy_lints 0.0.212",
"crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
+ "rustc_allocator 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_incremental 0.0.0",
- "rustc_metadata_utils 0.0.0",
+ "rustc_metadata 0.0.0",
"rustc_mir 0.0.0",
"rustc_target 0.0.0",
+ "serialize 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
"arena 0.0.0",
"env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)",
"graphviz 0.0.0",
+ "jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
- "rustc_metadata_utils 0.0.0",
"rustc_target 0.0.0",
"serialize 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
-[[package]]
-name = "rustc_metadata_utils"
-version = "0.0.0"
-dependencies = [
- "rustc 0.0.0",
- "syntax 0.0.0",
- "syntax_pos 0.0.0",
-]
-
[[package]]
name = "rustc_mir"
version = "0.0.0"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_jemalloc 0.0.0",
"alloc_system 0.0.0",
"build_helper 0.1.0",
"cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
"checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc"
"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007"
"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2"
-"checksum compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "d3064bc712922596dd5ab449fca9261d411893356581fe5297b96aa8f53bb1b8"
+"checksum compiletest_rs 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "75e809f56d6aa9575b67924b0af686c4f4c1380314f47947e235e9ff7fa94bed"
"checksum core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2"
"checksum core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
"checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674"
"checksum fst 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d94485a00b1827b861dd9d1a2cc9764f9044d4c535514c0760a5a2012ef3399f"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
"checksum is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7e5b386aef33a1c677be65237cb9d32c3f3ef56bd035949710c4bb13083eb053"
"checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450"
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
+"checksum jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "bfc62c8e50e381768ce8ee0428ee53741929f7ebd73e4d83f669bcf7693e00ae"
"checksum jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "60af5f849e1981434e4a31d3d782c4774ae9b434ce55b101a96ecfd09147e8be"
"checksum json 0.11.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9ad0485404155f45cce53a40d4b2d6ac356418300daed05273d9e26f91c390be"
"checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c"
// flesh out rpath support more fully in the future.
cmd.arg("-Z").arg("osx-rpath-install-name");
Some("-Wl,-rpath,@loader_path/../lib")
- } else if !target.contains("windows") && !target.contains("wasm32") {
+ } else if !target.contains("windows") &&
+ !target.contains("wasm32") &&
+ !target.contains("fuchsia") {
Some("-Wl,-rpath,$ORIGIN/../lib")
} else {
None
// When running miri tests, we need to generate MIR for all libraries
if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") {
+ // The flags here should be kept in sync with `add_miri_default_args`
+ // in miri's `src/lib.rs`.
cmd.arg("-Zalways-encode-mir");
- cmd.arg("-Zmir-emit-validate=1");
+ // These options are preferred by miri, to be able to perform better validation,
+ // but the bootstrap compiler might not understand them.
+ if stage != "0" {
+ cmd.arg("-Zmir-emit-retag");
+ cmd.arg("-Zmir-opt-level=0");
+ }
}
// Force all crates compiled by this compiler to (a) be unstable and (b)
backends = self.get_toml('codegen-backends')
if backends is None or not 'emscripten' in backends:
continue
- if module.endswith("jemalloc"):
- if self.get_toml('use-jemalloc') == 'false':
- continue
- if self.get_toml('jemalloc'):
- continue
if module.endswith("lld"):
config = self.get_toml('lld')
if config is None or config == 'false':
fn has(&self, needle: &Path) -> bool {
match self {
PathSet::Set(set) => set.iter().any(|p| p.ends_with(needle)),
- PathSet::Suite(_) => false,
+ PathSet::Suite(suite) => suite.ends_with(needle),
}
}
"build" => self.cargo_out(compiler, mode, target),
// This is the intended out directory for crate documentation.
- "doc" => self.crate_doc_out(target),
+ "doc" | "rustdoc" => self.crate_doc_out(target),
_ => self.stage_out(compiler, mode),
};
_ => compile::librustc_stamp(self, cmp, target),
};
- if cmd == "doc" {
+ if cmd == "doc" || cmd == "rustdoc" {
if mode == Mode::Rustc || mode == Mode::ToolRustc || mode == Mode::Codegen {
// This is the intended out directory for compiler documentation.
my_out = self.compiler_doc_out(target);
.env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc"))
.env(
"RUSTDOC_REAL",
- if cmd == "doc" || (cmd == "test" && want_rustdoc) {
+ if cmd == "doc" || cmd == "rustdoc" || (cmd == "test" && want_rustdoc) {
self.rustdoc(compiler.host)
} else {
PathBuf::from("/path/to/nowhere/rustdoc/not/required")
);
// Ensure we don't build any compiler artifacts.
- assert!(builder.cache.all::<compile::Rustc>().is_empty());
+ assert!(!builder.cache.contains::<compile::Rustc>());
assert_eq!(
first(builder.cache.all::<test::Crate>()),
&[test::Crate {
},]
);
}
+
+ #[test]
+ fn test_exclude() {
+ let mut config = configure(&[], &[]);
+ config.exclude = vec![
+ "src/test/run-pass".into(),
+ "src/tools/tidy".into(),
+ ];
+ config.cmd = Subcommand::Test {
+ paths: Vec::new(),
+ test_args: Vec::new(),
+ rustc_args: Vec::new(),
+ fail_fast: true,
+ doc_tests: DocTests::No,
+ bless: false,
+ compare_mode: None,
+ };
+
+ let build = Build::new(config);
+ let builder = Builder::new(&build);
+ builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Test), &[]);
+
+ // Ensure we have really excluded run-pass & tidy
+ assert!(!builder.cache.contains::<test::RunPass>());
+ assert!(!builder.cache.contains::<test::Tidy>());
+
+ // Ensure other tests are not affected.
+ assert!(builder.cache.contains::<test::RunPassFullDeps>());
+ assert!(builder.cache.contains::<test::RustdocUi>());
+ }
}
v.sort_by_key(|&(a, _)| a);
v
}
+
+ #[cfg(test)]
+ pub fn contains<S: Step>(&self) -> bool {
+ self.0.borrow().contains_key(&TypeId::of::<S>())
+ }
}
use config::Config;
// The version number
-pub const CFG_RELEASE_NUM: &str = "1.31.0";
+pub const CFG_RELEASE_NUM: &str = "1.32.0";
pub struct GitInfo {
inner: Option<Info>,
.arg("--manifest-path")
.arg(builder.src.join("src/rustc/compiler_builtins_shim/Cargo.toml"));
} else {
- let mut features = builder.std_features();
-
- // When doing a local rebuild we tell cargo that we're stage1 rather than
- // stage0. This works fine if the local rust and being-built rust have the
- // same view of what the default allocator is, but fails otherwise. Since
- // we don't have a way to express an allocator preference yet, work
- // around the issue in the case of a local rebuild with jemalloc disabled.
- if compiler.stage == 0 && builder.local_rebuild && !builder.config.use_jemalloc {
- features.push_str(" force_alloc_system");
- }
+ let features = builder.std_features();
if compiler.stage != 0 && builder.config.sanitizers {
// This variable is used by the sanitizer runtime crates, e.g.
.arg("--manifest-path")
.arg(builder.src.join("src/libstd/Cargo.toml"));
- if let Some(target) = builder.config.target_config.get(&target) {
- if let Some(ref jemalloc) = target.jemalloc {
- cargo.env("JEMALLOC_OVERRIDE", jemalloc);
- }
- }
if target.contains("musl") {
if let Some(p) = builder.musl_root(target) {
cargo.env("MUSL_ROOT", p);
pub hosts: Vec<Interned<String>>,
pub targets: Vec<Interned<String>>,
pub local_rebuild: bool,
+ pub jemalloc: bool,
// dist misc
pub dist_sign_folder: Option<PathBuf>,
pub dist_gpg_password_file: Option<PathBuf>,
// libstd features
- pub debug_jemalloc: bool,
- pub use_jemalloc: bool,
pub backtrace: bool, // support for RUST_BACKTRACE
pub wasm_syscall: bool,
pub llvm_config: Option<PathBuf>,
/// Some(path to FileCheck) if one was specified.
pub llvm_filecheck: Option<PathBuf>,
- pub jemalloc: Option<PathBuf>,
pub cc: Option<PathBuf>,
pub cxx: Option<PathBuf>,
pub ar: Option<PathBuf>,
link_jobs: Option<u32>,
link_shared: Option<bool>,
version_suffix: Option<String>,
- clang_cl: Option<String>
+ clang_cl: Option<String>,
}
#[derive(Deserialize, Default, Clone)]
debuginfo_only_std: Option<bool>,
debuginfo_tools: Option<bool>,
experimental_parallel_queries: Option<bool>,
- debug_jemalloc: Option<bool>,
- use_jemalloc: Option<bool>,
backtrace: Option<bool>,
default_linker: Option<String>,
channel: Option<String>,
backtrace_on_ice: Option<bool>,
verify_llvm_ir: Option<bool>,
remap_debuginfo: Option<bool>,
+ jemalloc: Option<bool>,
}
/// TOML representation of how each build target is configured.
struct TomlTarget {
llvm_config: Option<String>,
llvm_filecheck: Option<String>,
- jemalloc: Option<String>,
cc: Option<String>,
cxx: Option<String>,
ar: Option<String>,
config.llvm_enabled = true;
config.llvm_optimize = true;
config.llvm_version_check = true;
- config.use_jemalloc = true;
config.backtrace = true;
config.rust_optimize = true;
config.rust_optimize_tests = true;
let mut debuginfo_only_std = None;
let mut debuginfo_tools = None;
let mut debug = None;
- let mut debug_jemalloc = None;
let mut debuginfo = None;
let mut debug_assertions = None;
let mut optimize = None;
debuginfo_tools = rust.debuginfo_tools;
optimize = rust.optimize;
ignore_git = rust.ignore_git;
- debug_jemalloc = rust.debug_jemalloc;
set(&mut config.rust_optimize_tests, rust.optimize_tests);
set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests);
set(&mut config.codegen_tests, rust.codegen_tests);
set(&mut config.rust_rpath, rust.rpath);
- set(&mut config.use_jemalloc, rust.use_jemalloc);
+ set(&mut config.jemalloc, rust.jemalloc);
set(&mut config.backtrace, rust.backtrace);
set(&mut config.channel, rust.channel.clone());
set(&mut config.rust_dist_src, rust.dist_src);
if let Some(ref s) = cfg.llvm_filecheck {
target.llvm_filecheck = Some(config.src.join(s));
}
- if let Some(ref s) = cfg.jemalloc {
- target.jemalloc = Some(config.src.join(s));
- }
if let Some(ref s) = cfg.android_ndk {
target.ndk = Some(config.src.join(s));
}
config.rust_debuginfo_tools = debuginfo_tools.unwrap_or(false);
let default = debug == Some(true);
- config.debug_jemalloc = debug_jemalloc.unwrap_or(default);
config.rust_debuginfo = debuginfo.unwrap_or(default);
config.rust_debug_assertions = debug_assertions.unwrap_or(default);
options.append(Option(*args, value=True))
-o("debug", "rust.debug", "debug mode; disables optimization unless `--enable-optimize` given")
+o("debug", "rust.debug", "enables debugging environment; does not affect optimization of bootstrapped code (use `--disable-optimize` for that)")
o("docs", "build.docs", "build standard library documentation")
o("compiler-docs", "build.compiler-docs", "build compiler documentation")
o("optimize-tests", "rust.optimize-tests", "build tests with optimizations")
o("profiler", "build.profiler", "build the profiler runtime")
o("emscripten", None, "compile the emscripten backend as well as LLVM")
o("full-tools", None, "enable all tools")
+o("lld", "rust.lld", "build lld")
o("lldb", "rust.lldb", "build lldb")
o("missing-tools", "dist.missing-tools", "allow failures when building tools")
o("debuginfo-lines", "rust.debuginfo-lines", "build with line number debugger metadata")
o("debuginfo-only-std", "rust.debuginfo-only-std", "build only libstd with debugging information")
o("debuginfo-tools", "rust.debuginfo-tools", "build extended tools with debugging information")
-o("debug-jemalloc", "rust.debug-jemalloc", "build jemalloc with --enable-debug --enable-fill")
v("save-toolstates", "rust.save-toolstates", "save build and test status of external tools into this file")
v("prefix", "install.prefix", "set installation prefix")
v("llvm-config", None, "set path to llvm-config")
v("llvm-filecheck", None, "set path to LLVM's FileCheck utility")
v("python", "build.python", "set path to python")
-v("jemalloc-root", None, "set directory where libjemalloc_pic.a is located")
v("android-cross-path", "target.arm-linux-androideabi.android-ndk",
"Android NDK standalone path (deprecated)")
v("i686-linux-android-ndk", "target.i686-linux-android.android-ndk",
# Many of these are saved below during the "writing configuration" step
# (others are conditionally saved).
o("manage-submodules", "build.submodules", "let the build manage the git submodules")
-o("jemalloc", "rust.use-jemalloc", "build liballoc with jemalloc")
o("full-bootstrap", "build.full-bootstrap", "build three compilers instead of two")
o("extended", "build.extended", "build an extended rust tool set")
set('target.{}.llvm-config'.format(build()), value)
elif option.name == 'llvm-filecheck':
set('target.{}.llvm-filecheck'.format(build()), value)
- elif option.name == 'jemalloc-root':
- set('target.{}.jemalloc'.format(build()), value + '/libjemalloc_pic.a')
elif option.name == 'tools':
set('build.tools', value.split(','))
elif option.name == 'host':
"src/build_helper",
"src/dlmalloc",
"src/liballoc",
- "src/liballoc_jemalloc",
"src/liballoc_system",
"src/libbacktrace",
"src/libcompiler_builtins",
"src/rustc/dlmalloc_shim",
"src/libtest",
"src/libterm",
- "src/jemalloc",
"src/libprofiler_builtins",
"src/stdsimd",
+ "src/libproc_macro",
];
let std_src_dirs_exclude = [
"src/libcompiler_builtins/compiler-rt/test",
- "src/jemalloc/test/unit",
];
copy_src_dirs(builder, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src);
}
}
-const CARGO_VENDOR_VERSION: &str = "0.1.4";
+const CARGO_VENDOR_VERSION: &str = "0.1.19";
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct PlainSourceTarball;
cmd.arg("--html-after-content").arg(&footer)
.arg("--html-before-content").arg(&version_info)
.arg("--html-in-header").arg(&favicon)
+ .arg("--markdown-no-toc")
+ .arg("--index-page").arg(&builder.src.join("src/doc/index.md"))
.arg("--markdown-playground-url")
.arg("https://play.rust-lang.org/")
.arg("-o").arg(&out)
.arg(&path);
if filename == "not_found.md" {
- cmd.arg("--markdown-no-toc")
- .arg("--markdown-css")
+ cmd.arg("--markdown-css")
.arg("https://doc.rust-lang.org/rust.css");
} else {
cmd.arg("--markdown-css").arg("rust.css");
// will also directly handle merging.
let my_out = builder.crate_doc_out(target);
t!(symlink_dir_force(&builder.config, &my_out, &out_dir));
+ t!(fs::copy(builder.src.join("src/doc/rust.css"), out.join("rust.css")));
- let mut cargo = builder.cargo(compiler, Mode::Std, target, "doc");
- compile::std_cargo(builder, &compiler, target, &mut cargo);
+ let run_cargo_rustdoc_for = |package: &str| {
+ let mut cargo = builder.cargo(compiler, Mode::Std, target, "rustdoc");
+ compile::std_cargo(builder, &compiler, target, &mut cargo);
- // Keep a whitelist so we do not build internal stdlib crates, these will be
- // build by the rustc step later if enabled.
- cargo.arg("--no-deps");
- for krate in &["alloc", "core", "std"] {
- cargo.arg("-p").arg(krate);
+ // Keep a whitelist so we do not build internal stdlib crates, these will be
+        // built by the rustc step later if enabled.
+ cargo.arg("-Z").arg("unstable-options")
+ .arg("-p").arg(package);
// Create all crate output directories first to make sure rustdoc uses
// relative links.
// FIXME: Cargo should probably do this itself.
- t!(fs::create_dir_all(out_dir.join(krate)));
+ t!(fs::create_dir_all(out_dir.join(package)));
+ cargo.arg("--")
+ .arg("--markdown-css").arg("rust.css")
+ .arg("--markdown-no-toc")
+ .arg("--index-page").arg(&builder.src.join("src/doc/index.md"));
+
+ builder.run(&mut cargo);
+ builder.cp_r(&my_out, &out);
+ };
+ for krate in &["alloc", "core", "std"] {
+ run_cargo_rustdoc_for(krate);
}
-
- builder.run(&mut cargo);
- builder.cp_r(&my_out, &out);
}
}
fn std_features(&self) -> String {
let mut features = "panic-unwind".to_string();
- if self.config.debug_jemalloc {
- features.push_str(" debug-jemalloc");
- }
- if self.config.use_jemalloc {
- features.push_str(" jemalloc");
- }
if self.config.backtrace {
features.push_str(" backtrace");
}
/// Get the space-separated set of activated features for the compiler.
fn rustc_features(&self) -> String {
let mut features = String::new();
- if self.config.use_jemalloc {
- features.push_str(" jemalloc");
+ if self.config.jemalloc {
+ features.push_str("jemalloc");
}
features
}
// If we're compiling on macOS then we add a few unconditional flags
// indicating that we want libc++ (more filled out than libstdc++) and
// we want to compile for 10.7. This way we can ensure that
- // LLVM/jemalloc/etc are all properly compiled.
+ // LLVM/etc are all properly compiled.
if target.contains("apple-darwin") {
base.push("-stdlib=libc++".into());
}
} else if target != self.config.build &&
!target.contains("msvc") &&
!target.contains("emscripten") &&
- !target.contains("wasm32") {
+ !target.contains("wasm32") &&
+ !target.contains("fuchsia") {
Some(self.cc(target))
} else {
None
check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu:
$(Q)$(BOOTSTRAP) test --target x86_64-unknown-linux-musl
-TESTS_IN_2 := src/test/run-pass src/test/compile-fail src/test/run-pass-fulldeps
+TESTS_IN_2 := \
+ src/test/ui \
+ src/test/run-pass \
+ src/test/compile-fail \
+ src/test/run-pass-fulldeps \
+ src/tools/linkchecker
appveyor-subset-1:
$(Q)$(BOOTSTRAP) test $(TESTS_IN_2:%=--exclude %)
if !build.config.dry_run {
cmd_finder.must_have(build.cxx(*host).unwrap());
}
-
- // The msvc hosts don't use jemalloc, turn it off globally to
- // avoid packaging the dummy liballoc_jemalloc on that platform.
- if host.contains("msvc") {
- build.config.use_jemalloc = false;
- }
}
// Externally configured LLVM requires FileCheck to exist
type Output = ();
fn should_run(run: ShouldRun) -> ShouldRun {
- run.path("src/liballoc_jemalloc")
- .path("src/librustc_asan")
+ run.path("src/librustc_asan")
.path("src/librustc_lsan")
.path("src/librustc_msan")
.path("src/librustc_tsan")
target: run.target,
test_kind,
krate: match run.path {
- _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
_ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
_ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
_ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
run = run.krate("test");
for krate in run.builder.in_tree_crates("std") {
if krate.is_local(&run.builder)
- && !krate.name.contains("jemalloc")
&& !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
&& krate.name != "dlmalloc"
{
--- /dev/null
+FROM ubuntu:16.04
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ g++ \
+ make \
+ file \
+ curl \
+ ca-certificates \
+ python2.7 \
+ git \
+ cmake \
+ sudo \
+ gdb \
+ xz-utils \
+ g++-powerpc-linux-gnuspe \
+ libssl-dev \
+ pkg-config
+
+
+COPY scripts/sccache.sh /scripts/
+RUN sh /scripts/sccache.sh
+
+ENV HOSTS=powerpc-unknown-linux-gnuspe
+
+ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs
+ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS
--enable-sanitizers \
--enable-profiler \
--set target.i686-unknown-linux-gnu.linker=clang \
- --build=i686-unknown-linux-gnu
+ --build=i686-unknown-linux-gnu \
+ --set rust.jemalloc
ENV SCRIPT python2.7 ../x.py dist --build $HOSTS --host $HOSTS --target $HOSTS
ENV CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER=clang
libssl-dev \
pkg-config \
gcc-arm-none-eabi \
- libnewlib-arm-none-eabi
+ libnewlib-arm-none-eabi \
+ qemu-system-arm
WORKDIR /build
CC_x86_64_sun_solaris=x86_64-sun-solaris2.10-gcc \
CXX_x86_64_sun_solaris=x86_64-sun-solaris2.10-g++
+ENV CARGO_TARGET_X86_64_FUCHSIA_AR /usr/local/bin/llvm-ar
+ENV CARGO_TARGET_X86_64_FUCHSIA_RUSTFLAGS \
+-C link-arg=--sysroot=/usr/local/x86_64-fuchsia \
+-C link-arg=-L/usr/local/x86_64-fuchsia/lib \
+-C link-arg=-L/usr/local/lib/x86_64-fuchsia/lib
+ENV CARGO_TARGET_AARCH64_FUCHSIA_AR /usr/local/bin/llvm-ar
+ENV CARGO_TARGET_AARCH64_FUCHSIA_RUSTFLAGS \
+-C link-arg=--sysroot=/usr/local/aarch64-fuchsia \
+-C link-arg=-L/usr/local/aarch64-fuchsia/lib \
+-C link-arg=-L/usr/local/lib/aarch64-fuchsia/lib
+
ENV TARGETS=x86_64-fuchsia
ENV TARGETS=$TARGETS,aarch64-fuchsia
ENV TARGETS=$TARGETS,sparcv9-sun-solaris
ENV TARGETS=$TARGETS,x86_64-unknown-linux-gnux32
ENV TARGETS=$TARGETS,x86_64-unknown-cloudabi
-ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs
+ENV RUST_CONFIGURE_ARGS --enable-extended --enable-lld --disable-docs
ENV SCRIPT python2.7 ../x.py dist --target $TARGETS
--set target.x86_64-unknown-linux-gnu.linker=clang \
--set target.x86_64-unknown-linux-gnu.ar=/rustroot/bin/llvm-ar \
--set target.x86_64-unknown-linux-gnu.ranlib=/rustroot/bin/llvm-ranlib \
- --set llvm.thin-lto=true
+ --set llvm.thin-lto=true \
+ --set rust.jemalloc
ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS
ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=clang
-FROM ubuntu:16.04
+FROM ubuntu:18.10
RUN apt-get update && apt-get install -y --no-install-recommends \
g++ \
[unstable-include]: ../unstable-book/language-features/external-doc.html
[issue-include]: https://github.com/rust-lang/rust/issues/44732
+### Add aliases for an item in documentation search
+
+This feature allows you to add alias(es) to an item when using the `rustdoc` search through the
+`doc(alias)` attribute. Example:
+
+```rust,no_run
+#![feature(doc_alias)]
+
+#[doc(alias = "x")]
+#[doc(alias = "big")]
+pub struct BigX;
+```
+
+Then, when looking for it through the `rustdoc` search, if you enter "x" or
+"big", the search will show the `BigX` struct first.
+
## Unstable command-line arguments
These features are enabled by passing a command-line flag to Rustdoc, but the flags in question are
allows `rustdoc` to be able to generate documentation for the compiler crates and the standard
library, as an equivalent command-line argument is provided to `rustc` when building those crates.
-### `doc_alias` feature
+### `--index-page`: provide a top-level landing page for docs
-This feature allows you to add alias(es) to an item when using the `rustdoc` search through the
-`doc(alias)` attribute. Example:
+This feature allows you to generate an index-page with a given markdown file. A good example of it
+is the [rust documentation index](https://doc.rust-lang.org/index.html).
-```rust,no_run
-#![feature(doc_alias)]
+With this, you'll have a page which you can customize as much as you want at the top of your crates.
-#[doc(alias = "x")]
-#[doc(alias = "big")]
-pub struct BigX;
-```
+Using the `index-page` option enables the `enable-index-page` option as well.
-Then, when looking for it through the `rustdoc` search, if you enter "x" or
-"big", search will show the `BigX` struct first.
+### `--enable-index-page`: generate a default index page for docs
+
+This feature allows the generation of a default index-page which lists the generated crates.
--- /dev/null
+# `trait_alias`
+
+The tracking issue for this feature is: [#41517]
+
+[#41517]: https://github.com/rust-lang/rust/issues/41517
+
+------------------------
+
+The `trait_alias` feature adds support for trait aliases. These allow aliases
+to be created for one or more traits (currently just a single regular trait plus
+any number of auto-traits), and used wherever traits would normally be used as
+either bounds or trait objects.
+
+```rust
+#![feature(trait_alias)]
+
+trait Foo = std::fmt::Debug + Send;
+trait Bar = Foo + Sync;
+
+// Use trait alias as bound on type parameter.
+fn foo<T: Foo>(v: &T) {
+ println!("{:?}", v);
+}
+
+pub fn main() {
+ foo(&1);
+
+ // Use trait alias for trait objects.
+ let a: &Bar = &123;
+ println!("{:?}", a);
+ let b = Box::new(456) as Box<dyn Foo>;
+ println!("{:?}", b);
+}
+```
# except according to those terms.
import gdb
-import re
import sys
import debugger_pretty_printers_common as rustpp
+++ /dev/null
-Subproject commit 1f5a28755e301ac581e2048011e4e0ff3da482ef
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
-use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState};
+use core::ops::{CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Generator, GeneratorState};
use core::ptr::{self, NonNull, Unique};
use core::task::{LocalWaker, Poll};
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
fn clone(&self) -> Self {
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
+#[cfg(target_pointer_width = "16")]
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (16 - 1); // Largest possible power of two
#[cfg(target_pointer_width = "32")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two
#[cfg(target_pointer_width = "64")]
#![feature(box_syntax)]
#![feature(cfg_target_has_atomic)]
#![feature(coerce_unsized)]
-#![cfg_attr(stage0, feature(min_const_fn))]
+#![feature(dispatch_from_dyn)]
#![feature(core_intrinsics)]
#![feature(custom_attribute)]
#![feature(dropck_eyepatch)]
use core::marker::{Unpin, Unsize, PhantomData};
use core::mem::{self, align_of_val, forget, size_of_val};
use core::ops::Deref;
-use core::ops::CoerceUnsized;
+use core::ops::{CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::convert::From;
/// type `T`.
///
/// [get_mut]: #method.get_mut
-#[cfg_attr(all(not(stage0), not(test)), lang = "rc")]
+#[cfg_attr(not(test), lang = "rc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
ptr: NonNull<RcBox<T>>,
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
+
impl<T> Rc<T> {
/// Constructs a new `Rc<T>`.
///
impl<T: ?Sized> Rc<T> {
// Allocates an `RcBox<T>` with sufficient space for an unsized value
unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
- // Create a fake RcBox to find allocation size and alignment
- let fake_ptr = ptr as *mut RcBox<T>;
-
- let layout = Layout::for_value(&*fake_ptr);
+ // Calculate layout using the given value.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let (layout, _) = Layout::new::<RcBox<()>>()
+ .extend(Layout::for_value(&*ptr)).unwrap();
let mem = Global.alloc(layout)
.unwrap_or_else(|_| handle_alloc_error(layout));
- // Initialize the real RcBox
+ // Initialize the RcBox
let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
ptr::write(&mut (*inner).strong, Cell::new(1));
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
impl<T> Weak<T> {
/// Constructs a new `Weak<T>`, without allocating any memory.
/// Calling [`upgrade`][Weak::upgrade] on the return value always gives [`None`].
use core::intrinsics::abort;
use core::mem::{self, align_of_val, size_of_val};
use core::ops::Deref;
-use core::ops::CoerceUnsized;
+use core::ops::{CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::marker::{Unpin, Unsize, PhantomData};
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
-#[cfg_attr(all(not(stage0), not(test)), lang = "arc")]
+#[cfg_attr(not(test), lang = "arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
+
/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
impl<T: ?Sized> Arc<T> {
// Allocates an `ArcInner<T>` with sufficient space for an unsized value
unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
- // Create a fake ArcInner to find allocation size and alignment
- let fake_ptr = ptr as *mut ArcInner<T>;
-
- let layout = Layout::for_value(&*fake_ptr);
+ // Calculate layout using the given value.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let (layout, _) = Layout::new::<ArcInner<()>>()
+ .extend(Layout::for_value(&*ptr)).unwrap();
let mem = Global.alloc(layout)
.unwrap_or_else(|_| handle_alloc_error(layout));
- // Initialize the real ArcInner
+ // Initialize the ArcInner
let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
use std::alloc::{Global, Alloc, Layout};
/// https://github.com/rust-lang/rust/issues/45955
-///
-/// Note that `#[global_allocator]` is not used,
-/// so `liballoc_jemalloc` is linked (on some platforms).
#[test]
fn alloc_system_overaligned_request() {
check_overalign_requests(System)
#![feature(allocator_api)]
#![feature(alloc_system)]
#![feature(box_syntax)]
-#![cfg_attr(stage0, feature(min_const_fn))]
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
#![feature(pattern)]
+++ /dev/null
-[package]
-authors = ["The Rust Project Developers"]
-name = "alloc_jemalloc"
-version = "0.0.0"
-build = "build.rs"
-links = "jemalloc"
-
-[lib]
-name = "alloc_jemalloc"
-path = "lib.rs"
-test = false
-doc = false
-
-[dependencies]
-core = { path = "../libcore" }
-libc = { path = "../rustc/libc_shim" }
-compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
-
-[build-dependencies]
-build_helper = { path = "../build_helper" }
-cc = "1.0.1"
-
-[features]
-debug = []
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![deny(warnings)]
-
-extern crate build_helper;
-extern crate cc;
-
-use std::env;
-use std::path::PathBuf;
-use std::process::Command;
-use build_helper::{run, native_lib_boilerplate};
-
-fn main() {
- // FIXME: This is a hack to support building targets that don't
- // support jemalloc alongside hosts that do. The jemalloc build is
- // controlled by a feature of the std crate, and if that feature
- // changes between targets, it invalidates the fingerprint of
- // std's build script (this is a cargo bug); so we must ensure
- // that the feature set used by std is the same across all
- // targets, which means we have to build the alloc_jemalloc crate
- // for targets like emscripten, even if we don't use it.
- let target = env::var("TARGET").expect("TARGET was not set");
- let host = env::var("HOST").expect("HOST was not set");
- if target.contains("bitrig") || target.contains("emscripten") || target.contains("fuchsia") ||
- target.contains("msvc") || target.contains("openbsd") || target.contains("redox") ||
- target.contains("rumprun") || target.contains("wasm32") {
- println!("cargo:rustc-cfg=dummy_jemalloc");
- return;
- }
-
- // CloudABI ships with a copy of jemalloc that has been patched to
- // work well with sandboxing. Don't attempt to build our own copy,
- // as it won't build.
- if target.contains("cloudabi") {
- return;
- }
-
- if target.contains("android") {
- println!("cargo:rustc-link-lib=gcc");
- } else if !target.contains("windows") && !target.contains("musl") {
- println!("cargo:rustc-link-lib=pthread");
- }
-
- if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") {
- let jemalloc = PathBuf::from(jemalloc);
- println!("cargo:rustc-link-search=native={}",
- jemalloc.parent().unwrap().display());
- let stem = jemalloc.file_stem().unwrap().to_str().unwrap();
- let name = jemalloc.file_name().unwrap().to_str().unwrap();
- let kind = if name.ends_with(".a") {
- "static"
- } else {
- "dylib"
- };
- println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]);
- return;
- }
-
- let link_name = if target.contains("windows") { "jemalloc" } else { "jemalloc_pic" };
- let native = match native_lib_boilerplate("jemalloc", "jemalloc", link_name, "lib") {
- Ok(native) => native,
- _ => return,
- };
-
- let mut cmd = Command::new("sh");
- cmd.arg(native.src_dir.join("configure")
- .to_str()
- .unwrap()
- .replace("C:\\", "/c/")
- .replace("\\", "/"))
- .current_dir(&native.out_dir)
- // jemalloc generates Makefile deps using GCC's "-MM" flag. This means
- // that GCC will run the preprocessor, and only the preprocessor, over
- // jemalloc's source files. If we don't specify CPPFLAGS, then at least
- // on ARM that step fails with a "Missing implementation for 32-bit
- // atomic operations" error. This is because no "-march" flag will be
- // passed to GCC, and then GCC won't define the
- // "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4" macro that jemalloc needs to
- // select an atomic operation implementation.
- .env("CPPFLAGS", env::var_os("CFLAGS").unwrap_or_default());
-
- if target.contains("ios") {
- cmd.arg("--disable-tls");
- } else if target.contains("android") {
- // We force android to have prefixed symbols because apparently
- // replacement of the libc allocator doesn't quite work. When this was
- // tested (unprefixed symbols), it was found that the `realpath`
- // function in libc would allocate with libc malloc (not jemalloc
- // malloc), and then the standard library would free with jemalloc free,
- // causing a segfault.
- //
- // If the test suite passes, however, without symbol prefixes then we
- // should be good to go!
- cmd.arg("--with-jemalloc-prefix=je_");
- cmd.arg("--disable-tls");
- } else if target.contains("dragonfly") || target.contains("musl") {
- cmd.arg("--with-jemalloc-prefix=je_");
- }
-
- if cfg!(feature = "debug") {
- // Enable jemalloc assertions.
- cmd.arg("--enable-debug");
- }
-
- cmd.arg(format!("--host={}", build_helper::gnu_target(&target)));
- cmd.arg(format!("--build={}", build_helper::gnu_target(&host)));
-
- // for some reason, jemalloc configure doesn't detect this value
- // automatically for this target
- if target == "sparc64-unknown-linux-gnu" {
- cmd.arg("--with-lg-quantum=4");
- }
-
- run(&mut cmd);
-
- let mut make = Command::new(build_helper::make(&host));
- make.current_dir(&native.out_dir)
- .arg("build_lib_static");
-
- // These are intended for mingw32-make which we don't use
- if cfg!(windows) {
- make.env_remove("MAKEFLAGS").env_remove("MFLAGS");
- }
-
- // mingw make seems... buggy? unclear...
- if !host.contains("windows") {
- make.arg("-j")
- .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set"));
- }
-
- run(&mut make);
-
- // The pthread_atfork symbols is used by jemalloc on android but the really
- // old android we're building on doesn't have them defined, so just make
- // sure the symbols are available.
- if target.contains("androideabi") {
- println!("cargo:rerun-if-changed=pthread_atfork_dummy.c");
- cc::Build::new()
- .flag("-fvisibility=hidden")
- .file("pthread_atfork_dummy.c")
- .compile("pthread_atfork_dummy");
- }
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![no_std]
-#![allow(unused_attributes)]
-#![unstable(feature = "alloc_jemalloc",
- reason = "implementation detail of std, does not provide any public API",
- issue = "0")]
-#![feature(core_intrinsics)]
-#![feature(libc)]
-#![feature(linkage)]
-#![feature(nll)]
-#![feature(staged_api)]
-#![feature(rustc_attrs)]
-#![cfg_attr(dummy_jemalloc, allow(dead_code, unused_extern_crates))]
-#![cfg_attr(not(dummy_jemalloc), feature(allocator_api))]
-#![rustc_alloc_kind = "exe"]
-
-extern crate libc;
-
-#[cfg(not(dummy_jemalloc))]
-pub use contents::*;
-#[cfg(not(dummy_jemalloc))]
-mod contents {
- use libc::{c_int, c_void, size_t};
-
- // Note that the symbols here are prefixed by default on macOS and Windows (we
- // don't explicitly request it), and on Android and DragonFly we explicitly
- // request it as unprefixing cause segfaults (mismatches in allocators).
- extern "C" {
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows", target_env = "musl"),
- link_name = "je_mallocx")]
- fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows", target_env = "musl"),
- link_name = "je_calloc")]
- fn calloc(size: size_t, flags: c_int) -> *mut c_void;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows", target_env = "musl"),
- link_name = "je_rallocx")]
- fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows", target_env = "musl"),
- link_name = "je_sdallocx")]
- fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
- }
-
- const MALLOCX_ZERO: c_int = 0x40;
-
- // The minimum alignment guaranteed by the architecture. This value is used to
- // add fast paths for low alignment values.
- #[cfg(all(any(target_arch = "arm",
- target_arch = "mips",
- target_arch = "powerpc")))]
- const MIN_ALIGN: usize = 8;
- #[cfg(all(any(target_arch = "x86",
- target_arch = "x86_64",
- target_arch = "aarch64",
- target_arch = "powerpc64",
- target_arch = "mips64",
- target_arch = "s390x",
- target_arch = "sparc64")))]
- const MIN_ALIGN: usize = 16;
-
- // MALLOCX_ALIGN(a) macro
- fn mallocx_align(a: usize) -> c_int {
- a.trailing_zeros() as c_int
- }
-
- fn align_to_flags(align: usize, size: usize) -> c_int {
- if align <= MIN_ALIGN && align <= size {
- 0
- } else {
- mallocx_align(align)
- }
- }
-
- // for symbol names src/librustc/middle/allocator.rs
- // for signatures src/librustc_allocator/lib.rs
-
- // linkage directives are provided as part of the current compiler allocator
- // ABI
-
- #[rustc_std_internal_symbol]
- pub unsafe extern fn __rde_alloc(size: usize, align: usize) -> *mut u8 {
- let flags = align_to_flags(align, size);
- let ptr = mallocx(size as size_t, flags) as *mut u8;
- ptr
- }
-
- #[rustc_std_internal_symbol]
- pub unsafe extern fn __rde_dealloc(ptr: *mut u8,
- size: usize,
- align: usize) {
- let flags = align_to_flags(align, size);
- sdallocx(ptr as *mut c_void, size, flags);
- }
-
- #[rustc_std_internal_symbol]
- pub unsafe extern fn __rde_realloc(ptr: *mut u8,
- _old_size: usize,
- align: usize,
- new_size: usize) -> *mut u8 {
- let flags = align_to_flags(align, new_size);
- let ptr = rallocx(ptr as *mut c_void, new_size, flags) as *mut u8;
- ptr
- }
-
- #[rustc_std_internal_symbol]
- pub unsafe extern fn __rde_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
- let ptr = if align <= MIN_ALIGN && align <= size {
- calloc(size as size_t, 1) as *mut u8
- } else {
- let flags = align_to_flags(align, size) | MALLOCX_ZERO;
- mallocx(size as size_t, flags) as *mut u8
- };
- ptr
- }
-}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// See comments in build.rs for why this exists
-int pthread_atfork(void* prefork,
- void* postfork_parent,
- void* postfork_child) {
- return 0;
-}
#[cfg(not(target_feature = "atomics"))]
mod lock {
+ #[inline]
pub fn lock() {} // no atomics, no threads, that's easy!
}
}
-Subproject commit 0703bfa72524e01e414477657ca9b64794c5c1c3
+Subproject commit 939cbca6e9d829265d6cf006d3532142a4061cd3
///
/// # Examples
///
-/// Here you can see how using `Cell<T>` allows to use mutable field inside
-/// immutable struct (which is also called 'interior mutability').
+/// In this example, you can see that `Cell<T>` enables mutation inside an
+/// immutable struct. In other words, it enables "interior mutability".
///
/// ```
/// use std::cell::Cell;
///
/// let new_value = 100;
///
-/// // ERROR, because my_struct is immutable
+/// // ERROR: `my_struct` is immutable
/// // my_struct.regular_field = new_value;
///
-/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell
+/// // WORKS: although `my_struct` is immutable, `special_field` is a `Cell`,
+/// // which can always be mutated
/// my_struct.special_field.set(new_value);
/// assert_eq!(my_struct.special_field.get(), new_value);
/// ```
/// }
///
/// impl Default for Kind {
-/// fn default() -> Kind { Kind::A }
+/// fn default() -> Self { Kind::A }
/// }
/// ```
///
/// }
///
/// impl Default for Kind {
- /// fn default() -> Kind { Kind::A }
+ /// fn default() -> Self { Kind::A }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#![feature(doc_spotlight)]
#![feature(extern_types)]
#![feature(fundamental)]
-#![cfg_attr(stage0, feature(impl_header_lifetime_elision))]
#![feature(intrinsics)]
#![feature(lang_items)]
#![feature(link_llvm_intrinsics)]
/// [alignment]: ./fn.align_of.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(stage0), rustc_promotable)]
+#[rustc_promotable]
pub const fn size_of<T>() -> usize {
intrinsics::size_of::<T>()
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(stage0), rustc_promotable)]
+#[rustc_promotable]
pub const fn align_of<T>() -> usize {
intrinsics::min_align_of::<T>()
}
#[inline]
#[stable(feature = "needs_drop", since = "1.21.0")]
#[rustc_const_unstable(feature = "const_needs_drop")]
-#[cfg(not(stage0))]
pub const fn needs_drop<T>() -> bool {
intrinsics::needs_drop::<T>()
}
-#[inline]
-#[stable(feature = "needs_drop", since = "1.21.0")]
-#[cfg(stage0)]
-/// Ceci n'est pas la documentation
-pub fn needs_drop<T>() -> bool {
- unsafe { intrinsics::needs_drop::<T>() }
-}
-
/// Creates a value whose bytes are all zero.
///
/// This has the same effect as allocating space with
//! Exposes the NonZero lang item which provides optimization hints.
-use ops::CoerceUnsized;
+use ops::{CoerceUnsized, DispatchFromDyn};
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0 that might allow certain optimizations.
pub(crate) struct NonZero<T>(pub(crate) T);
impl<T: CoerceUnsized<U>, U> CoerceUnsized<NonZero<U>> for NonZero<T> {}
+
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<NonZero<U>> for NonZero<T> {}
```"),
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn min_value() -> Self {
!0 ^ ((!0 as $UnsignedT) >> 1) as Self
}
```"),
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn max_value() -> Self {
!Self::min_value()
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
pub use self::unsize::CoerceUnsized;
+
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+pub use self::unsize::DispatchFromDyn;
/// ```
#[stable(feature = "inclusive_range_methods", since = "1.27.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn new(start: Idx, end: Idx) -> Self {
Self { start, end, is_empty: None }
}
/// [nomicon-coerce]: ../../nomicon/coercions.html
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[lang = "coerce_unsized"]
-pub trait CoerceUnsized<T> {
+pub trait CoerceUnsized<T: ?Sized> {
// Empty.
}
// *const T -> *const U
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+
+
+/// This is used for object safety, to check that a method's receiver type can be dispatched on.
+///
+/// example impl:
+///
+/// ```
+/// # #![feature(dispatch_from_dyn, unsize)]
+/// # use std::{ops::DispatchFromDyn, marker::Unsize};
+/// # struct Rc<T: ?Sized>(::std::rc::Rc<T>);
+/// impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T>
+/// where
+/// T: Unsize<U>,
+/// {}
+/// ```
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+#[cfg_attr(not(stage0), lang = "dispatch_from_dyn")]
+pub trait DispatchFromDyn<T> {
+ // Empty.
+}
+
+// &T -> &U
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+
use fmt;
use marker::Sized;
-use ops::{Deref, DerefMut, CoerceUnsized};
+use ops::{Deref, DerefMut, CoerceUnsized, DispatchFromDyn};
#[doc(inline)]
pub use marker::Unpin;
P: CoerceUnsized<U>,
{}
+#[unstable(feature = "pin", issue = "49150")]
+impl<'a, P, U> DispatchFromDyn<Pin<U>> for Pin<P>
+where
+ P: DispatchFromDyn<U>,
+{}
+
#[unstable(feature = "pin", issue = "49150")]
impl<P> Unpin for Pin<P> {}
use convert::From;
use intrinsics;
-use ops::CoerceUnsized;
+use ops::{CoerceUnsized, DispatchFromDyn};
use fmt;
use hash;
use marker::{PhantomData, Unsize};
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(stage0), rustc_promotable)]
+#[rustc_promotable]
pub const fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(stage0), rustc_promotable)]
+#[rustc_promotable]
pub const fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Swaps the values at two mutable locations of the same type, without
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }
+#[unstable(feature = "ptr_internals", issue = "0")]
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> { }
+
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
+#[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> { }
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Debug for NonNull<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
/// [`bool`]: ../../../std/primitive.bool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(C, align(1))]
pub struct AtomicBool {
v: UnsafeCell<u8>,
}
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
+#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
+#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
p: UnsafeCell<*mut T>,
}
$s_int_type:expr, $int_ref:expr,
$extra_feature:expr,
$min_fn:ident, $max_fn:ident,
+ $align:expr,
$int_type:ident $atomic_type:ident $atomic_init:ident) => {
/// An integer type which can be safely shared between threads.
///
///
/// [module-level documentation]: index.html
#[$stable]
+ #[repr(C, align($align))]
pub struct $atomic_type {
v: UnsafeCell<$int_type>,
}
"i8", "../../../std/primitive.i8.html",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
+ 1,
i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
"u8", "../../../std/primitive.u8.html",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
+ 1,
u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
"i16", "../../../std/primitive.i16.html",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
+ 2,
i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
"u16", "../../../std/primitive.u16.html",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
+ 2,
u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
"i32", "../../../std/primitive.i32.html",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
+ 4,
i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
"u32", "../../../std/primitive.u32.html",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
+ 4,
u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
"i64", "../../../std/primitive.i64.html",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
+ 8,
i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
"u64", "../../../std/primitive.u64.html",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
+ 8,
u64 AtomicU64 ATOMIC_U64_INIT
}
+#[cfg(all(not(stage0), target_has_atomic = "128"))]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i128", "../../../std/primitive.i128.html",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_min, atomic_max,
+ 16,
+ i128 AtomicI128 ATOMIC_I128_INIT
+}
+#[cfg(all(not(stage0), target_has_atomic = "128"))]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u128", "../../../std/primitive.u128.html",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_umin, atomic_umax,
+ 16,
+ u128 AtomicU128 ATOMIC_U128_INIT
+}
+#[cfg(target_pointer_width = "16")]
+macro_rules! ptr_width {
+ () => { 2 }
+}
+#[cfg(target_pointer_width = "32")]
+macro_rules! ptr_width {
+ () => { 4 }
+}
+#[cfg(target_pointer_width = "64")]
+macro_rules! ptr_width {
+ () => { 8 }
+}
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
stable(feature = "rust1", since = "1.0.0"),
"isize", "../../../std/primitive.isize.html",
"",
atomic_min, atomic_max,
+ ptr_width!(),
isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
"usize", "../../../std/primitive.usize.html",
"",
atomic_umin, atomic_umax,
+ ptr_width!(),
usize AtomicUsize ATOMIC_USIZE_INIT
}
#![feature(flt2dec)]
#![feature(fmt_internals)]
#![feature(hashmap_internals)]
-#![cfg_attr(stage0, feature(impl_header_lifetime_elision))]
#![feature(pattern)]
#![feature(range_is_empty)]
#![feature(raw)]
/// ```
#[stable(feature = "duration", since = "1.3.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn from_secs(secs: u64) -> Duration {
Duration { secs, nanos: 0 }
}
/// ```
#[stable(feature = "duration", since = "1.3.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn from_millis(millis: u64) -> Duration {
Duration {
secs: millis / MILLIS_PER_SEC,
/// ```
#[stable(feature = "duration_from_micros", since = "1.27.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn from_micros(micros: u64) -> Duration {
Duration {
secs: micros / MICROS_PER_SEC,
/// ```
#[stable(feature = "duration_extras", since = "1.27.0")]
#[inline]
- #[cfg_attr(not(stage0), rustc_promotable)]
+ #[rustc_promotable]
pub const fn from_nanos(nanos: u64) -> Duration {
Duration {
secs: nanos / (NANOS_PER_SEC as u64),
&mut self,
def: Def,
p: &Path,
- ident: Option<Ident>,
param_mode: ParamMode,
explicit_owner: Option<NodeId>,
) -> hir::Path {
explicit_owner,
)
})
- .chain(ident.map(|ident| hir::PathSegment::from_ident(ident)))
.collect(),
span: p.span,
}
fn lower_path(&mut self, id: NodeId, p: &Path, param_mode: ParamMode) -> hir::Path {
let def = self.expect_full_def(id);
- self.lower_path_extra(def, p, None, param_mode, None)
+ self.lower_path_extra(def, p, param_mode, None)
}
fn lower_path_segment(
self.with_hir_id_owner(new_node_id, |this| {
let new_id = this.lower_node_id(new_node_id);
let path =
- this.lower_path_extra(def, &path, None, ParamMode::Explicit, None);
+ this.lower_path_extra(def, &path, ParamMode::Explicit, None);
let item = hir::ItemKind::Use(P(path), hir::UseKind::Single);
let vis_kind = match vis.node {
hir::VisibilityKind::Public => hir::VisibilityKind::Public,
hir::VisibilityKind::Inherited => hir::VisibilityKind::Inherited,
hir::VisibilityKind::Restricted { ref path, id: _, hir_id: _ } => {
let id = this.next_id();
+ let mut path = path.clone();
+ for seg in path.segments.iter_mut() {
+ if seg.id.is_some() {
+ seg.id = Some(this.next_id().node_id);
+ }
+ }
hir::VisibilityKind::Restricted {
- path: path.clone(),
+ path,
id: id.node_id,
hir_id: id.hir_id,
}
}
let path =
- P(self.lower_path_extra(ret_def, &path, None, ParamMode::Explicit, None));
+ P(self.lower_path_extra(ret_def, &path, ParamMode::Explicit, None));
hir::ItemKind::Use(path, hir::UseKind::Single)
}
UseTreeKind::Glob => {
// the stability of `use a::{};`, to avoid it showing up as
// a re-export by accident when `pub`, e.g. in documentation.
let def = self.expect_full_def_from_use(id).next().unwrap_or(Def::Err);
- let path = P(self.lower_path_extra(def, &prefix, None, ParamMode::Explicit, None));
+ let path = P(self.lower_path_extra(def, &prefix, ParamMode::Explicit, None));
*vis = respan(prefix.span.shrink_to_lo(), hir::VisibilityKind::Inherited);
hir::ItemKind::Use(path, hir::UseKind::ListStem)
}
path: P(self.lower_path_extra(
def,
path,
- None,
ParamMode::Explicit,
explicit_owner,
)),
let node = match qpath {
hir::QPath::Resolved(None, path) => {
// Turn trait object paths into `TyKind::TraitObject` instead.
- if let Def::Trait(_) = path.def {
- let principal = hir::PolyTraitRef {
- bound_generic_params: hir::HirVec::new(),
- trait_ref: hir::TraitRef {
- path: path.and_then(|path| path),
- ref_id: id.node_id,
- hir_ref_id: id.hir_id,
- },
- span,
- };
+ match path.def {
+ Def::Trait(_) | Def::TraitAlias(_) => {
+ let principal = hir::PolyTraitRef {
+ bound_generic_params: hir::HirVec::new(),
+ trait_ref: hir::TraitRef {
+ path: path.and_then(|path| path),
+ ref_id: id.node_id,
+ hir_ref_id: id.hir_id,
+ },
+ span,
+ };
- // The original ID is taken by the `PolyTraitRef`,
- // so the `Ty` itself needs a different one.
- id = self.next_id();
- hir::TyKind::TraitObject(hir_vec![principal], self.elided_dyn_bound(span))
- } else {
- hir::TyKind::Path(hir::QPath::Resolved(None, path))
+ // The original ID is taken by the `PolyTraitRef`,
+ // so the `Ty` itself needs a different one.
+ id = self.next_id();
+ hir::TyKind::TraitObject(hir_vec![principal], self.elided_dyn_bound(span))
+ }
+ _ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
}
}
_ => hir::TyKind::Path(qpath),
};
bug!("inconsistent DepNode for `{}`: \
- current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?}) {}",
+ current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?}){}",
node_str,
self.definitions
.def_path(self.current_dep_node_owner)
ItemKind::Struct(..) => Some(Def::Struct(def_id())),
ItemKind::Union(..) => Some(Def::Union(def_id())),
ItemKind::Trait(..) => Some(Def::Trait(def_id())),
- ItemKind::TraitAlias(..) => {
- bug!("trait aliases are not yet implemented (see issue #41517)")
- },
+ ItemKind::TraitAlias(..) => Some(Def::TraitAlias(def_id())),
ItemKind::ExternCrate(_) |
ItemKind::Use(..) |
ItemKind::ForeignMod(..) |
Some(Node::MacroDef(_)) => {
format!("macro {}{}", path_str(), id_str)
}
- Some(Node::Crate) => format!("root_crate"),
+ Some(Node::Crate) => String::from("root_crate"),
None => format!("unknown node{}", id_str),
}
}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc_data_structures::sync::Lrc;
-use syntax::source_map::SourceMap;
-use syntax_pos::{BytePos, SourceFile};
-
-#[derive(Clone)]
-struct CacheEntry {
- time_stamp: usize,
- line_number: usize,
- line_start: BytePos,
- line_end: BytePos,
- file: Lrc<SourceFile>,
- file_index: usize,
-}
-
-#[derive(Clone)]
-pub struct CachingSourceMapView<'cm> {
- source_map: &'cm SourceMap,
- line_cache: [CacheEntry; 3],
- time_stamp: usize,
-}
-
-impl<'cm> CachingSourceMapView<'cm> {
- pub fn new(source_map: &'cm SourceMap) -> CachingSourceMapView<'cm> {
- let files = source_map.files();
- let first_file = files[0].clone();
- let entry = CacheEntry {
- time_stamp: 0,
- line_number: 0,
- line_start: BytePos(0),
- line_end: BytePos(0),
- file: first_file,
- file_index: 0,
- };
-
- CachingSourceMapView {
- source_map,
- line_cache: [entry.clone(), entry.clone(), entry],
- time_stamp: 0,
- }
- }
-
- pub fn byte_pos_to_line_and_col(&mut self,
- pos: BytePos)
- -> Option<(Lrc<SourceFile>, usize, BytePos)> {
- self.time_stamp += 1;
-
- // Check if the position is in one of the cached lines
- for cache_entry in self.line_cache.iter_mut() {
- if pos >= cache_entry.line_start && pos < cache_entry.line_end {
- cache_entry.time_stamp = self.time_stamp;
-
- return Some((cache_entry.file.clone(),
- cache_entry.line_number,
- pos - cache_entry.line_start));
- }
- }
-
- // No cache hit ...
- let mut oldest = 0;
- for index in 1 .. self.line_cache.len() {
- if self.line_cache[index].time_stamp < self.line_cache[oldest].time_stamp {
- oldest = index;
- }
- }
-
- let cache_entry = &mut self.line_cache[oldest];
-
- // If the entry doesn't point to the correct file, fix it up
- if pos < cache_entry.file.start_pos || pos >= cache_entry.file.end_pos {
- let file_valid;
- if self.source_map.files().len() > 0 {
- let file_index = self.source_map.lookup_source_file_idx(pos);
- let file = self.source_map.files()[file_index].clone();
-
- if pos >= file.start_pos && pos < file.end_pos {
- cache_entry.file = file;
- cache_entry.file_index = file_index;
- file_valid = true;
- } else {
- file_valid = false;
- }
- } else {
- file_valid = false;
- }
-
- if !file_valid {
- return None;
- }
- }
-
- let line_index = cache_entry.file.lookup_line(pos).unwrap();
- let line_bounds = cache_entry.file.line_bounds(line_index);
-
- cache_entry.line_number = line_index + 1;
- cache_entry.line_start = line_bounds.0;
- cache_entry.line_end = line_bounds.1;
- cache_entry.time_stamp = self.time_stamp;
-
- return Some((cache_entry.file.clone(),
- cache_entry.line_number,
- pos - cache_entry.line_start));
- }
-}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::sync::Lrc;
+use syntax::source_map::SourceMap;
+use syntax_pos::{BytePos, SourceFile};
+
+#[derive(Clone)]
+struct CacheEntry {
+ time_stamp: usize,
+ line_number: usize,
+ line_start: BytePos,
+ line_end: BytePos,
+ file: Lrc<SourceFile>,
+ file_index: usize,
+}
+
+#[derive(Clone)]
+pub struct CachingSourceMapView<'cm> {
+ source_map: &'cm SourceMap,
+ line_cache: [CacheEntry; 3],
+ time_stamp: usize,
+}
+
+impl<'cm> CachingSourceMapView<'cm> {
+ pub fn new(source_map: &'cm SourceMap) -> CachingSourceMapView<'cm> {
+ let files = source_map.files();
+ let first_file = files[0].clone();
+ let entry = CacheEntry {
+ time_stamp: 0,
+ line_number: 0,
+ line_start: BytePos(0),
+ line_end: BytePos(0),
+ file: first_file,
+ file_index: 0,
+ };
+
+ CachingSourceMapView {
+ source_map,
+ line_cache: [entry.clone(), entry.clone(), entry],
+ time_stamp: 0,
+ }
+ }
+
+ pub fn byte_pos_to_line_and_col(&mut self,
+ pos: BytePos)
+ -> Option<(Lrc<SourceFile>, usize, BytePos)> {
+ self.time_stamp += 1;
+
+ // Check if the position is in one of the cached lines
+ for cache_entry in self.line_cache.iter_mut() {
+ if pos >= cache_entry.line_start && pos < cache_entry.line_end {
+ cache_entry.time_stamp = self.time_stamp;
+
+ return Some((cache_entry.file.clone(),
+ cache_entry.line_number,
+ pos - cache_entry.line_start));
+ }
+ }
+
+ // No cache hit ...
+ let mut oldest = 0;
+ for index in 1 .. self.line_cache.len() {
+ if self.line_cache[index].time_stamp < self.line_cache[oldest].time_stamp {
+ oldest = index;
+ }
+ }
+
+ let cache_entry = &mut self.line_cache[oldest];
+
+ // If the entry doesn't point to the correct file, fix it up
+ if pos < cache_entry.file.start_pos || pos >= cache_entry.file.end_pos {
+ let file_valid;
+ if self.source_map.files().len() > 0 {
+ let file_index = self.source_map.lookup_source_file_idx(pos);
+ let file = self.source_map.files()[file_index].clone();
+
+ if pos >= file.start_pos && pos < file.end_pos {
+ cache_entry.file = file;
+ cache_entry.file_index = file_index;
+ file_valid = true;
+ } else {
+ file_valid = false;
+ }
+ } else {
+ file_valid = false;
+ }
+
+ if !file_valid {
+ return None;
+ }
+ }
+
+ let line_index = cache_entry.file.lookup_line(pos).unwrap();
+ let line_bounds = cache_entry.file.line_bounds(line_index);
+
+ cache_entry.line_number = line_index + 1;
+ cache_entry.line_start = line_bounds.0;
+ cache_entry.line_end = line_bounds.1;
+ cache_entry.time_stamp = self.time_stamp;
+
+ return Some((cache_entry.file.clone(),
+ cache_entry.line_number,
+ pos - cache_entry.line_start));
+ }
+}
mir::StatementKind::EndRegion(ref region_scope) => {
region_scope.hash_stable(hcx, hasher);
}
- mir::StatementKind::Validate(ref op, ref places) => {
- op.hash_stable(hcx, hasher);
- places.hash_stable(hcx, hasher);
+ mir::StatementKind::Retag { fn_entry, ref place } => {
+ fn_entry.hash_stable(hcx, hasher);
+ place.hash_stable(hcx, hasher);
}
mir::StatementKind::AscribeUserType(ref place, ref variance, ref c_ty) => {
place.hash_stable(hcx, hasher);
impl_stable_hash_for!(enum mir::FakeReadCause { ForMatchGuard, ForMatchedPlace, ForLet });
-impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>>
- for mir::ValidationOperand<'gcx, T>
- where T: HashStable<StableHashingContext<'a>>
-{
- fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a>,
- hasher: &mut StableHasher<W>)
- {
- self.place.hash_stable(hcx, hasher);
- self.ty.hash_stable(hcx, hasher);
- self.re.hash_stable(hcx, hasher);
- self.mutbl.hash_stable(hcx, hasher);
- }
-}
-
-impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(region_scope) });
-
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for mir::Place<'gcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
ty::ReEmpty => {
// No variant fields to hash for these ...
}
- ty::ReCanonical(c) => {
- c.hash_stable(hcx, hasher);
- }
ty::ReLateBound(db, ty::BrAnon(i)) => {
db.hash_stable(hcx, hasher);
i.hash_stable(hcx, hasher);
}
}
-impl<'gcx> HashStable<StableHashingContext<'gcx>> for ty::BoundTyIndex {
+impl<'gcx> HashStable<StableHashingContext<'gcx>> for ty::BoundVar {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'gcx>,
Param(param_ty) => {
param_ty.hash_stable(hcx, hasher);
}
+ Bound(bound_ty) => {
+ bound_ty.hash_stable(hcx, hasher);
+ }
Foreign(def_id) => {
def_id.hash_stable(hcx, hasher);
}
FreshTy(a),
FreshIntTy(a),
FreshFloatTy(a),
- BoundTy(a),
});
impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
&VtableClosure(ref table_closure) => table_closure.hash_stable(hcx, hasher),
&VtableFnPointer(ref table_fn_pointer) => table_fn_pointer.hash_stable(hcx, hasher),
&VtableGenerator(ref table_generator) => table_generator.hash_stable(hcx, hasher),
+ &VtableTraitAlias(ref table_alias) => table_alias.hash_stable(hcx, hasher),
}
}
}
}
}
+impl<'a, 'gcx, N> HashStable<StableHashingContext<'a>>
+for traits::VtableTraitAliasData<'gcx, N> where N: HashStable<StableHashingContext<'a>> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a>,
+ hasher: &mut StableHasher<W>) {
+ let traits::VtableTraitAliasData {
+ alias_def_id,
+ substs,
+ ref nested,
+ } = *self;
+ alias_def_id.hash_stable(hcx, hasher);
+ substs.hash_stable(hcx, hasher);
+ nested.hash_stable(hcx, hasher);
+ }
+}
+
impl_stable_hash_for!(
impl<'tcx, V> for struct infer::canonical::Canonical<'tcx, V> {
- variables, value
+ max_universe, variables, value
}
);
impl_stable_hash_for!(enum infer::canonical::CanonicalVarKind {
Ty(k),
- Region
+ Region(ui),
+ PlaceholderRegion(placeholder),
});
impl_stable_hash_for!(enum infer::canonical::CanonicalTyVarKind {
//! ICH - Incremental Compilation Hash
crate use rustc_data_structures::fingerprint::Fingerprint;
-pub use self::caching_codemap_view::CachingSourceMapView;
+pub use self::caching_source_map_view::CachingSourceMapView;
pub use self::hcx::{StableHashingContextProvider, StableHashingContext, NodeIdHashingMode,
hash_stable_trait_impls};
-mod caching_codemap_view;
+mod caching_source_map_view;
mod hcx;
mod impls_cstore;
use std::sync::atomic::Ordering;
use ty::fold::{TypeFoldable, TypeFolder};
use ty::subst::Kind;
-use ty::{self, BoundTy, BoundTyIndex, Lift, List, Ty, TyCtxt, TypeFlags};
+use ty::{self, BoundTy, BoundVar, Lift, List, Ty, TyCtxt, TypeFlags};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
)
}
+ pub fn canonicalize_user_type_annotation<V>(&self, value: &V) -> Canonicalized<'gcx, V>
+ where
+ V: TypeFoldable<'tcx> + Lift<'gcx>,
+ {
+ let mut query_state = OriginalQueryValues::default();
+ Canonicalizer::canonicalize(
+ value,
+ Some(self),
+ self.tcx,
+ &CanonicalizeUserTypeAnnotation,
+ &mut query_state,
+ )
+ }
+
/// A hacky variant of `canonicalize_query` that does not
/// canonicalize `'static`. Unfortunately, the existing leak
/// check treaks `'static` differently in some cases (see also
impl CanonicalizeRegionMode for CanonicalizeQueryResponse {
fn canonicalize_free_region(
&self,
- _canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>,
+ canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>,
r: ty::Region<'tcx>,
) -> ty::Region<'tcx> {
match r {
ty::ReFree(_) | ty::ReEmpty | ty::ReErased | ty::ReStatic | ty::ReEarlyBound(..) => r,
+ ty::RePlaceholder(placeholder) => canonicalizer.canonical_var_for_region(
+ CanonicalVarInfo {
+ kind: CanonicalVarKind::PlaceholderRegion(*placeholder),
+ },
+ r,
+ ),
+ ty::ReVar(vid) => {
+ let universe = canonicalizer.region_var_universe(*vid);
+ canonicalizer.canonical_var_for_region(
+ CanonicalVarInfo {
+ kind: CanonicalVarKind::Region(universe),
+ },
+ r,
+ )
+ }
_ => {
// Other than `'static` or `'empty`, the query
// response should be executing in a fully
}
}
+struct CanonicalizeUserTypeAnnotation;
+
+impl CanonicalizeRegionMode for CanonicalizeUserTypeAnnotation {
+ fn canonicalize_free_region(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ match r {
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReErased | ty::ReEmpty | ty::ReStatic => r,
+ ty::ReVar(_) => canonicalizer.canonical_var_for_region_in_root_universe(r),
+ _ => {
+ // We only expect region names that the user can type.
+ bug!("unexpected region in query response: `{:?}`", r)
+ }
+ }
+ }
+
+ fn any(&self) -> bool {
+ false
+ }
+}
+
struct CanonicalizeAllFreeRegions;
impl CanonicalizeRegionMode for CanonicalizeAllFreeRegions {
canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>,
r: ty::Region<'tcx>,
) -> ty::Region<'tcx> {
- canonicalizer.canonical_var_for_region(r)
+ canonicalizer.canonical_var_for_region_in_root_universe(r)
}
fn any(&self) -> bool {
if let ty::ReStatic = r {
r
} else {
- canonicalizer.canonical_var_for_region(r)
+ canonicalizer.canonical_var_for_region_in_root_universe(r)
}
}
query_state: &'cx mut OriginalQueryValues<'tcx>,
// Note that indices is only used once `var_values` is big enough to be
// heap-allocated.
- indices: FxHashMap<Kind<'tcx>, BoundTyIndex>,
+ indices: FxHashMap<Kind<'tcx>, BoundVar>,
canonicalize_region_mode: &'cx dyn CanonicalizeRegionMode,
needs_canonical_flags: TypeFlags,
+
+ binder_index: ty::DebruijnIndex,
}
impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> {
self.tcx
}
+ fn fold_binder<T>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T>
+ where T: TypeFoldable<'tcx>
+ {
+ self.binder_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.binder_index.shift_out(1);
+ t
+ }
+
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
match *r {
- ty::ReLateBound(..) => {
- // leave bound regions alone
- r
+ ty::ReLateBound(index, ..) => {
+ if index >= self.binder_index {
+ bug!("escaping late bound region during canonicalization")
+ } else {
+ r
+ }
}
ty::ReVar(vid) => {
opportunistically resolved to {:?}",
vid, r
);
- self.canonical_var_for_region(r)
+ self.canonicalize_region_mode
+ .canonicalize_free_region(self, r)
}
ty::ReStatic
| ty::ReScope(_)
| ty::RePlaceholder(..)
| ty::ReEmpty
- | ty::ReErased => self.canonicalize_region_mode.canonicalize_free_region(self, r),
+ | ty::ReErased => self.canonicalize_region_mode
+ .canonicalize_free_region(self, r),
- ty::ReClosureBound(..) | ty::ReCanonical(_) => {
- bug!("canonical region encountered during canonicalization")
+ ty::ReClosureBound(..) => {
+ bug!("closure bound region encountered during canonicalization")
}
}
}
bug!("encountered a fresh type during canonicalization")
}
- ty::Infer(ty::BoundTy(_)) => {
- bug!("encountered a canonical type during canonicalization")
+ ty::Bound(bound_ty) => {
+ if bound_ty.index >= self.binder_index {
+ bug!("escaping bound type during canonicalization")
+ } else {
+ t
+ }
}
ty::Closure(..)
where
V: TypeFoldable<'tcx> + Lift<'gcx>,
{
- debug_assert!(
- !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS),
- "canonicalizing a canonical value: {:?}",
- value,
- );
-
let needs_canonical_flags = if canonicalize_region_mode.any() {
TypeFlags::HAS_FREE_REGIONS | TypeFlags::KEEP_IN_LOCAL_TCX
} else {
if !value.has_type_flags(needs_canonical_flags) {
let out_value = gcx.lift(value).unwrap();
let canon_value = Canonical {
+ max_universe: ty::UniverseIndex::ROOT,
variables: List::empty(),
value: out_value,
};
variables: SmallVec::new(),
query_state,
indices: FxHashMap::default(),
+ binder_index: ty::INNERMOST,
};
let out_value = value.fold_with(&mut canonicalizer);
let canonical_variables = tcx.intern_canonical_var_infos(&canonicalizer.variables);
+ let max_universe = canonical_variables
+ .iter()
+ .map(|cvar| cvar.universe())
+ .max()
+ .unwrap_or(ty::UniverseIndex::ROOT);
+
Canonical {
+ max_universe,
variables: canonical_variables,
value: out_value,
}
/// or returns an existing variable if `kind` has already been
/// seen. `kind` is expected to be an unbound variable (or
/// potentially a free region).
- fn canonical_var(&mut self, info: CanonicalVarInfo, kind: Kind<'tcx>) -> BoundTy {
+ fn canonical_var(&mut self, info: CanonicalVarInfo, kind: Kind<'tcx>) -> BoundVar {
let Canonicalizer {
variables,
query_state,
// direct linear search of `var_values`.
if let Some(idx) = var_values.iter().position(|&k| k == kind) {
// `kind` is already present in `var_values`.
- BoundTyIndex::new(idx)
+ BoundVar::new(idx)
} else {
// `kind` isn't present in `var_values`. Append it. Likewise
// for `info` and `variables`.
*indices = var_values
.iter()
.enumerate()
- .map(|(i, &kind)| (kind, BoundTyIndex::new(i)))
+ .map(|(i, &kind)| (kind, BoundVar::new(i)))
.collect();
}
// The cv is the index of the appended element.
- BoundTyIndex::new(var_values.len() - 1)
+ BoundVar::new(var_values.len() - 1)
}
} else {
// `var_values` is large. Do a hashmap search via `indices`.
variables.push(info);
var_values.push(kind);
assert_eq!(variables.len(), var_values.len());
- BoundTyIndex::new(variables.len() - 1)
+ BoundVar::new(variables.len() - 1)
})
};
- BoundTy {
- level: ty::INNERMOST,
- var,
- }
+ var
}
- fn canonical_var_for_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
- let info = CanonicalVarInfo {
- kind: CanonicalVarKind::Region,
- };
- let b = self.canonical_var(info, r.into());
- debug_assert_eq!(ty::INNERMOST, b.level);
- self.tcx().mk_region(ty::ReCanonical(b.var))
+ /// Shorthand helper that creates a canonical region variable for
+ /// `r` (always in the root universe). The reason that we always
+ /// put these variables into the root universe is because this
+ /// method is used during **query construction:** in that case, we
+ /// are taking all the regions and just putting them into the most
+ /// generic context we can. This may generate solutions that don't
+ /// fit (e.g., that equate some region variable with a placeholder
+ /// it can't name) on the caller side, but that's ok, the caller
+ /// can figure that out. In the meantime, it maximizes our
+ /// caching.
+ ///
+ /// (This works because unification never fails -- and hence trait
+ /// selection is never affected -- due to a universe mismatch.)
+ fn canonical_var_for_region_in_root_universe(
+ &mut self,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ self.canonical_var_for_region(
+ CanonicalVarInfo {
+ kind: CanonicalVarKind::Region(ty::UniverseIndex::ROOT),
+ },
+ r,
+ )
+ }
+
+ /// Returns the universe in which `vid` is defined.
+ fn region_var_universe(&self, vid: ty::RegionVid) -> ty::UniverseIndex {
+ self.infcx
+ .unwrap()
+ .borrow_region_constraints()
+ .var_universe(vid)
+ }
+
+ /// Create a canonical variable (with the given `info`)
+ /// representing the region `r`; return a region referencing it.
+ fn canonical_var_for_region(
+ &mut self,
+ info: CanonicalVarInfo,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ let var = self.canonical_var(info, r.into());
+ let region = ty::ReLateBound(
+ self.binder_index,
+ ty::BoundRegion::BrAnon(var.as_u32())
+ );
+ self.tcx().mk_region(region)
}
/// Given a type variable `ty_var` of the given kind, first check
let info = CanonicalVarInfo {
kind: CanonicalVarKind::Ty(ty_kind),
};
- let b = self.canonical_var(info, ty_var.into());
- debug_assert_eq!(ty::INNERMOST, b.level);
- self.tcx().mk_infer(ty::InferTy::BoundTy(b))
+ let var = self.canonical_var(info, ty_var.into());
+ self.tcx().mk_ty(ty::Bound(BoundTy::new(self.binder_index, var)))
}
}
}
//! - a map M (of type `CanonicalVarValues`) from those canonical
//! variables back to the original.
//!
-//! We can then do queries using T2. These will give back constriants
+//! We can then do queries using T2. These will give back constraints
//! on the canonical variables which can be translated, using the map
//! M, into constraints in our source context. This process of
//! translating the results back is done by the
use infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin};
use rustc_data_structures::indexed_vec::IndexVec;
-use smallvec::SmallVec;
use rustc_data_structures::sync::Lrc;
use serialize::UseSpecializedDecodable;
+use smallvec::SmallVec;
use std::ops::Index;
use syntax::source_map::Span;
use ty::fold::TypeFoldable;
use ty::subst::Kind;
-use ty::{self, BoundTyIndex, Lift, Region, List, TyCtxt};
+use ty::{self, BoundVar, Lift, List, Region, TyCtxt};
mod canonicalizer;
/// numbered starting from 0 in order of first appearance.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
pub struct Canonical<'gcx, V> {
+ pub max_universe: ty::UniverseIndex,
pub variables: CanonicalVarInfos<'gcx>,
pub value: V,
}
/// canonicalized query response.
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
pub struct CanonicalVarValues<'tcx> {
- pub var_values: IndexVec<BoundTyIndex, Kind<'tcx>>,
+ pub var_values: IndexVec<BoundVar, Kind<'tcx>>,
}
/// When we canonicalize a value to form a query, we wind up replacing
/// various parts of it with canonical variables. This struct stores
/// those replaced bits to remember for when we process the query
/// result.
-#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
pub struct OriginalQueryValues<'tcx> {
+ /// Map from the universes that appear in the query to the
+ /// universes in the caller context. For the time being, we only
+ /// ever put ROOT values into the query, so this map is very
+ /// simple.
+ pub universe_map: SmallVec<[ty::UniverseIndex; 4]>,
+
/// This is equivalent to `CanonicalVarValues`, but using a
/// `SmallVec` yields a significant performance win.
pub var_values: SmallVec<[Kind<'tcx>; 8]>,
}
+impl Default for OriginalQueryValues<'tcx> {
+ fn default() -> Self {
+ let mut universe_map = SmallVec::default();
+ universe_map.push(ty::UniverseIndex::ROOT);
+
+ Self {
+ universe_map,
+ var_values: SmallVec::default(),
+ }
+ }
+}
+
/// Information about a canonical variable that is included with the
/// canonical value. This is sufficient information for code to create
/// a copy of the canonical value in some other inference context,
pub kind: CanonicalVarKind,
}
+impl CanonicalVarInfo {
+ pub fn universe(&self) -> ty::UniverseIndex {
+ self.kind.universe()
+ }
+
+ pub fn is_existential(&self) -> bool {
+ match self.kind {
+ CanonicalVarKind::Ty(_) => true,
+ CanonicalVarKind::Region(_) => true,
+ CanonicalVarKind::PlaceholderRegion(..) => false,
+ }
+ }
+}
+
/// Describes the "kind" of the canonical variable. This is a "kind"
/// in the type-theory sense of the term -- i.e., a "meta" type system
/// that analyzes type-like values.
Ty(CanonicalTyVarKind),
/// Region variable `'?R`.
- Region,
+ Region(ty::UniverseIndex),
+
+ /// A "placeholder" that represents "any region". Created when you
+ /// are solving a goal like `for<'a> T: Foo<'a>` to represent the
+ /// bound region `'a`.
+ PlaceholderRegion(ty::Placeholder),
+}
+
+impl CanonicalVarKind {
+ pub fn universe(self) -> ty::UniverseIndex {
+ match self {
+ // At present, we don't support higher-ranked
+ // quantification over types, so all type variables are in
+ // the root universe.
+ CanonicalVarKind::Ty(_) => ty::UniverseIndex::ROOT,
+
+ // Region variables can be created in sub-universes.
+ CanonicalVarKind::Region(ui) => ui,
+ CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe,
+ }
+ }
}
/// Rust actually has more than one category of type variables;
/// let b: Canonical<'tcx, (T, Ty<'tcx>)> = a.unchecked_map(|v| (v, ty));
/// ```
pub fn unchecked_map<W>(self, map_op: impl FnOnce(V) -> W) -> Canonical<'gcx, W> {
- let Canonical { variables, value } = self;
- Canonical { variables, value: map_op(value) }
+ let Canonical {
+ max_universe,
+ variables,
+ value,
+ } = self;
+ Canonical {
+ max_universe,
+ variables,
+ value: map_op(value),
+ }
}
}
where
T: TypeFoldable<'tcx>,
{
+ // For each universe that is referred to in the incoming
+ // query, create a universe in our local inference context. In
+ // practice, as of this writing, all queries have no universes
+ // in them, so this code has no effect, but it is looking
+ // forward to the day when we *do* want to carry universes
+ // through into queries.
+ let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
+ .chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
+ .collect();
+
let canonical_inference_vars =
- self.fresh_inference_vars_for_canonical_vars(span, canonical.variables);
+ self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
let result = canonical.substitute(self.tcx, &canonical_inference_vars);
(result, canonical_inference_vars)
}
/// Given the "infos" about the canonical variables from some
- /// canonical, creates fresh inference variables with the same
- /// characteristics. You can then use `substitute` to instantiate
- /// the canonical variable with these inference variables.
- fn fresh_inference_vars_for_canonical_vars(
+ /// canonical, creates fresh variables with the same
+ /// characteristics (see `instantiate_canonical_var` for
+ /// details). You can then use `substitute` to instantiate the
+ /// canonical variable with these inference variables.
+ fn instantiate_canonical_vars(
&self,
span: Span,
variables: &List<CanonicalVarInfo>,
+ universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
- let var_values: IndexVec<BoundTyIndex, Kind<'tcx>> = variables
+ let var_values: IndexVec<BoundVar, Kind<'tcx>> = variables
.iter()
- .map(|info| self.fresh_inference_var_for_canonical_var(span, *info))
+ .map(|info| self.instantiate_canonical_var(span, *info, &universe_map))
.collect();
CanonicalVarValues { var_values }
}
/// Given the "info" about a canonical variable, creates a fresh
- /// inference variable with the same characteristics.
- fn fresh_inference_var_for_canonical_var(
+ /// variable for it. If this is an existentially quantified
+ /// variable, then you'll get a new inference variable; if it is a
+ /// universally quantified variable, you get a placeholder.
+ fn instantiate_canonical_var(
&self,
span: Span,
cv_info: CanonicalVarInfo,
+ universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> Kind<'tcx> {
match cv_info.kind {
CanonicalVarKind::Ty(ty_kind) => {
ty.into()
}
- CanonicalVarKind::Region => self
- .next_region_var(RegionVariableOrigin::MiscVariable(span))
- .into(),
+ CanonicalVarKind::Region(ui) => self.next_region_var_in_universe(
+ RegionVariableOrigin::MiscVariable(span),
+ universe_map(ui),
+ ).into(),
+
+ CanonicalVarKind::PlaceholderRegion(ty::Placeholder { universe, name }) => {
+ let universe_mapped = universe_map(universe);
+ let placeholder_mapped = ty::Placeholder {
+ universe: universe_mapped,
+ name,
+ };
+ self.tcx
+ .mk_region(ty::RePlaceholder(placeholder_mapped))
+ .into()
+ }
}
}
}
BraceStructTypeFoldableImpl! {
impl<'tcx, C> TypeFoldable<'tcx> for Canonical<'tcx, C> {
+ max_universe,
variables,
value,
} where C: TypeFoldable<'tcx>
BraceStructLiftImpl! {
impl<'a, 'tcx, T> Lift<'tcx> for Canonical<'a, T> {
type Lifted = Canonical<'tcx, T::Lifted>;
- variables, value
+ max_universe, variables, value
} where T: Lift<'tcx>
}
} where R: Lift<'tcx>
}
-impl<'tcx> Index<BoundTyIndex> for CanonicalVarValues<'tcx> {
+impl<'tcx> Index<BoundVar> for CanonicalVarValues<'tcx> {
type Output = Kind<'tcx>;
- fn index(&self, value: BoundTyIndex) -> &Kind<'tcx> {
+ fn index(&self, value: BoundVar) -> &Kind<'tcx> {
&self.var_values[value]
}
}
use traits::{Obligation, ObligationCause, PredicateObligation};
use ty::fold::TypeFoldable;
use ty::subst::{Kind, UnpackedKind};
-use ty::{self, BoundTyIndex, Lift, Ty, TyCtxt};
+use ty::{self, BoundVar, Lift, Ty, TyCtxt};
impl<'cx, 'gcx, 'tcx> InferCtxtBuilder<'cx, 'gcx, 'tcx> {
/// The "main method" for a canonicalized trait query. Given the
for (index, original_value) in original_values.var_values.iter().enumerate() {
// ...with the value `v_r` of that variable from the query.
let result_value = query_response.substitute_projected(self.tcx, &result_subst, |v| {
- &v.var_values[BoundTyIndex::new(index)]
+ &v.var_values[BoundVar::new(index)]
});
match (original_value.unpack(), result_value.unpack()) {
(UnpackedKind::Lifetime(ty::ReErased), UnpackedKind::Lifetime(ty::ReErased)) => {
// ...also include the other query region constraints from the query.
output_query_region_constraints.extend(
query_response.value.region_constraints.iter().filter_map(|r_c| {
- let &ty::OutlivesPredicate(k1, r2) = r_c.skip_binder(); // reconstructed below
- let k1 = substitute_value(self.tcx, &result_subst, &k1);
- let r2 = substitute_value(self.tcx, &result_subst, &r2);
+ let r_c = substitute_value(self.tcx, &result_subst, r_c);
+
+ // Screen out `'a: 'a` cases -- we skip the binder here but
+ // only compare the inner values to one another, so they are still at
+ // consistent binding levels.
+ let &ty::OutlivesPredicate(k1, r2) = r_c.skip_binder();
if k1 != r2.into() {
- Some(ty::Binder::bind(ty::OutlivesPredicate(k1, r2)))
+ Some(r_c)
} else {
None
}
original_values, query_response,
);
+ // For each new universe created in the query result that did
+ // not appear in the original query, create a local
+ // superuniverse.
+ let mut universe_map = original_values.universe_map.clone();
+ let num_universes_in_query = original_values.universe_map.len();
+ let num_universes_in_response = query_response.max_universe.as_usize() + 1;
+ for _ in num_universes_in_query..num_universes_in_response {
+ universe_map.push(self.create_next_universe());
+ }
+ assert!(universe_map.len() >= 1); // always have the root universe
+ assert_eq!(
+ universe_map[ty::UniverseIndex::ROOT.as_usize()],
+ ty::UniverseIndex::ROOT
+ );
+
// Every canonical query result includes values for each of
// the inputs to the query. Therefore, we begin by unifying
// these values with the original inputs that were
// is directly equal to one of the canonical variables in the
// result, then we can type the corresponding value from the
// input. See the example above.
- let mut opt_values: IndexVec<BoundTyIndex, Option<Kind<'tcx>>> =
+ let mut opt_values: IndexVec<BoundVar, Option<Kind<'tcx>>> =
IndexVec::from_elem_n(None, query_response.variables.len());
// In terms of our example above, we are iterating over pairs like:
match result_value.unpack() {
UnpackedKind::Type(result_value) => {
// e.g., here `result_value` might be `?0` in the example above...
- if let ty::Infer(ty::InferTy::BoundTy(b)) = result_value.sty {
- // in which case we would set `canonical_vars[0]` to `Some(?U)`.
+ if let ty::Bound(b) = result_value.sty {
+ // ...in which case we would set `canonical_vars[0]` to `Some(?U)`.
+
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(b.index, ty::INNERMOST);
opt_values[b.var] = Some(*original_value);
}
}
UnpackedKind::Lifetime(result_value) => {
// e.g., here `result_value` might be `'?1` in the example above...
- if let &ty::RegionKind::ReCanonical(index) = result_value {
- // in which case we would set `canonical_vars[0]` to `Some('static)`.
- opt_values[index] = Some(*original_value);
+ if let &ty::RegionKind::ReLateBound(index, br) = result_value {
+ // ... in which case we would set `canonical_vars[0]` to `Some('static)`.
+
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(index, ty::INNERMOST);
+ opt_values[br.assert_bound_var()] = Some(*original_value);
}
}
}
.variables
.iter()
.enumerate()
- .map(|(index, info)| opt_values[BoundTyIndex::new(index)].unwrap_or_else(||
- self.fresh_inference_var_for_canonical_var(cause.span, *info)
- ))
+ .map(|(index, info)| {
+ if info.is_existential() {
+ match opt_values[BoundVar::new(index)] {
+ Some(k) => k,
+ None => self.instantiate_canonical_var(cause.span, *info, |u| {
+ universe_map[u.as_usize()]
+ }),
+ }
+ } else {
+ self.instantiate_canonical_var(cause.span, *info, |u| {
+ universe_map[u.as_usize()]
+ })
+ }
+ })
.collect(),
};
// canonical variable; this is taken from
// `query_response.var_values` after applying the substitution
// `result_subst`.
- let substituted_query_response = |index: BoundTyIndex| -> Kind<'tcx> {
+ let substituted_query_response = |index: BoundVar| -> Kind<'tcx> {
query_response.substitute_projected(self.tcx, &result_subst, |v| &v.var_values[index])
};
unsubstituted_region_constraints
.iter()
.map(move |constraint| {
- let ty::OutlivesPredicate(k1, r2) = constraint.skip_binder(); // restored below
- let k1 = substitute_value(self.tcx, result_subst, k1);
- let r2 = substitute_value(self.tcx, result_subst, r2);
+ let constraint = substitute_value(self.tcx, result_subst, constraint);
+ let &ty::OutlivesPredicate(k1, r2) = constraint.skip_binder(); // restored below
Obligation::new(
cause.clone(),
param_env,
match k1.unpack() {
UnpackedKind::Lifetime(r1) => ty::Predicate::RegionOutlives(
- ty::Binder::dummy(
+ ty::Binder::bind(
ty::OutlivesPredicate(r1, r2)
- )),
+ )
+ ),
UnpackedKind::Type(t1) => ty::Predicate::TypeOutlives(
- ty::Binder::dummy(ty::OutlivesPredicate(
- t1, r2
- )))
+ ty::Binder::bind(
+ ty::OutlivesPredicate(t1, r2)
+ )
+ ),
}
)
})
cause: &ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
variables1: &OriginalQueryValues<'tcx>,
- variables2: impl Fn(BoundTyIndex) -> Kind<'tcx>,
+ variables2: impl Fn(BoundVar) -> Kind<'tcx>,
) -> InferResult<'tcx, ()> {
self.commit_if_ok(|_| {
let mut obligations = vec![];
for (index, value1) in variables1.var_values.iter().enumerate() {
- let value2 = variables2(BoundTyIndex::new(index));
+ let value2 = variables2(BoundVar::new(index));
match (value1.unpack(), value2.unpack()) {
(UnpackedKind::Type(v1), UnpackedKind::Type(v2)) => {
}
Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1),
})
- .map(ty::Binder::dummy) // no bound regions in the code above
+ .map(ty::Binder::dummy) // no bound vars in the code above
.chain(
outlives_obligations
.map(|(ty, r)| ty::OutlivesPredicate(ty.into(), r))
- .map(ty::Binder::dummy), // no bound regions in the code above
+ .map(ty::Binder::dummy) // no bound vars in the code above
)
.collect();
//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html
use infer::canonical::{Canonical, CanonicalVarValues};
-use ty::fold::{TypeFoldable, TypeFolder};
+use ty::fold::TypeFoldable;
use ty::subst::UnpackedKind;
-use ty::{self, Ty, TyCtxt, TypeFlags};
+use ty::{self, TyCtxt};
impl<'tcx, V> Canonical<'tcx, V> {
/// Instantiate the wrapped value, replacing each canonical value
T: TypeFoldable<'tcx>,
{
if var_values.var_values.is_empty() {
- debug_assert!(!value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS));
- value.clone()
- } else if !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) {
value.clone()
} else {
- value.fold_with(&mut CanonicalVarValuesSubst { tcx, var_values })
- }
-}
-
-struct CanonicalVarValuesSubst<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
- tcx: TyCtxt<'cx, 'gcx, 'tcx>,
- var_values: &'cx CanonicalVarValues<'tcx>,
-}
-
-impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for CanonicalVarValuesSubst<'cx, 'gcx, 'tcx> {
- fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> {
- self.tcx
- }
-
- fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- match t.sty {
- ty::Infer(ty::InferTy::BoundTy(b)) => {
- debug_assert_eq!(ty::INNERMOST, b.level);
- match self.var_values.var_values[b.var].unpack() {
- UnpackedKind::Type(ty) => ty,
- r => bug!("{:?} is a type but value is {:?}", b, r),
- }
+ let fld_r = |br: ty::BoundRegion| {
+ match var_values.var_values[br.assert_bound_var()].unpack() {
+ UnpackedKind::Lifetime(l) => l,
+ r => bug!("{:?} is a region but value is {:?}", br, r),
}
- _ => {
- if !t.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) {
- t
- } else {
- t.super_fold_with(self)
- }
+ };
+
+ let fld_t = |bound_ty: ty::BoundTy| {
+ match var_values.var_values[bound_ty.var].unpack() {
+ UnpackedKind::Type(ty) => ty,
+ r => bug!("{:?} is a type but value is {:?}", bound_ty, r),
}
- }
- }
+ };
- fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
- match r {
- ty::RegionKind::ReCanonical(c) => match self.var_values.var_values[*c].unpack() {
- UnpackedKind::Lifetime(l) => l,
- r => bug!("{:?} is a region but value is {:?}", c, r),
- },
- _ => r.super_fold_with(self),
- }
+ tcx.replace_escaping_bound_vars(value, fld_r, fld_t)
}
}
}
}
- ty::ReCanonical(..) |
ty::ReClosureBound(..) => {
span_bug!(
self.span,
}
// We shouldn't encounter an error message with ReClosureBound.
- ty::ReCanonical(..) | ty::ReClosureBound(..) => {
+ ty::ReClosureBound(..) => {
bug!("encountered unexpected ReClosureBound: {:?}", region,);
}
};
self.tcx().types.re_erased
}
- ty::ReCanonical(..) |
ty::ReClosureBound(..) => {
bug!(
"encountered unexpected region: {:?}",
t
}
- ty::Infer(ty::BoundTy(..)) =>
- bug!("encountered canonical ty during freshening"),
+ ty::Bound(..) =>
+ bug!("encountered bound ty during freshening"),
ty::Generator(..) |
ty::Bool |
use traits::ObligationCause;
use ty::{self, Ty, TyCtxt};
-use ty::error::TypeError;
use ty::relate::{Relate, RelateResult, TypeRelation};
/// "Greatest lower bound" (common subtype)
where T: Relate<'tcx>
{
debug!("binders(a={:?}, b={:?})", a, b);
- let was_error = self.infcx().probe(|_snapshot| {
- // Subtle: use a fresh combine-fields here because we recover
- // from Err. Doing otherwise could propagate obligations out
- // through our `self.obligations` field.
- self.infcx()
- .combine_fields(self.fields.trace.clone(), self.fields.param_env)
- .higher_ranked_glb(a, b, self.a_is_expected)
- .is_err()
- });
- debug!("binders: was_error={:?}", was_error);
// When higher-ranked types are involved, computing the LUB is
// very challenging, switch to invariance. This is obviously
// overly conservative but works ok in practice.
- match self.relate_with_variance(ty::Variance::Invariant, a, b) {
- Ok(_) => Ok(a.clone()),
- Err(err) => {
- debug!("binders: error occurred, was_error={:?}", was_error);
- if !was_error {
- Err(TypeError::OldStyleLUB(Box::new(err)))
- } else {
- Err(err)
- }
- }
- }
+ self.relate_with_variance(ty::Variance::Invariant, a, b)?;
+ Ok(a.clone())
}
}
use ty::{self, TyCtxt, Binder, TypeFoldable};
use ty::error::TypeError;
use ty::relate::{Relate, RelateResult, TypeRelation};
-use std::collections::BTreeMap;
use syntax_pos::Span;
use util::nodemap::{FxHashMap, FxHashSet};
Ok(HrMatchResult { value: a_value })
});
}
-
- pub fn higher_ranked_lub<T>(&mut self, a: &Binder<T>, b: &Binder<T>, a_is_expected: bool)
- -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'tcx>
- {
- // Start a snapshot so we can examine "all bindings that were
- // created as part of this type comparison".
- return self.infcx.commit_if_ok(|snapshot| {
- // Instantiate each bound region with a fresh region variable.
- let span = self.trace.cause.span;
- let (a_with_fresh, a_map) =
- self.infcx.replace_late_bound_regions_with_fresh_var(
- span, HigherRankedType, a);
- let (b_with_fresh, _) =
- self.infcx.replace_late_bound_regions_with_fresh_var(
- span, HigherRankedType, b);
-
- // Collect constraints.
- let result0 =
- self.lub(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?;
- let result0 =
- self.infcx.resolve_type_vars_if_possible(&result0);
- debug!("lub result0 = {:?}", result0);
-
- // Generalize the regions appearing in result0 if possible
- let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
- let span = self.trace.cause.span;
- let result1 =
- fold_regions_in(
- self.tcx(),
- &result0,
- |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
- &new_vars, &a_map, r));
-
- debug!("lub({:?},{:?}) = {:?}",
- a,
- b,
- result1);
-
- Ok(ty::Binder::bind(result1))
- });
-
- fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- span: Span,
- snapshot: &CombinedSnapshot<'a, 'tcx>,
- debruijn: ty::DebruijnIndex,
- new_vars: &[ty::RegionVid],
- a_map: &BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
- r0: ty::Region<'tcx>)
- -> ty::Region<'tcx> {
- // Regions that pre-dated the LUB computation stay as they are.
- if !is_var_in_set(new_vars, r0) {
- assert!(!r0.is_late_bound());
- debug!("generalize_region(r0={:?}): not new variable", r0);
- return r0;
- }
-
- let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both());
-
- // Variables created during LUB computation which are
- // *related* to regions that pre-date the LUB computation
- // stay as they are.
- if !tainted.iter().all(|&r| is_var_in_set(new_vars, r)) {
- debug!("generalize_region(r0={:?}): \
- non-new-variables found in {:?}",
- r0, tainted);
- assert!(!r0.is_late_bound());
- return r0;
- }
-
- // Otherwise, the variable must be associated with at
- // least one of the variables representing bound regions
- // in both A and B. Replace the variable with the "first"
- // bound region from A that we find it to be associated
- // with.
- for (a_br, a_r) in a_map {
- if tainted.iter().any(|x| x == a_r) {
- debug!("generalize_region(r0={:?}): \
- replacing with {:?}, tainted={:?}",
- r0, *a_br, tainted);
- return infcx.tcx.mk_region(ty::ReLateBound(debruijn, *a_br));
- }
- }
-
- span_bug!(
- span,
- "region {:?} is not associated with any bound region from A!",
- r0)
- }
- }
-
- pub fn higher_ranked_glb<T>(&mut self, a: &Binder<T>, b: &Binder<T>, a_is_expected: bool)
- -> RelateResult<'tcx, Binder<T>>
- where T: Relate<'tcx>
- {
- debug!("higher_ranked_glb({:?}, {:?})",
- a, b);
-
- // Make a snapshot so we can examine "all bindings that were
- // created as part of this type comparison".
- return self.infcx.commit_if_ok(|snapshot| {
- // Instantiate each bound region with a fresh region variable.
- let (a_with_fresh, a_map) =
- self.infcx.replace_late_bound_regions_with_fresh_var(
- self.trace.cause.span, HigherRankedType, a);
- let (b_with_fresh, b_map) =
- self.infcx.replace_late_bound_regions_with_fresh_var(
- self.trace.cause.span, HigherRankedType, b);
- let a_vars = var_ids(self, &a_map);
- let b_vars = var_ids(self, &b_map);
-
- // Collect constraints.
- let result0 =
- self.glb(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?;
- let result0 =
- self.infcx.resolve_type_vars_if_possible(&result0);
- debug!("glb result0 = {:?}", result0);
-
- // Generalize the regions appearing in result0 if possible
- let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot);
- let span = self.trace.cause.span;
- let result1 =
- fold_regions_in(
- self.tcx(),
- &result0,
- |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn,
- &new_vars,
- &a_map, &a_vars, &b_vars,
- r));
-
- debug!("glb({:?},{:?}) = {:?}",
- a,
- b,
- result1);
-
- Ok(ty::Binder::bind(result1))
- });
-
- fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- span: Span,
- snapshot: &CombinedSnapshot<'a, 'tcx>,
- debruijn: ty::DebruijnIndex,
- new_vars: &[ty::RegionVid],
- a_map: &BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
- a_vars: &[ty::RegionVid],
- b_vars: &[ty::RegionVid],
- r0: ty::Region<'tcx>)
- -> ty::Region<'tcx> {
- if !is_var_in_set(new_vars, r0) {
- assert!(!r0.is_late_bound());
- return r0;
- }
-
- let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both());
-
- let mut a_r = None;
- let mut b_r = None;
- let mut only_new_vars = true;
- for r in &tainted {
- if is_var_in_set(a_vars, *r) {
- if a_r.is_some() {
- return fresh_bound_variable(infcx, debruijn);
- } else {
- a_r = Some(*r);
- }
- } else if is_var_in_set(b_vars, *r) {
- if b_r.is_some() {
- return fresh_bound_variable(infcx, debruijn);
- } else {
- b_r = Some(*r);
- }
- } else if !is_var_in_set(new_vars, *r) {
- only_new_vars = false;
- }
- }
-
- // NB---I do not believe this algorithm computes
- // (necessarily) the GLB. As written it can
- // spuriously fail. In particular, if there is a case
- // like: |fn(&a)| and fn(fn(&b)), where a and b are
- // free, it will return fn(&c) where c = GLB(a,b). If
- // however this GLB is not defined, then the result is
- // an error, even though something like
- // "fn<X>(fn(&X))" where X is bound would be a
- // subtype of both of those.
- //
- // The problem is that if we were to return a bound
- // variable, we'd be computing a lower-bound, but not
- // necessarily the *greatest* lower-bound.
- //
- // Unfortunately, this problem is non-trivial to solve,
- // because we do not know at the time of computing the GLB
- // whether a GLB(a,b) exists or not, because we haven't
- // run region inference (or indeed, even fully computed
- // the region hierarchy!). The current algorithm seems to
- // works ok in practice.
-
- if a_r.is_some() && b_r.is_some() && only_new_vars {
- // Related to exactly one bound variable from each fn:
- return rev_lookup(infcx, span, a_map, a_r.unwrap());
- } else if a_r.is_none() && b_r.is_none() {
- // Not related to bound variables from either fn:
- assert!(!r0.is_late_bound());
- return r0;
- } else {
- // Other:
- return fresh_bound_variable(infcx, debruijn);
- }
- }
-
- fn rev_lookup<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- span: Span,
- a_map: &BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
- r: ty::Region<'tcx>) -> ty::Region<'tcx>
- {
- for (a_br, a_r) in a_map {
- if *a_r == r {
- return infcx.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, *a_br));
- }
- }
- span_bug!(
- span,
- "could not find original bound region for {:?}",
- r);
- }
-
- fn fresh_bound_variable<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- debruijn: ty::DebruijnIndex)
- -> ty::Region<'tcx> {
- infcx.borrow_region_constraints().new_bound(infcx.tcx, debruijn)
- }
- }
-}
-
-fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>,
- map: &BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
- -> Vec<ty::RegionVid> {
- map.iter()
- .map(|(_, &r)| match *r {
- ty::ReVar(r) => { r }
- _ => {
- span_bug!(
- fields.trace.cause.span,
- "found non-region-vid: {:?}",
- r);
- }
- })
- .collect()
-}
-
-fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region<'_>) -> bool {
- match *r {
- ty::ReVar(ref v) => new_vars.iter().any(|x| x == v),
- _ => false
- }
}
fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
type Node = Node;
type Edge = Edge<'tcx>;
fn nodes(&self) -> dot::Nodes<'_, Node> {
- let mut set = FxHashSet::default();
- for node in self.node_ids.keys() {
- set.insert(*node);
- }
+ let set = self.node_ids.keys().cloned().collect::<FxHashSet<_>>();
debug!("constraint graph has {} nodes", set.len());
set.into_iter().collect()
}
fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> {
let tcx = self.tcx();
match (a, b) {
- (&ty::ReCanonical(..), _)
- | (_, &ty::ReCanonical(..))
- | (&ty::ReClosureBound(..), _)
+ (&ty::ReClosureBound(..), _)
| (_, &ty::ReClosureBound(..))
| (&ReLateBound(..), _)
| (_, &ReLateBound(..))
use traits::ObligationCause;
use ty::{self, Ty, TyCtxt};
-use ty::error::TypeError;
use ty::relate::{Relate, RelateResult, TypeRelation};
/// "Least upper bound" (common supertype)
where T: Relate<'tcx>
{
debug!("binders(a={:?}, b={:?})", a, b);
- let was_error = self.infcx().probe(|_snapshot| {
- // Subtle: use a fresh combine-fields here because we recover
- // from Err. Doing otherwise could propagate obligations out
- // through our `self.obligations` field.
- self.infcx()
- .combine_fields(self.fields.trace.clone(), self.fields.param_env)
- .higher_ranked_lub(a, b, self.a_is_expected)
- .is_err()
- });
- debug!("binders: was_error={:?}", was_error);
// When higher-ranked types are involved, computing the LUB is
// very challenging, switch to invariance. This is obviously
// overly conservative but works ok in practice.
- match self.relate_with_variance(ty::Variance::Invariant, a, b) {
- Ok(_) => Ok(a.clone()),
- Err(err) => {
- debug!("binders: error occurred, was_error={:?}", was_error);
- if !was_error {
- Err(TypeError::OldStyleLUB(Box::new(err)))
- } else {
- Err(err)
- }
- }
- }
+ self.relate_with_variance(ty::Variance::Invariant, a, b)?;
+ Ok(a.clone())
}
}
ty, region, origin
);
- assert!(!ty.has_escaping_regions());
+ assert!(!ty.has_escaping_bound_vars());
let components = self.tcx.outlives_components(ty);
self.components_must_outlive(origin, components, region);
predicates
.into_iter()
.filter_map(|p| p.as_ref().to_opt_type_outlives())
- .filter_map(|p| p.no_late_bound_regions())
+ .filter_map(|p| p.no_bound_vars())
.filter(move |p| compare_ty(p.0))
}
}
ty::RePlaceholder(placeholder) => placeholder.universe,
ty::ReClosureBound(vid) | ty::ReVar(vid) => self.var_universe(vid),
ty::ReLateBound(..) => bug!("universe(): encountered bound region {:?}", region),
- ty::ReCanonical(..) => bug!(
- "region_universe(): encountered canonical region {:?}",
- region
- ),
}
}
// Shouldn't have any LBR here, so we can safely put
// this under a binder below without fear of accidental
// capture.
- assert!(!a.has_escaping_regions());
- assert!(!b.has_escaping_regions());
+ assert!(!a.has_escaping_bound_vars());
+ assert!(!b.has_escaping_bound_vars());
// can't make progress on `A <: B` if both A and B are
// type variables, so record an obligation. We also
#![feature(box_patterns)]
#![feature(box_syntax)]
-#![cfg_attr(stage0, feature(min_const_fn))]
#![feature(core_intrinsics)]
#![feature(drain_filter)]
#![cfg_attr(windows, feature(libc))]
#![feature(step_trait)]
#![feature(integer_atomics)]
#![feature(test)]
-#![cfg_attr(stage0, feature(impl_header_lifetime_elision))]
#![feature(in_band_lifetimes)]
#![feature(macro_at_most_once_rep)]
#![feature(crate_visibility_modifier)]
}
}
-impl<'a, 'tcx> LayoutOf for &'a LateContext<'a, 'tcx> {
+impl<'a, 'tcx> LayoutOf for LateContext<'a, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
}
}
return true;
}
- // (To be) stable attribute for #[lang = "panic_impl"]
- if attr::contains_name(attrs, "panic_implementation") ||
- attr::contains_name(attrs, "panic_handler")
- {
+ // Stable attribute for #[lang = "panic_impl"]
+ if attr::contains_name(attrs, "panic_handler") {
return true;
}
pub fn calculate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let sess = &tcx.sess;
- let mut fmts = FxHashMap::default();
- for &ty in sess.crate_types.borrow().iter() {
+ let fmts = sess.crate_types.borrow().iter().map(|&ty| {
let linkage = calculate_type(tcx, ty);
verify_ok(tcx, &linkage);
- fmts.insert(ty, linkage);
- }
+ (ty, linkage)
+ }).collect::<FxHashMap<_, _>>();
sess.abort_if_errors();
sess.dependency_formats.set(fmts);
}
// `Option<typeof(function)>` to present a clearer error.
let from = unpack_option_like(self.tcx.global_tcx(), from);
if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
- if size_to == Pointer.size(self.tcx) {
+ if size_to == Pointer.size(&self.tcx) {
struct_span_err!(self.tcx.sess, span, E0591,
"can't transmute zero-sized type")
.note(&format!("source type: {}", from))
if let Some(value) = attribute.value_str() {
return Some((value, attribute.span));
}
- } else if attribute.check_name("panic_implementation") ||
- attribute.check_name("panic_handler")
- {
+ } else if attribute.check_name("panic_handler") {
return Some((Symbol::intern("panic_impl"), attribute.span))
} else if attribute.check_name("alloc_error_handler") {
return Some((Symbol::intern("oom"), attribute.span))
DropTraitLangItem, "drop", drop_trait, Target::Trait;
CoerceUnsizedTraitLangItem, "coerce_unsized", coerce_unsized_trait, Target::Trait;
+ DispatchFromDynTraitLangItem,"dispatch_from_dyn", dispatch_from_dyn_trait, Target::Trait;
AddTraitLangItem, "add", add_trait, Target::Trait;
SubTraitLangItem, "sub", sub_trait, Target::Trait;
map
}
+/// In traits, there is an implicit `Self` type parameter which comes before the generics.
+/// We have to account for this when computing the index of the other generic parameters.
+/// This function returns whether there is such an implicit parameter defined on the given item.
+fn sub_items_have_self_param(node: &hir::ItemKind) -> bool {
+ match *node {
+ hir::ItemKind::Trait(..) |
+ hir::ItemKind::TraitAlias(..) => true,
+ _ => false,
+ }
+}
+
impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::All(&self.tcx.hir)
hir::ItemKind::Impl(..) => true,
_ => false,
};
- // These kinds of items have only early bound lifetime parameters.
- let mut index = if let hir::ItemKind::Trait(..) = item.node {
+ // These kinds of items have only early-bound lifetime parameters.
+ let mut index = if sub_items_have_self_param(&item.node) {
1 // Self comes before lifetimes
} else {
0
/// helper method to determine the span to remove when suggesting the
/// deletion of a lifetime
fn lifetime_deletion_span(&self, name: ast::Ident, generics: &hir::Generics) -> Option<Span> {
- if generics.params.len() == 1 {
- // if sole lifetime, remove the `<>` brackets
- Some(generics.span)
- } else {
- generics.params.iter().enumerate().find_map(|(i, param)| {
- if param.name.ident() == name {
- // We also want to delete a leading or trailing comma
- // as appropriate
- if i >= generics.params.len() - 1 {
- Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
- } else {
- Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
+ generics.params.iter().enumerate().find_map(|(i, param)| {
+ if param.name.ident() == name {
+ let mut in_band = false;
+ if let hir::GenericParamKind::Lifetime { kind } = param.kind {
+ if let hir::LifetimeParamKind::InBand = kind {
+ in_band = true;
}
+ }
+ if in_band {
+ Some(param.span)
} else {
- None
+ if generics.params.len() == 1 {
+ // if sole lifetime, remove the entire `<>` brackets
+ Some(generics.span)
+ } else {
+ // if removing within `<>` brackets, we also want to
+ // delete a leading or trailing comma as appropriate
+ if i >= generics.params.len() - 1 {
+ Some(generics.params[i - 1].span.shrink_to_hi().to(param.span))
+ } else {
+ Some(param.span.to(generics.params[i + 1].span.shrink_to_lo()))
+ }
+ }
}
- })
+ } else {
+ None
+ }
+ })
+ }
+
+ // helper method to issue suggestions from `fn rah<'a>(&'a T)` to `fn rah(&T)`
+ fn suggest_eliding_single_use_lifetime(
+ &self, err: &mut DiagnosticBuilder<'_>, def_id: DefId, lifetime: &hir::Lifetime
+ ) {
+ // FIXME: future work: also suggest `impl Foo<'_>` for `impl<'a> Foo<'a>`
+ let name = lifetime.name.ident();
+ let mut remove_decl = None;
+ if let Some(parent_def_id) = self.tcx.parent(def_id) {
+ if let Some(generics) = self.tcx.hir.get_generics(parent_def_id) {
+ remove_decl = self.lifetime_deletion_span(name, generics);
+ }
+ }
+
+ let mut remove_use = None;
+ let mut find_arg_use_span = |inputs: &hir::HirVec<hir::Ty>| {
+ for input in inputs {
+ if let hir::TyKind::Rptr(lt, _) = input.node {
+ if lt.name.ident() == name {
+ // include the trailing whitespace between the ampersand and the type name
+ let lt_through_ty_span = lifetime.span.to(input.span.shrink_to_hi());
+ remove_use = Some(
+ self.tcx.sess.source_map()
+ .span_until_non_whitespace(lt_through_ty_span)
+ );
+ break;
+ }
+ }
+ }
+ };
+ if let Node::Lifetime(hir_lifetime) = self.tcx.hir.get(lifetime.id) {
+ if let Some(parent) = self.tcx.hir.find(self.tcx.hir.get_parent(hir_lifetime.id)) {
+ match parent {
+ Node::Item(item) => {
+ if let hir::ItemKind::Fn(decl, _, _, _) = &item.node {
+ find_arg_use_span(&decl.inputs);
+ }
+ },
+ Node::ImplItem(impl_item) => {
+ if let hir::ImplItemKind::Method(sig, _) = &impl_item.node {
+ find_arg_use_span(&sig.decl.inputs);
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+
+ if let (Some(decl_span), Some(use_span)) = (remove_decl, remove_use) {
+ // if both declaration and use deletion spans start at the same
+ // place ("start at" because the latter includes trailing
+ // whitespace), then this is an in-band lifetime
+ if decl_span.shrink_to_lo() == use_span.shrink_to_lo() {
+ err.span_suggestion_with_applicability(
+ use_span,
+ "elide the single-use lifetime",
+ String::new(),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.multipart_suggestion_with_applicability(
+ "elide the single-use lifetime",
+ vec![(decl_span, String::new()), (use_span, String::new())],
+ Applicability::MachineApplicable,
+ );
+ }
}
}
_ => None,
} {
debug!("id = {:?} span = {:?} name = {:?}", node_id, span, name);
+
+ if name == keywords::UnderscoreLifetime.ident() {
+ continue;
+ }
+
let mut err = self.tcx.struct_span_lint_node(
lint::builtin::SINGLE_USE_LIFETIMES,
id,
span,
&format!("lifetime parameter `{}` only used once", name),
);
- err.span_label(span, "this lifetime...");
- err.span_label(lifetime.span, "...is used only here");
+
+ if span == lifetime.span {
+ // spans are the same for in-band lifetime declarations
+ err.span_label(span, "this lifetime is only used here");
+ } else {
+ err.span_label(span, "this lifetime...");
+ err.span_label(lifetime.span, "...is used only here");
+ }
+ self.suggest_eliding_single_use_lifetime(&mut err, def_id, lifetime);
err.emit();
}
}
if let Some(span) = unused_lt_span {
err.span_suggestion_with_applicability(
span,
- "remove it",
+ "elide the unused lifetime",
String::new(),
Applicability::MachineApplicable,
);
let mut index = 0;
if let Some(parent_id) = parent_id {
let parent = self.tcx.hir.expect_item(parent_id);
- if let hir::ItemKind::Trait(..) = parent.node {
- index += 1; // Self comes first.
+ if sub_items_have_self_param(&parent.node) {
+ index += 1; // Self comes before lifetimes
}
match parent.node {
hir::ItemKind::Trait(_, _, ref generics, ..)
// These are not supposed to be overridden.
#[inline(always)]
- fn pointer_size(self) -> Size {
+ fn pointer_size(&self) -> Size {
self.data_layout().pointer_size
}
/// Truncate the given value to the pointer size; also return whether there was an overflow
- fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
+ fn truncate_to_ptr(&self, val: u128) -> (u64, bool) {
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
}
// Overflow checking only works properly on the range from -u64 to +u64.
- fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
+ fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
// FIXME: is it possible to over/underflow here?
if i < 0 {
// trickery to ensure that i64::min_value() works fine
}
}
- fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
+ fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
let (res, over1) = val.overflowing_add(i);
let (res, over2) = self.truncate_to_ptr(res as u128);
(res, over1 || over2)
}
- fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i as i128);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
- fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+ fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
let (res, over) = self.overflowing_offset(val, i);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
- fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
+ fn wrapping_signed_offset(&self, val: u64, i: i64) -> u64 {
self.overflowing_signed_offset(val, i as i128).0
}
}
Pointer { alloc_id, offset, tag }
}
- pub fn wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
+ pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
)
}
- pub fn overflowing_signed_offset(self, i: i128, cx: impl HasDataLayout) -> (Self, bool) {
+ pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
- pub fn signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
))
}
- pub fn overflowing_offset(self, i: Size, cx: impl HasDataLayout) -> (Self, bool) {
+ pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
- pub fn offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
}
pub fn new(data_offsets: Vec<u32>) -> AllocDecodingState {
- let decoding_state: Vec<_> = ::std::iter::repeat(Mutex::new(State::Empty))
- .take(data_offsets.len())
- .collect();
+ let decoding_state = vec![Mutex::new(State::Empty); data_offsets.len()];
AllocDecodingState {
- decoding_state: decoding_state,
+ decoding_state,
data_offsets,
}
}
pub fn new_slice(
val: Scalar,
len: u64,
- cx: impl HasDataLayout
+ cx: &impl HasDataLayout
) -> Self {
ConstValue::ScalarPair(val, Scalar::Bits {
bits: len as u128,
}
#[inline]
- pub fn ptr_null(cx: impl HasDataLayout) -> Self {
+ pub fn ptr_null(cx: &impl HasDataLayout) -> Self {
Scalar::Bits {
bits: 0,
size: cx.data_layout().pointer_size.bytes() as u8,
}
#[inline]
- pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
- let layout = cx.data_layout();
+ pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
- assert_eq!(size as u64, layout.pointer_size.bytes());
+ assert_eq!(size as u64, dl.pointer_size.bytes());
Ok(Scalar::Bits {
- bits: layout.signed_offset(bits as u64, i)? as u128,
+ bits: dl.signed_offset(bits as u64, i)? as u128,
size,
})
}
- Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr),
+ Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
}
}
#[inline]
- pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
- let layout = cx.data_layout();
+ pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
- assert_eq!(size as u64, layout.pointer_size.bytes());
+ assert_eq!(size as u64, dl.pointer_size.bytes());
Ok(Scalar::Bits {
- bits: layout.offset(bits as u64, i.bytes())? as u128,
+ bits: dl.offset(bits as u64, i.bytes())? as u128,
size,
})
}
- Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr),
+ Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
}
}
#[inline]
- pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
- let layout = cx.data_layout();
+ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
+ let dl = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
- assert_eq!(size as u64, layout.pointer_size.bytes());
+ assert_eq!(size as u64, dl.pointer_size.bytes());
Scalar::Bits {
- bits: layout.wrapping_signed_offset(bits as u64, i) as u128,
+ bits: dl.wrapping_signed_offset(bits as u64, i) as u128,
size,
}
}
- Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)),
+ Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)),
}
}
#[inline]
- pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
+ pub fn is_null_ptr(self, cx: &impl HasDataLayout) -> bool {
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
Ok(b as u64)
}
- pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
assert_eq!(b as u64 as u128, b);
Ok(b as u64)
Ok(b as i64)
}
- pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
let b = sign_extend(b, cx.data_layout().pointer_size) as i128;
assert_eq!(b as i64 as i128, b);
/// implicit closure bindings. It is needed when the closure is
/// borrowing or mutating a mutable referent, e.g.:
///
- /// let x: &mut isize = ...;
- /// let y = || *x += 5;
+ /// let x: &mut isize = ...;
+ /// let y = || *x += 5;
///
/// If we were to try to translate this closure into a more explicit
/// form, we'd encounter an error with the code as written:
///
- /// struct Env { x: & &mut isize }
- /// let x: &mut isize = ...;
- /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn
- /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// struct Env { x: & &mut isize }
+ /// let x: &mut isize = ...;
+ /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
///
/// This is then illegal because you cannot mutate an `&mut` found
/// in an aliasable location. To solve, you'd have to translate with
/// an `&mut` borrow:
///
- /// struct Env { x: & &mut isize }
- /// let x: &mut isize = ...;
- /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
- /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// struct Env { x: & &mut isize }
+ /// let x: &mut isize = ...;
+ /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
///
/// Now the assignment to `**env.x` is legal, but creating a
/// mutable pointer to `x` is not because `x` is not mutable. We
InlineAsm {
asm: Box<InlineAsm>,
outputs: Box<[Place<'tcx>]>,
- inputs: Box<[Operand<'tcx>]>,
+ inputs: Box<[(Span, Operand<'tcx>)]>,
},
- /// Assert the given places to be valid inhabitants of their type. These statements are
- /// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed.
- /// See <https://internals.rust-lang.org/t/types-as-contracts/5562/73> for more details.
- Validate(ValidationOp, Vec<ValidationOperand<'tcx, Place<'tcx>>>),
+ /// Retag references in the given place, ensuring they got fresh tags. This is
+ /// part of the Stacked Borrows model. These statements are currently only interpreted
+ /// by miri and only generated when "-Z mir-emit-retag" is passed.
+ /// See <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/>
+ /// for more details.
+ Retag {
+ /// `fn_entry` indicates whether this is the initial retag that happens in the
+ /// function prolog.
+ fn_entry: bool,
+ place: Place<'tcx>,
+ },
/// Mark one terminating point of a region scope (i.e. static region).
/// (The starting point(s) arise implicitly from borrows.)
ForLet,
}
-/// The `ValidationOp` describes what happens with each of the operands of a
-/// `Validate` statement.
-#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)]
-pub enum ValidationOp {
- /// Recursively traverse the place following the type and validate that all type
- /// invariants are maintained. Furthermore, acquire exclusive/read-only access to the
- /// memory reachable from the place.
- Acquire,
- /// Recursive traverse the *mutable* part of the type and relinquish all exclusive
- /// access.
- Release,
- /// Recursive traverse the *mutable* part of the type and relinquish all exclusive
- /// access *until* the given region ends. Then, access will be recovered.
- Suspend(region::Scope),
-}
-
-impl Debug for ValidationOp {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- use self::ValidationOp::*;
- match *self {
- Acquire => write!(fmt, "Acquire"),
- Release => write!(fmt, "Release"),
- // (reuse lifetime rendering policy from ppaux.)
- Suspend(ref ce) => write!(fmt, "Suspend({})", ty::ReScope(*ce)),
- }
- }
-}
-
-// This is generic so that it can be reused by miri
-#[derive(Clone, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)]
-pub struct ValidationOperand<'tcx, T> {
- pub place: T,
- pub ty: Ty<'tcx>,
- pub re: Option<region::Scope>,
- pub mutbl: hir::Mutability,
-}
-
-impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- write!(fmt, "{:?}: {:?}", self.place, self.ty)?;
- if let Some(ce) = self.re {
- // (reuse lifetime rendering policy from ppaux.)
- write!(fmt, "/{}", ty::ReScope(ce))?;
- }
- if let hir::MutImmutable = self.mutbl {
- write!(fmt, " (imm)")?;
- }
- Ok(())
- }
-}
-
impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use self::StatementKind::*;
FakeRead(ref cause, ref place) => write!(fmt, "FakeRead({:?}, {:?})", cause, place),
// (reuse lifetime rendering policy from ppaux.)
EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)),
- Validate(ref op, ref places) => write!(fmt, "Validate({:?}, {:?})", op, places),
+ Retag { fn_entry, ref place } =>
+ write!(fmt, "Retag({}{:?})", if fn_entry { "[fn entry] " } else { "" }, place),
StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
SetDiscriminant {
SourceInfo,
UpvarDecl,
FakeReadCause,
- ValidationOp,
SourceScope,
SourceScopeData,
SourceScopeLocalData,
}
}
-BraceStructTypeFoldableImpl! {
- impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> {
- place, ty, re, mutbl
- }
-}
-
BraceStructTypeFoldableImpl! {
impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
source_info, kind
(StatementKind::StorageLive)(a),
(StatementKind::StorageDead)(a),
(StatementKind::InlineAsm) { asm, outputs, inputs },
- (StatementKind::Validate)(a, b),
+ (StatementKind::Retag) { fn_entry, place },
(StatementKind::EndRegion)(a),
(StatementKind::AscribeUserType)(a, v, b),
(StatementKind::Nop),
self.super_ascribe_user_ty(place, variance, user_ty, location);
}
+ fn visit_retag(&mut self,
+ fn_entry: & $($mutability)* bool,
+ place: & $($mutability)* Place<'tcx>,
+ location: Location) {
+ self.super_retag(fn_entry, place, location);
+ }
+
fn visit_place(&mut self,
place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
);
}
StatementKind::EndRegion(_) => {}
- StatementKind::Validate(_, ref $($mutability)* places) => {
- for operand in places {
- self.visit_place(
- & $($mutability)* operand.place,
- PlaceContext::NonUse(NonUseContext::Validate),
- location
- );
- self.visit_ty(& $($mutability)* operand.ty,
- TyContext::Location(location));
- }
- }
StatementKind::SetDiscriminant{ ref $($mutability)* place, .. } => {
self.visit_place(
place,
location
);
}
- for input in & $($mutability)* inputs[..] {
+ for (span, input) in & $($mutability)* inputs[..] {
+ self.visit_span(span);
self.visit_operand(input, location);
}
}
+ StatementKind::Retag { ref $($mutability)* fn_entry,
+ ref $($mutability)* place } => {
+ self.visit_retag(fn_entry, place, location);
+ }
StatementKind::AscribeUserType(
ref $($mutability)* place,
ref $($mutability)* variance,
self.visit_user_type_projection(user_ty);
}
+ fn super_retag(&mut self,
+ _fn_entry: & $($mutability)* bool,
+ place: & $($mutability)* Place<'tcx>,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Retag),
+ location,
+ );
+ }
+
fn super_place(&mut self,
place: & $($mutability)* Place<'tcx>,
context: PlaceContext<'tcx>,
/// f(&mut x.y);
///
Projection,
+ /// Retagging (updating the "Stacked Borrows" tag)
+ Retag,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
StorageDead,
/// User type annotation assertions for NLL.
AscribeUserTy,
- /// Validation command.
- Validate,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
"in addition to `.mir` files, create graphviz `.dot` files"),
dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
"if set, exclude the pass number when dumping MIR (used in tests)"),
- mir_emit_validate: usize = (0, parse_uint, [TRACKED],
- "emit Validate MIR statements, interpreted e.g. by miri (0: do not emit; 1: if function \
- contains unsafe block, only validate arguments; 2: always emit full validation)"),
+ mir_emit_retag: bool = (false, parse_bool, [TRACKED],
+ "emit Retagging MIR statements, interpreted e.g. by miri; implies -Zmir-opt-level=0"),
perf_stats: bool = (false, parse_bool, [UNTRACKED],
"print some performance-related statistics"),
hir_stats: bool = (false, parse_bool, [UNTRACKED],
let fuel = self.optimization_fuel_limit.get();
ret = fuel != 0;
if fuel == 0 && !self.out_of_fuel.get() {
- println!("optimization-fuel-exhausted: {}", msg());
+ eprintln!("optimization-fuel-exhausted: {}", msg());
self.out_of_fuel.set(true);
} else if fuel > 0 {
self.optimization_fuel_limit.set(fuel - 1);
ty::RegionKind::ReLateBound(_, _),
) => {}
- (ty::RegionKind::ReLateBound(_, _), _) => {
+ (ty::RegionKind::ReLateBound(_, _), _) |
+ (_, ty::RegionKind::ReVar(_)) => {
+ // One of these is true:
// The new predicate has a HRTB in a spot where the old
// predicate does not (if they both had a HRTB, the previous
- // match arm would have executed).
+ // match arm would have executed). A HRTB is a 'stricter'
+ // bound than anything else, so we want to keep the newer
+ // predicate (with the HRTB) in place of the old predicate.
//
- // The means we want to remove the older predicate from
- // user_computed_preds, since having both it and the new
+ // OR
+ //
+ // The old predicate has a region variable where the new
+ // predicate has some other kind of region. A region
+ // variable isn't something we can actually display to a user,
+ // so we choose the new predicate (which doesn't have a region
+ // variable).
+ //
+ // In both cases, we want to remove the old predicate,
+ // from user_computed_preds, and replace it with the new
+ // one. Having both the old and the new
// predicate in a ParamEnv would confuse SelectionContext
+ //
// We're currently in the predicate passed to 'retain',
// so we return 'false' to remove the old predicate from
// user_computed_preds
return false;
}
- (_, ty::RegionKind::ReLateBound(_, _)) => {
- // This is the opposite situation as the previous arm - the
- // old predicate has a HRTB lifetime in a place where the
- // new predicate does not. We want to leave the old
+ (_, ty::RegionKind::ReLateBound(_, _)) |
+ (ty::RegionKind::ReVar(_), _) => {
+ // This is the opposite situation as the previous arm.
+ // One of these is true:
+ //
+ // The old predicate has a HRTB lifetime in a place where the
+ // new predicate does not.
+ //
+ // OR
+ //
+ // The new predicate has a region variable where the old
+ // predicate has some other type of region.
+ //
+ // We want to leave the old
// predicate in user_computed_preds, and skip adding
// new_pred to user_computed_params.
should_add_new = false
- }
+ },
_ => {}
}
}
}
&ty::Predicate::TypeOutlives(ref binder) => {
match (
- binder.no_late_bound_regions(),
- binder.map_bound_ref(|pred| pred.0).no_late_bound_regions(),
+ binder.no_bound_vars(),
+ binder.map_bound_ref(|pred| pred.0).no_bound_vars(),
) {
(None, Some(t_a)) => {
select.infcx().register_region_obligation_with_cause(
false
}
- ty::Infer(..) => match in_crate {
+ ty::Bound(..) | ty::Infer(..) => match in_crate {
InCrate::Local => false,
// The inference variable might be unified with a local
// type in that remote crate.
use infer::{self, InferCtxt};
use infer::type_variable::TypeVariableOrigin;
use std::fmt;
-use std::iter;
use syntax::ast;
use session::DiagnosticMessageId;
use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
ty::Generator(..) => Some(18),
ty::Foreign(..) => Some(19),
ty::GeneratorWitness(..) => Some(20),
- ty::Infer(..) | ty::Error => None,
+ ty::Bound(..) | ty::Infer(..) | ty::Error => None,
ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"),
}
}
));
let tcx = self.tcx;
if let Some(len) = len.val.try_to_scalar().and_then(|scalar| {
- scalar.to_usize(tcx).ok()
+ scalar.to_usize(&tcx).ok()
}) {
flags.push((
"_Self".to_owned(),
}
ty::Predicate::ObjectSafe(trait_def_id) => {
- let violations = self.tcx.object_safety_violations(trait_def_id);
+ let violations = self.tcx.global_tcx()
+ .object_safety_violations(trait_def_id);
self.tcx.report_object_safety_error(span,
trait_def_id,
violations)
}
TraitNotObjectSafe(did) => {
- let violations = self.tcx.object_safety_violations(did);
+ let violations = self.tcx.global_tcx().object_safety_violations(did);
self.tcx.report_object_safety_error(span, did, violations)
}
// found arguments is empty (assume the user just wants to ignore args in this case).
// For example, if `expected_args_length` is 2, suggest `|_, _|`.
if found_args.is_empty() && is_closure {
- let underscores = iter::repeat("_")
- .take(expected_args.len())
- .collect::<Vec<_>>()
- .join(", ");
+ let underscores = vec!["_"; expected_args.len()].join(", ");
err.span_suggestion_with_applicability(
found_span,
&format!(
debug!("normalize_projection_type(projection_ty={:?})",
projection_ty);
- debug_assert!(!projection_ty.has_escaping_regions());
+ debug_assert!(!projection_ty.has_escaping_bound_vars());
// FIXME(#20304) -- cache
}
ty::Predicate::TypeOutlives(ref binder) => {
- // Check if there are higher-ranked regions.
- match binder.no_late_bound_regions() {
+ // Check if there are higher-ranked vars.
+ match binder.no_bound_vars() {
// If there are, inspect the underlying type further.
None => {
// Convert from `Binder<OutlivesPredicate<Ty, Region>>` to `Binder<Ty>`.
let binder = binder.map_bound_ref(|pred| pred.0);
- // Check if the type has any bound regions.
- match binder.no_late_bound_regions() {
+ // Check if the type has any bound vars.
+ match binder.no_bound_vars() {
// If so, this obligation is an error (for now). Eventually we should be
// able to support additional cases here, like `for<'a> &'a str: 'a`.
// NOTE: this is duplicate-implemented between here and fulfillment.
domain_goal: PolyDomainGoal<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
) -> GoalKind<'tcx> {
- match domain_goal.no_late_bound_regions() {
+ match domain_goal.no_bound_vars() {
Some(p) => p.into_goal(),
None => GoalKind::Quantified(
QuantifierKind::Universal,
/// Same as above, but for a fn pointer type with the given signature.
VtableFnPointer(VtableFnPointerData<'tcx, N>),
- /// Vtable automatically generated for a generator
+ /// Vtable automatically generated for a generator.
VtableGenerator(VtableGeneratorData<'tcx, N>),
+
+ /// Vtable for a trait alias.
+ VtableTraitAlias(VtableTraitAliasData<'tcx, N>),
}
/// Identifies a particular impl in the source, along with a set of
pub nested: Vec<N>
}
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)]
+pub struct VtableTraitAliasData<'tcx, N> {
+ pub alias_def_id: DefId,
+ pub substs: &'tcx Substs<'tcx>,
+ pub nested: Vec<N>,
+}
+
/// Creates predicate obligations from the generic bounds.
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
VtableGenerator(c) => c.nested,
VtableObject(d) => d.nested,
VtableFnPointer(d) => d.nested,
+ VtableTraitAlias(d) => d.nested,
}
}
trait_def_id: d.trait_def_id,
nested: d.nested.into_iter().map(f).collect(),
}),
- VtableFnPointer(p) => VtableFnPointer(VtableFnPointerData {
- fn_ty: p.fn_ty,
- nested: p.nested.into_iter().map(f).collect(),
+ VtableClosure(c) => VtableClosure(VtableClosureData {
+ closure_def_id: c.closure_def_id,
+ substs: c.substs,
+ nested: c.nested.into_iter().map(f).collect(),
}),
VtableGenerator(c) => VtableGenerator(VtableGeneratorData {
generator_def_id: c.generator_def_id,
substs: c.substs,
nested: c.nested.into_iter().map(f).collect(),
}),
- VtableClosure(c) => VtableClosure(VtableClosureData {
- closure_def_id: c.closure_def_id,
- substs: c.substs,
- nested: c.nested.into_iter().map(f).collect(),
- })
+ VtableFnPointer(p) => VtableFnPointer(VtableFnPointerData {
+ fn_ty: p.fn_ty,
+ nested: p.nested.into_iter().map(f).collect(),
+ }),
+ VtableTraitAlias(d) => VtableTraitAlias(VtableTraitAliasData {
+ alias_def_id: d.alias_def_id,
+ substs: d.substs,
+ nested: d.nested.into_iter().map(f).collect(),
+ }),
}
}
}
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
-//! - have a suitable receiver from which we can extract a vtable;
+//! - have a suitable receiver from which we can extract a vtable and coerce to a "thin" version
+//! that doesn't contain the vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use hir::def_id::DefId;
use lint;
-use traits;
-use ty::{self, Ty, TyCtxt, TypeFoldable};
-use ty::util::ExplicitSelf;
+use traits::{self, Obligation, ObligationCause};
+use ty::{self, Ty, TyCtxt, TypeFoldable, Predicate, ToPredicate};
+use ty::subst::{Subst, Substs};
use std::borrow::Cow;
-use syntax::ast;
+use std::iter::{self};
+use syntax::ast::{self, Name};
use syntax_pos::Span;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
format!("method `{}` references the `Self` type in where clauses", name).into(),
ObjectSafetyViolation::Method(name, MethodViolationCode::Generic) =>
format!("method `{}` has generic type parameters", name).into(),
- ObjectSafetyViolation::Method(name, MethodViolationCode::NonStandardSelfType) =>
- format!("method `{}` has a non-standard `self` type", name).into(),
+ ObjectSafetyViolation::Method(name, MethodViolationCode::UndispatchableReceiver) =>
+ format!("method `{}`'s receiver cannot be dispatched on", name).into(),
ObjectSafetyViolation::AssociatedConst(name) =>
format!("the trait cannot contain associated consts like `{}`", name).into(),
}
/// e.g., `fn foo<A>()`
Generic,
- /// arbitrary `self` type, e.g. `self: Rc<Self>`
- NonStandardSelfType,
+ /// the method's receiver (`self` argument) can't be dispatched on
+ UndispatchableReceiver,
}
-impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
/// Returns the object safety violations that affect
/// astconv - currently, Self in supertraits. This is needed
pub fn object_safety_violations(self, trait_def_id: DefId)
-> Vec<ObjectSafetyViolation>
{
+ debug!("object_safety_violations: {:?}", trait_def_id);
+
traits::supertrait_def_ids(self, trait_def_id)
.flat_map(|def_id| self.object_safety_violations_for_trait(def_id))
.collect()
method: &ty::AssociatedItem)
-> Option<MethodViolationCode>
{
- // The method's first parameter must be something that derefs (or
- // autorefs) to `&self`. For now, we only accept `self`, `&self`
- // and `Box<Self>`.
+ // The method's first parameter must be named `self`
if !method.method_has_self_argument {
return Some(MethodViolationCode::StaticMethod);
}
let sig = self.fn_sig(method.def_id);
- let self_ty = self.mk_self_type();
- let self_arg_ty = sig.skip_binder().inputs()[0];
- if let ExplicitSelf::Other = ExplicitSelf::determine(self_arg_ty, |ty| ty == self_ty) {
- return Some(MethodViolationCode::NonStandardSelfType);
- }
-
- // The `Self` type is erased, so it should not appear in list of
- // arguments or return type apart from the receiver.
for input_ty in &sig.skip_binder().inputs()[1..] {
if self.contains_illegal_self_type_reference(trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
return Some(MethodViolationCode::WhereClauseReferencesSelf(span));
}
+ let receiver_ty = self.liberate_late_bound_regions(
+ method.def_id,
+ &sig.map_bound(|sig| sig.inputs()[0]),
+ );
+
+ // until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on.
+ // However, this is already considered object-safe. We allow it as a special case here.
+ // FIXME(mikeyhew) get rid of this `if` statement once `receiver_is_dispatchable` allows
+ // `Receiver: Unsize<Receiver[Self => dyn Trait]>`
+ if receiver_ty != self.mk_self_type() {
+ if !self.receiver_is_dispatchable(method, receiver_ty) {
+ return Some(MethodViolationCode::UndispatchableReceiver);
+ } else {
+ // sanity check to make sure the receiver actually has the layout of a pointer
+
+ use ty::layout::Abi;
+
+ let param_env = self.param_env(method.def_id);
+
+ let abi_of_ty = |ty: Ty<'tcx>| -> &Abi {
+ match self.layout_of(param_env.and(ty)) {
+ Ok(layout) => &layout.abi,
+ Err(err) => bug!(
+ "Error: {}\n while computing layout for type {:?}", err, ty
+ )
+ }
+ };
+
+ // e.g. Rc<()>
+ let unit_receiver_ty = self.receiver_for_self_ty(
+ receiver_ty, self.mk_unit(), method.def_id
+ );
+
+ match abi_of_ty(unit_receiver_ty) {
+ &Abi::Scalar(..) => (),
+ abi => bug!("Receiver when Self = () should have a Scalar ABI, found {:?}", abi)
+ }
+
+ let trait_object_ty = self.object_ty_for_trait(
+ trait_def_id, self.mk_region(ty::ReStatic)
+ );
+
+ // e.g. Rc<dyn Trait>
+ let trait_object_receiver = self.receiver_for_self_ty(
+ receiver_ty, trait_object_ty, method.def_id
+ );
+
+ match abi_of_ty(trait_object_receiver) {
+ &Abi::ScalarPair(..) => (),
+ abi => bug!(
+ "Receiver when Self = {} should have a ScalarPair ABI, found {:?}",
+ trait_object_ty, abi
+ )
+ }
+ }
+ }
+
None
}
+    /// Performs a type substitution to produce the version of `receiver_ty` when `Self = self_ty`.
+    /// For example, for `receiver_ty = Rc<Self>` and `self_ty = Foo`, returns `Rc<Foo>`.
+    fn receiver_for_self_ty(
+        self, receiver_ty: Ty<'tcx>, self_ty: Ty<'tcx>, method_def_id: DefId
+    ) -> Ty<'tcx> {
+        // Build substitutions identical to the method's own generics, except that
+        // the `Self` parameter (always index 0) is replaced by `self_ty`; every
+        // other parameter is mapped to itself.
+        let substs = Substs::for_item(self, method_def_id, |param, _| {
+            if param.index == 0 {
+                self_ty.into()
+            } else {
+                self.mk_param_from_def(param)
+            }
+        });
+
+        receiver_ty.subst(self, substs)
+    }
+
+    /// Creates the object type for the current trait. For example,
+    /// if the current trait is `Deref`, then this will be
+    /// `dyn Deref<Target = Self::Target> + 'static`.
+    fn object_ty_for_trait(self, trait_def_id: DefId, lifetime: ty::Region<'tcx>) -> Ty<'tcx> {
+        debug!("object_ty_for_trait: trait_def_id={:?}", trait_def_id);
+
+        let trait_ref = ty::TraitRef::identity(self, trait_def_id);
+
+        // The principal predicate: `Trait<...>` with `Self` erased.
+        let trait_predicate = ty::ExistentialPredicate::Trait(
+            ty::ExistentialTraitRef::erase_self_ty(self, trait_ref)
+        );
+
+        // Collect every associated type of the trait and all of its supertraits;
+        // each one must be bound (`Assoc = Self::Assoc`) in the object type.
+        let mut associated_types = traits::supertraits(self, ty::Binder::dummy(trait_ref))
+            .flat_map(|trait_ref| self.associated_items(trait_ref.def_id()))
+            .filter(|item| item.kind == ty::AssociatedKind::Type)
+            .collect::<Vec<_>>();
+
+        // existential predicates need to be in a specific order
+        associated_types.sort_by_key(|item| self.def_path_hash(item.def_id));
+
+        // Bind each associated type to its projection from the identity substs,
+        // i.e. `Assoc = <Self as Trait<...>>::Assoc`.
+        let projection_predicates = associated_types.into_iter().map(|item| {
+            ty::ExistentialPredicate::Projection(ty::ExistentialProjection {
+                ty: self.mk_projection(item.def_id, trait_ref.substs),
+                item_def_id: item.def_id,
+                substs: trait_ref.substs,
+            })
+        });
+
+        let existential_predicates = self.mk_existential_predicates(
+            iter::once(trait_predicate).chain(projection_predicates)
+        );
+
+        // NOTE(review): the binder is a dummy, so this assumes the predicates
+        // contain no late-bound regions of their own — confirm for HRTB traits.
+        let object_ty = self.mk_dynamic(
+            ty::Binder::dummy(existential_predicates),
+            lifetime,
+        );
+
+        debug!("object_ty_for_trait: object_ty=`{}`", object_ty);
+
+        object_ty
+    }
+
+    /// Checks whether the method's receiver (the `self` argument) can be dispatched on when
+    /// `Self` is a trait object. We require that `DispatchFromDyn` be implemented for the
+    /// receiver type in the following way:
+    /// - let `Receiver` be the type of the `self` argument, i.e `Self`, `&Self`, `Rc<Self>`
+    /// - require the following bound:
+    ///
+    ///         Receiver[Self => T]: DispatchFromDyn<Receiver[Self => dyn Trait]>
+    ///
+    ///   where `Foo[X => Y]` means "the same type as `Foo`, but with `X` replaced with `Y`"
+    ///   (substitution notation).
+    ///
+    /// Some examples of receiver types and their required obligation:
+    /// - `&'a mut self` requires `&'a mut Self: DispatchFromDyn<&'a mut dyn Trait>`,
+    /// - `self: Rc<Self>` requires `Rc<Self>: DispatchFromDyn<Rc<dyn Trait>>`,
+    /// - `self: Pin<Box<Self>>` requires `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<dyn Trait>>>`.
+    ///
+    /// The only case where the receiver is not dispatchable, but is still a valid receiver
+    /// type (just not object-safe), is when there is more than one level of pointer indirection.
+    /// e.g. `self: &&Self`, `self: &Rc<Self>`, `self: Box<Box<Self>>`. In these cases, there
+    /// is no way, or at least no inexpensive way, to coerce the receiver from the version where
+    /// `Self = dyn Trait` to the version where `Self = T`, where `T` is the unknown erased type
+    /// contained by the trait object, because the object that needs to be coerced is behind
+    /// a pointer.
+    ///
+    /// In practice, we cannot use `dyn Trait` explicitly in the obligation because it would result
+    /// in a new check that `Trait` is object safe, creating a cycle. So instead, we fudge a little
+    /// by introducing a new type parameter `U` such that `Self: Unsize<U>` and `U: Trait + ?Sized`,
+    /// and use `U` in place of `dyn Trait`. Written as a chalk-style query:
+    ///
+    ///     forall (U: Trait + ?Sized) {
+    ///         if (Self: Unsize<U>) {
+    ///             Receiver: DispatchFromDyn<Receiver[Self => U]>
+    ///         }
+    ///     }
+    ///
+    /// for `self: &'a mut Self`, this means `&'a mut Self: DispatchFromDyn<&'a mut U>`,
+    /// for `self: Rc<Self>`, this means `Rc<Self>: DispatchFromDyn<Rc<U>>`,
+    /// for `self: Pin<Box<Self>>`, this means `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<U>>>`.
+    //
+    // FIXME(mikeyhew) when unsized receivers are implemented as part of unsized rvalues, add this
+    // fallback query: `Receiver: Unsize<Receiver[Self => U]>` to support receivers like
+    // `self: Wrapper<Self>`.
+    fn receiver_is_dispatchable(
+        self,
+        method: &ty::AssociatedItem,
+        receiver_ty: Ty<'tcx>,
+    ) -> bool {
+        debug!("receiver_is_dispatchable: method = {:?}, receiver_ty = {:?}", method, receiver_ty);
+
+        // Both lang items must exist for the check to be expressible; if either is
+        // missing (e.g. a minimal `core`), conservatively report "not dispatchable".
+        let traits = (self.lang_items().unsize_trait(),
+                      self.lang_items().dispatch_from_dyn_trait());
+        let (unsize_did, dispatch_from_dyn_did) = if let (Some(u), Some(cu)) = traits {
+            (u, cu)
+        } else {
+            debug!("receiver_is_dispatchable: Missing Unsize or DispatchFromDyn traits");
+            return false;
+        };
+
+        // the type `U` in the query
+        // use a bogus type parameter to mimic a forall(U) query using u32::MAX for now.
+        // FIXME(mikeyhew) this is a total hack, and we should replace it when real forall queries
+        // are implemented
+        let unsized_self_ty: Ty<'tcx> = self.mk_ty_param(
+            ::std::u32::MAX,
+            Name::intern("RustaceansAreAwesome").as_interned_str(),
+        );
+
+        // `Receiver[Self => U]`
+        let unsized_receiver_ty = self.receiver_for_self_ty(
+            receiver_ty, unsized_self_ty, method.def_id
+        );
+
+        // create a modified param env, with `Self: Unsize<U>` and `U: Trait` added to caller
+        // bounds. `U: ?Sized` is already implied here.
+        let param_env = {
+            let mut param_env = self.param_env(method.def_id);
+
+            // Self: Unsize<U>
+            let unsize_predicate = ty::TraitRef {
+                def_id: unsize_did,
+                substs: self.mk_substs_trait(self.mk_self_type(), &[unsized_self_ty.into()]),
+            }.to_predicate();
+
+            // U: Trait<Arg1, ..., ArgN>
+            let trait_predicate = {
+                let trait_def_id = method.container.assert_trait();
+                let substs = Substs::for_item(self, trait_def_id, |param, _| {
+                    if param.index == 0 {
+                        unsized_self_ty.into()
+                    } else {
+                        self.mk_param_from_def(param)
+                    }
+                });
+
+                // The bound must reference the trait itself; using `unsize_did` here
+                // would build the bogus bound `U: Unsize<Arg1, ..., ArgN>` instead.
+                ty::TraitRef {
+                    def_id: trait_def_id,
+                    substs,
+                }.to_predicate()
+            };
+
+            let caller_bounds: Vec<Predicate<'tcx>> = param_env.caller_bounds.iter().cloned()
+                .chain(iter::once(unsize_predicate))
+                .chain(iter::once(trait_predicate))
+                .collect();
+
+            param_env.caller_bounds = self.intern_predicates(&caller_bounds);
+
+            param_env
+        };
+
+        // Receiver: DispatchFromDyn<Receiver[Self => U]>
+        let obligation = {
+            let predicate = ty::TraitRef {
+                def_id: dispatch_from_dyn_did,
+                substs: self.mk_substs_trait(receiver_ty, &[unsized_receiver_ty.into()]),
+            }.to_predicate();
+
+            Obligation::new(
+                ObligationCause::dummy(),
+                param_env,
+                predicate,
+            )
+        };
+
+        self.infer_ctxt().enter(|ref infcx| {
+            // the receiver is dispatchable iff the obligation holds
+            infcx.predicate_must_hold(&obligation)
+        })
+    }
+
fn contains_illegal_self_type_reference(self,
trait_def_id: DefId,
ty: Ty<'tcx>)
use super::Selection;
use super::SelectionContext;
use super::SelectionError;
-use super::VtableClosureData;
-use super::VtableGeneratorData;
-use super::VtableFnPointerData;
-use super::VtableImplData;
+use super::{VtableImplData, VtableClosureData, VtableGeneratorData, VtableFnPointerData};
use super::util;
use hir::def_id::DefId;
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::Opaque(def_id, substs) if !substs.has_escaping_regions() => { // (*)
+ ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => { // (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
Reveal::UserFacing => ty,
}
}
- ty::Projection(ref data) if !data.has_escaping_regions() => { // (*)
+ ty::Projection(ref data) if !data.has_escaping_bound_vars() => { // (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
super::VtableClosure(_) |
super::VtableGenerator(_) |
super::VtableFnPointer(_) |
- super::VtableObject(_) => {
+ super::VtableObject(_) |
+ super::VtableTraitAlias(_) => {
debug!("assemble_candidates_from_impls: vtable={:?}",
vtable);
true
confirm_object_candidate(selcx, obligation, obligation_trait_ref),
super::VtableAutoImpl(..) |
super::VtableParam(..) |
- super::VtableBuiltin(..) =>
+ super::VtableBuiltin(..) |
+ super::VtableTraitAlias(..) =>
// we don't create Select candidates with this kind of resolution
span_bug!(
obligation.cause.span,
impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>)
-> Progress<'tcx>
{
- let VtableImplData { substs, nested, impl_def_id } = impl_vtable;
+ let VtableImplData { impl_def_id, substs, nested } = impl_vtable;
let tcx = selcx.tcx();
let param_env = obligation.param_env;
let infcx = selcx.infcx();
// We don't do cross-snapshot caching of obligations with escaping regions,
// so there's no cache key to use
- predicate.no_late_bound_regions()
+ predicate.no_bound_vars()
.map(|predicate| ProjectionCacheKey {
// We don't attempt to match up with a specific type-variable state
// from a specific call to `opt_normalize_projection_type` - if
| ty::Param(_)
| ty::Opaque(..)
| ty::Infer(_)
+ | ty::Bound(..)
| ty::Generator(..) => false,
ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"),
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::Opaque(def_id, substs) if !substs.has_escaping_regions() => {
+ ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => {
// (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
}
}
- ty::Projection(ref data) if !data.has_escaping_regions() => {
+ ty::Projection(ref data) if !data.has_escaping_bound_vars() => {
// (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
ty::Predicate::ClosureKind(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::ConstEvaluatable(..) => None,
- ty::Predicate::RegionOutlives(ref data) => data.no_late_bound_regions().map(
+ ty::Predicate::RegionOutlives(ref data) => data.no_bound_vars().map(
|ty::OutlivesPredicate(r_a, r_b)| OutlivesBound::RegionSubRegion(r_b, r_a),
),
})
tcx: TyCtxt<'_, 'gcx, 'tcx>,
canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>,
) -> Fallible<CanonicalizedQueryResponse<'gcx, Self::QueryResponse>> {
- // FIXME the query should take a `ImpliedOutlivesBounds`
- let Canonical {
- variables,
- value:
- ParamEnvAnd {
- param_env,
- value: ImpliedOutlivesBounds { ty },
- },
- } = canonicalized;
- let canonicalized = Canonical {
- variables,
- value: param_env.and(ty),
- };
+ // FIXME this `unchecked_map` is only necessary because the
+ // query is defined as taking a `ParamEnvAnd<Ty>`; it should
+ // take a `ImpliedOutlivesBounds` instead
+ let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| {
+ let ImpliedOutlivesBounds { ty } = value;
+ param_env.and(ty)
+ });
tcx.implied_outlives_bounds(canonicalized)
}
// FIXME convert to the type expected by the `dropck_outlives`
// query. This should eventually be fixed by changing the
// *underlying query*.
- let Canonical {
- variables,
- value:
- ParamEnvAnd {
- param_env,
- value: DropckOutlives { dropped_ty },
- },
- } = canonicalized;
- let canonicalized = Canonical {
- variables,
- value: param_env.and(dropped_ty),
- };
+ let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| {
+ let DropckOutlives { dropped_ty } = value;
+ param_env.and(dropped_ty)
+ });
tcx.dropck_outlives(canonicalized)
}
use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented};
use super::{
VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl,
- VtableObject, VtableParam,
+ VtableObject, VtableParam, VtableTraitAlias,
};
use super::{
VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData,
- VtableGeneratorData, VtableImplData, VtableObjectData,
+ VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData,
};
use dep_graph::{DepKind, DepNodeIndex};
/// types generated for a fn pointer type (e.g., `fn(int)->int`)
FnPointerCandidate,
+ TraitAliasCandidate(DefId),
+
ObjectCandidate,
BuiltinObjectCandidate,
ImplCandidate(def_id) => ImplCandidate(def_id),
AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
ProjectionCandidate => ProjectionCandidate,
+ ClosureCandidate => ClosureCandidate,
+ GeneratorCandidate => GeneratorCandidate,
FnPointerCandidate => FnPointerCandidate,
+ TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id),
ObjectCandidate => ObjectCandidate,
BuiltinObjectCandidate => BuiltinObjectCandidate,
BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
- ClosureCandidate => ClosureCandidate,
- GeneratorCandidate => GeneratorCandidate,
ParamCandidate(ref trait_ref) => {
return tcx.lift(trait_ref).map(ParamCandidate);
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
debug!("select({:?})", obligation);
- debug_assert!(!obligation.predicate.has_escaping_regions());
+ debug_assert!(!obligation.predicate.has_escaping_bound_vars());
let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
match obligation.predicate {
ty::Predicate::Trait(ref t) => {
- debug_assert!(!t.has_escaping_regions());
+ debug_assert!(!t.has_escaping_bound_vars());
let obligation = obligation.with(t.clone());
self.evaluate_trait_predicate_recursively(previous_stack, obligation)
}
},
ty::Predicate::TypeOutlives(ref binder) => {
- assert!(!binder.has_escaping_regions());
- // Check if the type has higher-ranked regions.
- if binder.skip_binder().0.has_escaping_regions() {
+ assert!(!binder.has_escaping_bound_vars());
+ // Check if the type has higher-ranked vars.
+ if binder.skip_binder().0.has_escaping_bound_vars() {
// If so, this obligation is an error (for now). Eventually we should be
// able to support additional cases here, like `for<'a> &'a str: 'a`.
Ok(EvaluatedToErr)
}
} else {
- // If the type has no late bound regions, then if we assign all
+ // If the type has no late bound vars, then if we assign all
// the inference variables in it to be 'static, then the type
// will be 'static itself.
//
"candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
cache_fresh_trait_pred, stack
);
- debug_assert!(!stack.obligation.predicate.has_escaping_regions());
+ debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
if let Some(c) =
self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred)
// Winnow, but record the exact outcome of evaluation, which
// is needed for specialization. Propagate overflow if it occurs.
- let mut candidates = candidates.into_iter()
+ let mut candidates = candidates
+ .into_iter()
.map(|c| match self.evaluate_candidate(stack, &c) {
Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate {
candidate: c,
Ok(_) => Ok(None),
Err(OverflowError) => Err(Overflow),
})
- .flat_map(Result::transpose)
- .collect::<Result<Vec<_>, _>>()?;
+ .flat_map(Result::transpose)
+ .collect::<Result<Vec<_>, _>>()?;
debug!(
"winnowed to {} candidates for {:?}: {:?}",
let predicate = self.infcx()
.resolve_type_vars_if_possible(&obligation.predicate);
- // ok to skip binder because of the nature of the
+ // OK to skip binder because of the nature of the
// trait-ref-is-knowable check, which does not care about
// bound regions
let trait_ref = predicate.skip_binder().trait_ref;
ambiguous: false,
};
+ self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?;
+
// Other bounds. Consider both in-scope bounds from fn decl
// and applicable impls. There is a certain set of precedence rules here.
let def_id = obligation.predicate.def_id();
placeholder_map: &infer::PlaceholderMap<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> bool {
- debug_assert!(!skol_trait_ref.has_escaping_regions());
+ debug_assert!(!skol_trait_ref.has_escaping_bound_vars());
if self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(ty::Binder::dummy(skol_trait_ref), trait_bound)
return Ok(());
}
- // ok to skip binder because the substs on generator types never
+ // OK to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = *obligation.self_ty().skip_binder();
}
};
- // ok to skip binder because the substs on closure types never
+ // OK to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
match obligation.self_ty().skip_binder().sty {
return Ok(());
}
- // ok to skip binder because what we are inspecting doesn't involve bound regions
+ // OK to skip binder because what we are inspecting doesn't involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
ty::Infer(ty::TyVar(_)) => {
obligation.self_ty().skip_binder()
);
- // Object-safety candidates are only applicable to object-safe
- // traits. Including this check is useful because it helps
- // inference in cases of traits like `BorrowFrom`, which are
- // not object-safe, and which rely on being able to infer the
- // self-type from one of the other inputs. Without this check,
- // these cases wind up being considered ambiguous due to a
- // (spurious) ambiguity introduced here.
- let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
- if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) {
- return;
- }
-
self.probe(|this, _snapshot| {
// the code below doesn't care about regions, and the
// self-ty here doesn't escape this probe, so just erase
// T: Trait
// so it seems ok if we (conservatively) fail to accept that `Unsize`
// obligation above. Should be possible to extend this in the future.
- let source = match obligation.self_ty().no_late_bound_regions() {
+ let source = match obligation.self_ty().no_bound_vars() {
Some(t) => t,
None => {
// Don't add any candidates if there are bound regions.
}
}
+    /// Assembles a `TraitAliasCandidate` when the obligation's predicate names
+    /// a trait alias (`trait Alias = A + B;`) rather than a real trait.
+    fn assemble_candidates_for_trait_alias(
+        &mut self,
+        obligation: &TraitObligation<'tcx>,
+        candidates: &mut SelectionCandidateSet<'tcx>,
+    ) -> Result<(), SelectionError<'tcx>> {
+        // OK to skip binder here because the tests we do below do not involve bound regions
+        let self_ty = *obligation.self_ty().skip_binder();
+        debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);
+
+        let def_id = obligation.predicate.def_id();
+
+        if ty::is_trait_alias(self.tcx(), def_id) {
+            // `DefId` is `Copy`; no `clone()` needed.
+            candidates.vec.push(TraitAliasCandidate(def_id));
+        }
+
+        Ok(())
+    }
+
///////////////////////////////////////////////////////////////////////////
// WINNOW
//
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
- | BuiltinCandidate { .. } => {
+ | BuiltinCandidate { .. }
+ | TraitAliasCandidate(..) => {
// Global bounds from the where clause should be ignored
// here (see issue #50825). Otherwise, we have a where
// clause so don't go around looking for impls.
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
- | BuiltinCandidate { .. } => true,
+ | BuiltinCandidate { .. }
+ | TraitAliasCandidate(..) => true,
ObjectCandidate | ProjectionCandidate => {
// Arbitrarily give param candidates priority
// over projection and object candidates.
ty::Infer(ty::TyVar(_)) => Ambiguous,
ty::UnnormalizedProjection(..)
- | ty::Infer(ty::BoundTy(_))
+ | ty::Bound(_)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
}
ty::UnnormalizedProjection(..)
- | ty::Infer(ty::BoundTy(_))
+ | ty::Bound(_)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
| ty::Param(..)
| ty::Foreign(..)
| ty::Projection(..)
- | ty::Infer(ty::BoundTy(_))
+ | ty::Bound(_)
| ty::Infer(ty::TyVar(_))
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
Ok(VtableParam(obligations))
}
+ ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate(
+ obligation,
+ impl_def_id,
+ ))),
+
AutoImplCandidate(trait_def_id) => {
let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
Ok(VtableAutoImpl(data))
}
- ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate(
- obligation,
- impl_def_id,
- ))),
+ ProjectionCandidate => {
+ self.confirm_projection_candidate(obligation);
+ Ok(VtableParam(Vec::new()))
+ }
ClosureCandidate => {
let vtable_closure = self.confirm_closure_candidate(obligation)?;
Ok(VtableGenerator(vtable_generator))
}
- BuiltinObjectCandidate => {
- // This indicates something like `(Trait+Send) :
- // Send`. In this case, we know that this holds
- // because that's what the object type is telling us,
- // and there's really no additional obligations to
- // prove and no types in particular to unify etc.
- Ok(VtableParam(Vec::new()))
+ FnPointerCandidate => {
+ let data = self.confirm_fn_pointer_candidate(obligation)?;
+ Ok(VtableFnPointer(data))
+ }
+
+ TraitAliasCandidate(alias_def_id) => {
+ let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
+ Ok(VtableTraitAlias(data))
}
ObjectCandidate => {
Ok(VtableObject(data))
}
- FnPointerCandidate => {
- let data = self.confirm_fn_pointer_candidate(obligation)?;
- Ok(VtableFnPointer(data))
- }
-
- ProjectionCandidate => {
- self.confirm_projection_candidate(obligation);
+ BuiltinObjectCandidate => {
+ // This indicates something like `(Trait+Send) :
+ // Send`. In this case, we know that this holds
+ // because that's what the object type is telling us,
+ // and there's really no additional obligations to
+ // prove and no types in particular to unify etc.
Ok(VtableParam(Vec::new()))
}
self.vtable_auto_impl(obligation, trait_def_id, types)
}
- /// See `confirm_auto_impl_candidate`
+ /// See `confirm_auto_impl_candidate`.
fn vtable_auto_impl(
&mut self,
obligation: &TraitObligation<'tcx>,
// this time not in a probe.
self.in_snapshot(|this, snapshot| {
let (substs, placeholder_map) = this.rematch_impl(impl_def_id, obligation, snapshot);
- debug!("confirm_impl_candidate substs={:?}", substs);
+ debug!("confirm_impl_candidate: substs={:?}", substs);
let cause = obligation.derived_cause(ImplDerivedObligation);
this.vtable_impl(
impl_def_id,
) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> {
debug!("confirm_object_candidate({:?})", obligation);
- // FIXME skipping binder here seems wrong -- we should
- // probably flatten the binder from the obligation and the
- // binder from the object. Have to try to make a broken test
- // case that results. -nmatsakis
+ // FIXME(nmatsakis) skipping binder here seems wrong -- we should
+ // probably flatten the binder from the obligation and the binder
+ // from the object. Have to try to make a broken test case that
+ // results.
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let poly_trait_ref = match self_ty.sty {
- ty::Dynamic(ref data, ..) => {
- data.principal().with_self_ty(self.tcx(), self_ty)
- }
+ ty::Dynamic(ref data, ..) => data.principal().with_self_ty(self.tcx(), self_ty),
_ => span_bug!(obligation.cause.span, "object candidate with non-object"),
};
) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!("confirm_fn_pointer_candidate({:?})", obligation);
- // ok to skip binder; it is reintroduced below
+ // OK to skip binder; it is reintroduced below
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let sig = self_ty.fn_sig(self.tcx());
})
}
+    /// Confirms a `TraitAliasCandidate`: the alias itself always applies, so this
+    /// just expands to the obligations implied by the alias's own bounds.
+    fn confirm_trait_alias_candidate(
+        &mut self,
+        obligation: &TraitObligation<'tcx>,
+        alias_def_id: DefId,
+    ) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> {
+        debug!(
+            "confirm_trait_alias_candidate({:?}, {:?})",
+            obligation, alias_def_id
+        );
+
+        self.in_snapshot(|this, snapshot| {
+            // Open the obligation's binder; the placeholder map lets the nested
+            // obligations be mapped back out of the snapshot below.
+            let (predicate, placeholder_map) = this.infcx()
+                .replace_late_bound_regions_with_placeholders(&obligation.predicate);
+            let trait_ref = predicate.trait_ref;
+            let trait_def_id = trait_ref.def_id;
+            let substs = trait_ref.substs;
+
+            // The where-clauses of the alias become nested obligations.
+            let trait_obligations = this.impl_or_trait_obligations(
+                obligation.cause.clone(),
+                obligation.recursion_depth,
+                obligation.param_env,
+                trait_def_id,
+                &substs,
+                placeholder_map,
+                snapshot,
+            );
+
+            debug!(
+                "confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}",
+                trait_def_id, trait_obligations
+            );
+
+            VtableTraitAliasData {
+                alias_def_id,
+                // Use field-init shorthand, matching the file's other vtable literals.
+                substs,
+                nested: trait_obligations,
+            }
+        })
+    }
+
fn confirm_generator_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
- // ok to skip binder because the substs on generator types never
+ // OK to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = self.infcx
.fn_trait_kind(obligation.predicate.def_id())
.unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));
- // ok to skip binder because the substs on closure types never
+ // OK to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = self.infcx
// assemble_candidates_for_unsizing should ensure there are no late bound
// regions here. See the comment there for more details.
let source = self.infcx
- .shallow_resolve(obligation.self_ty().no_late_bound_regions().unwrap());
+ .shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
let target = obligation
.predicate
.skip_binder()
closure_def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
+ debug!(
+ "closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})",
+ obligation, closure_def_id, substs,
+ );
let closure_type = self.infcx.closure_sig(closure_def_id, substs);
+ debug!(
+ "closure_trait_ref_unnormalized: closure_type = {:?}",
+ closure_type
+ );
+
// (1) Feels icky to skip the binder here, but OTOH we know
// that the self-type is an unboxed closure type and hence is
// in fact unparameterized (or at least does not reference any
super::VtableParam(ref n) => write!(f, "VtableParam({:?})", n),
super::VtableBuiltin(ref d) => write!(f, "{:?}", d),
+
+ super::VtableTraitAlias(ref d) => write!(f, "{:?}", d),
}
}
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})",
+ "VtableImplData(impl_def_id={:?}, substs={:?}, nested={:?})",
self.impl_def_id, self.substs, self.nested
)
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "VtableGenerator(generator_def_id={:?}, substs={:?}, nested={:?})",
+ "VtableGeneratorData(generator_def_id={:?}, substs={:?}, nested={:?})",
self.generator_def_id, self.substs, self.nested
)
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})",
+ "VtableClosureData(closure_def_id={:?}, substs={:?}, nested={:?})",
self.closure_def_id, self.substs, self.nested
)
}
impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "VtableBuiltin(nested={:?})", self.nested)
+ write!(f, "VtableBuiltinData(nested={:?})", self.nested)
}
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "VtableObject(upcast={:?}, vtable_base={}, nested={:?})",
+ "VtableObjectData(upcast={:?}, vtable_base={}, nested={:?})",
self.upcast_trait_ref, self.vtable_base, self.nested
)
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "VtableFnPointer(fn_ty={:?}, nested={:?})",
+ "VtableFnPointerData(fn_ty={:?}, nested={:?})",
self.fn_ty, self.nested
)
}
}
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableTraitAliasData<'tcx, N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Print the full type name, consistent with the sibling `Vtable*Data`
+        // Debug impls (`VtableImplData`, `VtableGeneratorData`, ...).
+        write!(
+            f,
+            "VtableTraitAliasData(alias_def_id={:?}, substs={:?}, nested={:?})",
+            self.alias_def_id, self.substs, self.nested
+        )
+    }
+}
+
impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "FulfillmentError({:?},{:?})", self.obligation, self.code)
nested,
})
),
+ traits::VtableTraitAlias(traits::VtableTraitAliasData {
+ alias_def_id,
+ substs,
+ nested,
+ }) => tcx.lift(&substs).map(|substs|
+ traits::VtableTraitAlias(traits::VtableTraitAliasData {
+ alias_def_id,
+ substs,
+ nested,
+ })
+ ),
}
}
}
} where N: TypeFoldable<'tcx>
}
+BraceStructTypeFoldableImpl! {
+ impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableTraitAliasData<'tcx, N> {
+ alias_def_id, substs, nested
+ } where N: TypeFoldable<'tcx>
+}
+
EnumTypeFoldableImpl! {
impl<'tcx, N> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> {
(traits::VtableImpl)(a),
(traits::VtableParam)(a),
(traits::VtableBuiltin)(a),
(traits::VtableObject)(a),
+ (traits::VtableTraitAlias)(a),
} where N: TypeFoldable<'tcx>
}
pub fn elaborate_trait_refs<'cx, 'gcx, 'tcx>(
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
- trait_refs: &[ty::PolyTraitRef<'tcx>])
+ trait_refs: impl Iterator<Item = ty::PolyTraitRef<'tcx>>)
-> Elaborator<'cx, 'gcx, 'tcx>
{
- let predicates = trait_refs.iter()
- .map(|trait_ref| trait_ref.to_predicate())
+ let predicates = trait_refs.map(|trait_ref| trait_ref.to_predicate())
.collect();
elaborate_predicates(tcx, predicates)
}
}
pub fn transitive_bounds<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>,
- bounds: &[ty::PolyTraitRef<'tcx>])
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>)
-> Supertraits<'cx, 'gcx, 'tcx>
{
elaborate_trait_refs(tcx, bounds).filter_to_traits()
}
}
-impl<'tcx,I:Iterator<Item=ty::Predicate<'tcx>>> Iterator for FilterToTraits<I> {
+impl<'tcx,I:Iterator<Item = ty::Predicate<'tcx>>> Iterator for FilterToTraits<I> {
type Item = ty::PolyTraitRef<'tcx>;
fn next(&mut self) -> Option<ty::PolyTraitRef<'tcx>> {
}
/// Should we emit EndRegion MIR statements? These are consumed by
- /// MIR borrowck, but not when NLL is used. They are also consumed
- /// by the validation stuff.
+ /// MIR borrowck, but not when NLL is used.
pub fn emit_end_regions(self) -> bool {
self.sess.opts.debugging_opts.emit_end_regions ||
- self.sess.opts.debugging_opts.mir_emit_validate > 0 ||
self.use_mir_borrowck()
}
sty_debug_print!(
self,
Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr,
- Generator, GeneratorWitness, Dynamic, Closure, Tuple,
+ Generator, GeneratorWitness, Dynamic, Closure, Tuple, Bound,
Param, Infer, UnnormalizedProjection, Projection, Opaque, Foreign);
println!("Substs interner: #{}", self.interners.substs.borrow().len());
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
-
- OldStyleLUB(Box<TypeError<'tcx>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
- OldStyleLUB(ref err) => {
- write!(f, "{}", err)
- }
}
}
}
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
- ty::Infer(ty::BoundTy(_)) |
+ ty::Bound(_) |
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
}
}
},
- OldStyleLUB(err) => {
- db.note("this was previously accepted by the compiler but has been phased out");
- db.note("for more information, see https://github.com/rust-lang/rust/issues/45852");
-
- self.note_and_explain_type_err(db, &err, sp);
- }
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
ty::Foreign(def_id) => {
Some(ForeignSimplifiedType(def_id))
}
- ty::Infer(_) | ty::Error => None,
+ ty::Bound(..) | ty::Infer(_) | ty::Error => None,
}
}
self.add_substs(&substs.substs);
}
+ &ty::Bound(bound_ty) => {
+ self.add_binder(bound_ty.index);
+ }
+
&ty::Infer(infer) => {
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); // it might, right?
self.add_flags(TypeFlags::HAS_TY_INFER);
match infer {
ty::FreshTy(_) |
ty::FreshIntTy(_) |
- ty::FreshFloatTy(_) |
- ty::BoundTy(_) => {
- self.add_flags(TypeFlags::HAS_CANONICAL_VARS);
+ ty::FreshFloatTy(_) => {
}
ty::TyVar(_) |
&ty::Projection(ref data) => {
// currently we can't normalize projections that
// include bound regions, so track those separately.
- if !data.has_escaping_regions() {
+ if !data.has_escaping_bound_vars() {
self.add_flags(TypeFlags::HAS_NORMALIZABLE_PROJECTION);
}
self.add_flags(TypeFlags::HAS_PROJECTION);
/// bound by `binder` or bound by some binder outside of `binder`.
/// If `binder` is `ty::INNERMOST`, this indicates whether
/// there are any late-bound regions that appear free.
- fn has_regions_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
- self.visit_with(&mut HasEscapingRegionsVisitor { outer_index: binder })
+ fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder })
}
/// True if this `self` has any regions that escape `binder` (and
/// hence are not bound by it).
- fn has_regions_bound_above(&self, binder: ty::DebruijnIndex) -> bool {
- self.has_regions_bound_at_or_above(binder.shifted_in(1))
+ fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.has_vars_bound_at_or_above(binder.shifted_in(1))
}
- fn has_escaping_regions(&self) -> bool {
- self.has_regions_bound_at_or_above(ty::INNERMOST)
+ fn has_escaping_bound_vars(&self) -> bool {
+ self.has_vars_bound_at_or_above(ty::INNERMOST)
}
fn has_type_flags(&self, flags: TypeFlags) -> bool {
}
///////////////////////////////////////////////////////////////////////////
-// Late-bound region replacer
+// Bound vars replacer
-// Replaces the escaping regions in a type.
-
-struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+/// Replaces the escaping bound vars (late bound regions or bound types) in a type.
+struct BoundVarReplacer<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
/// As with `RegionFolder`, represents the index of a binder *just outside*
current_index: ty::DebruijnIndex,
fld_r: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
- map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>
+ fld_t: &'a mut (dyn FnMut(ty::BoundTy) -> ty::Ty<'tcx> + 'a),
+}
+
+impl<'a, 'gcx, 'tcx> BoundVarReplacer<'a, 'gcx, 'tcx> {
+ fn new<F, G>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ fld_r: &'a mut F,
+ fld_t: &'a mut G
+ ) -> Self
+ where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>
+ {
+ BoundVarReplacer {
+ tcx,
+ current_index: ty::INNERMOST,
+ fld_r,
+ fld_t,
+ }
+ }
+}
+
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for BoundVarReplacer<'a, 'gcx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match t.sty {
+ ty::Bound(bound_ty) => {
+ if bound_ty.index == self.current_index {
+ let fld_t = &mut self.fld_t;
+ let ty = fld_t(bound_ty);
+ ty::fold::shift_vars(
+ self.tcx,
+ &ty,
+ self.current_index.as_u32()
+ )
+ } else {
+ t
+ }
+ }
+ _ => {
+ if !t.has_vars_bound_at_or_above(self.current_index) {
+ // Nothing more to substitute.
+ t
+ } else {
+ t.super_fold_with(self)
+ }
+ }
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
+ let fld_r = &mut self.fld_r;
+ let region = fld_r(br);
+ if let ty::ReLateBound(debruijn1, br) = *region {
+ // If the callback returns a late-bound region,
+ // that region should always use the INNERMOST
+ // debruijn index. Then we adjust it to the
+ // correct depth.
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.mk_region(ty::ReLateBound(debruijn, br))
+ } else {
+ region
+ }
+ }
+ _ => r
+ }
+ }
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// same `BoundRegion` will reuse the previous result. A map is
/// returned at the end with each bound region and the free region
/// that replaced it.
- pub fn replace_late_bound_regions<T,F>(self,
+ ///
+ /// This method only replaces late bound regions and the result may still
+ /// contain escaping bound types.
+ pub fn replace_late_bound_regions<T, F>(
+ self,
value: &Binder<T>,
- mut f: F)
- -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
- where F : FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
- T : TypeFoldable<'tcx>,
+ mut fld_r: F
+ ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+ where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: TypeFoldable<'tcx>
{
- let mut replacer = RegionReplacer::new(self, &mut f);
+ let mut map = BTreeMap::new();
+ let mut real_fldr = |br| {
+ *map.entry(br).or_insert_with(|| fld_r(br))
+ };
+
+ // identity for bound types
+ let mut fld_t = |bound_ty| self.mk_ty(ty::Bound(bound_ty));
+
+ let mut replacer = BoundVarReplacer::new(self, &mut real_fldr, &mut fld_t);
let result = value.skip_binder().fold_with(&mut replacer);
- (result, replacer.map)
+ (result, map)
+ }
+
+ /// Replace all escaping bound vars. The `fld_r` closure replaces escaping
+ /// bound regions while the `fld_t` closure replaces escaping bound types.
+ pub fn replace_escaping_bound_vars<T, F, G>(
+ self,
+ value: &T,
+ mut fld_r: F,
+ mut fld_t: G
+ ) -> T
+ where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>,
+ T: TypeFoldable<'tcx>
+ {
+ if !value.has_escaping_bound_vars() {
+ value.clone()
+ } else {
+ let mut replacer = BoundVarReplacer::new(self, &mut fld_r, &mut fld_t);
+ let result = value.fold_with(&mut replacer);
+ result
+ }
+ }
+
+ /// Replace all types or regions bound by the given `Binder`. The `fld_r`
+ /// closure replaces bound regions while the `fld_t` closure replaces bound
+ /// types.
+ pub fn replace_bound_vars<T, F, G>(
+ self,
+ value: &Binder<T>,
+ fld_r: F,
+ fld_t: G
+ ) -> T
+ where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>,
+ T: TypeFoldable<'tcx>
+ {
+ self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t)
}
/// Replace any late-bound regions bound in `value` with
}
}
-impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> {
- fn new<F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F)
- -> RegionReplacer<'a, 'gcx, 'tcx>
- where F : FnMut(ty::BoundRegion) -> ty::Region<'tcx>
- {
- RegionReplacer {
+///////////////////////////////////////////////////////////////////////////
+// Shifter
+//
+// Shifts the De Bruijn indices on all escaping bound vars by a
+// fixed amount. Useful in substitution or when otherwise introducing
+// a binding level that is not intended to capture the existing bound
+// vars. See comment on `shift_vars_through_binders` method in
+// `subst.rs` for more details.
+
+struct Shifter<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+
+ current_index: ty::DebruijnIndex,
+ amount: u32,
+}
+
+impl Shifter<'a, 'gcx, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, amount: u32) -> Self {
+ Shifter {
tcx,
current_index: ty::INNERMOST,
- fld_r,
- map: BTreeMap::default()
+ amount,
}
}
}
-impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Shifter<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
t
}
- fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !t.has_regions_bound_at_or_above(self.current_index) {
- return t;
- }
-
- t.super_fold_with(self)
- }
-
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
match *r {
- ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
- let fld_r = &mut self.fld_r;
- let region = *self.map.entry(br).or_insert_with(|| fld_r(br));
- if let ty::ReLateBound(debruijn1, br) = *region {
- // If the callback returns a late-bound region,
- // that region should always use the INNERMOST
- // debruijn index. Then we adjust it to the
- // correct depth.
- assert_eq!(debruijn1, ty::INNERMOST);
- self.tcx.mk_region(ty::ReLateBound(debruijn, br))
+ ty::ReLateBound(debruijn, br) => {
+ if self.amount == 0 || debruijn < self.current_index {
+ r
} else {
- region
+ let shifted = ty::ReLateBound(debruijn.shifted_in(self.amount), br);
+ self.tcx.mk_region(shifted)
}
}
_ => r
}
}
-}
-///////////////////////////////////////////////////////////////////////////
-// Region shifter
-//
-// Shifts the De Bruijn indices on all escaping bound regions by a
-// fixed amount. Useful in substitution or when otherwise introducing
-// a binding level that is not intended to capture the existing bound
-// regions. See comment on `shift_regions_through_binders` method in
-// `subst.rs` for more details.
+ fn fold_ty(&mut self, ty: ty::Ty<'tcx>) -> ty::Ty<'tcx> {
+ match ty.sty {
+ ty::Bound(bound_ty) => {
+ if self.amount == 0 || bound_ty.index < self.current_index {
+ ty
+ } else {
+ let shifted = ty::BoundTy {
+ index: bound_ty.index.shifted_in(self.amount),
+ var: bound_ty.var,
+ kind: bound_ty.kind,
+ };
+ self.tcx.mk_ty(ty::Bound(shifted))
+ }
+ }
-pub fn shift_region(region: ty::RegionKind, amount: u32) -> ty::RegionKind {
- match region {
- ty::ReLateBound(debruijn, br) => {
- ty::ReLateBound(debruijn.shifted_in(amount), br)
- }
- _ => {
- region
+ _ => ty.super_fold_with(self),
}
}
}
-pub fn shift_region_ref<'a, 'gcx, 'tcx>(
+pub fn shift_region<'a, 'gcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
region: ty::Region<'tcx>,
- amount: u32)
- -> ty::Region<'tcx>
-{
+ amount: u32
+) -> ty::Region<'tcx> {
match region {
- &ty::ReLateBound(debruijn, br) if amount > 0 => {
- tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), br))
+ ty::ReLateBound(debruijn, br) if amount > 0 => {
+ tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), *br))
}
_ => {
region
}
}
-pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- amount: u32,
- value: &T) -> T
- where T: TypeFoldable<'tcx>
-{
- debug!("shift_regions(value={:?}, amount={})",
+pub fn shift_vars<'a, 'gcx, 'tcx, T>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ value: &T,
+ amount: u32
+) -> T where T: TypeFoldable<'tcx> {
+ debug!("shift_vars(value={:?}, amount={})",
value, amount);
- value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
- shift_region_ref(tcx, region, amount)
- }))
+ value.fold_with(&mut Shifter::new(tcx, amount))
}
-/// An "escaping region" is a bound region whose binder is not part of `t`.
+/// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a
+/// bound region or a bound type.
///
/// So, for example, consider a type like the following, which has two binders:
///
/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
/// fn type*, that type has an escaping region: `'a`.
///
-/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
-/// we already use the term "free region". It refers to the regions that we use to represent bound
-/// regions on a fn definition while we are typechecking its body.
+/// Note that what I'm calling an "escaping var" is often just called a "free var". However,
+/// we already use the term "free var". It refers to the regions or types that we use to represent
+/// bound regions or type params on a fn definition while we are typechecking its body.
///
/// To clarify, conceptually there is no particular difference between
-/// an "escaping" region and a "free" region. However, there is a big
+/// an "escaping" var and a "free" var. However, there is a big
/// difference in practice. Basically, when "entering" a binding
/// level, one is generally required to do some sort of processing to
-/// a bound region, such as replacing it with a fresh/placeholder
-/// region, or making an entry in the environment to represent the
-/// scope to which it is attached, etc. An escaping region represents
-/// a bound region for which this processing has not yet been done.
-struct HasEscapingRegionsVisitor {
+/// a bound var, such as replacing it with a fresh/placeholder
+/// var, or making an entry in the environment to represent the
+/// scope to which it is attached, etc. An escaping var represents
+/// a bound var for which this processing has not yet been done.
+struct HasEscapingVarsVisitor {
/// Anything bound by `outer_index` or "above" is escaping
outer_index: ty::DebruijnIndex,
}
-impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor {
+impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor {
fn visit_binder<T: TypeFoldable<'tcx>>(&mut self, t: &Binder<T>) -> bool {
self.outer_index.shift_in(1);
let result = t.super_visit_with(self);
// `outer_index`, that means that `t` contains some content
// bound at `outer_index` or above (because
// `outer_exclusive_binder` is always 1 higher than the
- // content in `t`). Therefore, `t` has some escaping regions.
+ // content in `t`). Therefore, `t` has some escaping vars.
t.outer_exclusive_binder > self.outer_index
}
impl<'a, 'b, 'tcx> Instance<'tcx> {
pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Instance<'tcx> {
- assert!(!substs.has_escaping_regions(),
+ assert!(!substs.has_escaping_bound_vars(),
"substs of instance {:?} not normalized for codegen: {:?}",
def_id, substs);
Instance { def: InstanceDef::Item(def_id), substs: substs }
None
}
}
- traits::VtableAutoImpl(..) | traits::VtableParam(..) => None
+ traits::VtableAutoImpl(..) |
+ traits::VtableParam(..) |
+ traits::VtableTraitAlias(..) => None
}
}
ty::Param(_) |
ty::Opaque(..) |
ty::Infer(_) |
+ ty::Bound(..) |
ty::Error |
ty::GeneratorWitness(..) |
ty::Never |
pub trait IntegerExt {
fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
- fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
repr: &ReprOptions,
}
/// Get the Integer type from an attr::IntType.
- fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
let dl = cx.data_layout();
match ity {
let min_default = I8;
if let Some(ity) = repr.int {
- let discr = Integer::from_attr(tcx, ity);
+ let discr = Integer::from_attr(&tcx, ity);
let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
if discr < fit {
bug!("Integer::repr_discr: `#[repr]` hint too small for \
};
}
-#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
pub tcx: C,
pub param_env: ty::ParamEnv<'tcx>
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
- fn layout_raw_uncached(self, ty: Ty<'tcx>)
+ fn layout_raw_uncached(&self, ty: Ty<'tcx>)
-> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
let tcx = self.tcx;
let param_env = self.param_env;
let (mut min, mut max) = (i128::max_value(), i128::min_value());
let discr_type = def.repr.discr_type();
- let bits = Integer::from_attr(tcx, discr_type).size().bits();
+ let bits = Integer::from_attr(self, discr_type).size().bits();
for (i, discr) in def.discriminants(tcx).enumerate() {
if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
continue;
}
tcx.layout_raw(param_env.and(normalized))?
}
- ty::UnnormalizedProjection(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
+
+ ty::Bound(..) |
+ ty::UnnormalizedProjection(..) |
+ ty::GeneratorWitness(..) |
+ ty::Infer(_) => {
bug!("LayoutDetails::compute: unexpected type `{}`", ty)
}
+
ty::Param(_) | ty::Error => {
return Err(LayoutError::Unknown(ty));
}
/// This is invoked by the `layout_raw` query to record the final
/// layout of each type.
#[inline]
- fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
+ fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
// If we are running with `-Zprint-type-sizes`, record layouts for
// dumping later. Ignore layouts that are done with non-empty
// environments or non-monomorphic layouts, as the user only wants
self.record_layout_for_printing_outlined(layout)
}
- fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
+ fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
// (delay format until we actually need it)
let record = |kind, packed, opt_discr_size, variants| {
let type_desc = format!("{:?}", layout.ty);
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
let param_env = self.param_env.with_reveal_all();
let ty = self.tcx.normalize_erasing_regions(param_env, ty);
let details = self.tcx.layout_raw(param_env.and(ty))?;
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
let param_env = self.param_env.with_reveal_all();
let ty = self.tcx.normalize_erasing_regions(param_env, ty);
let details = self.tcx.layout_raw(param_env.and(ty))?;
where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
- fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
+ fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: usize) -> TyLayout<'tcx> {
let details = match this.variants {
Variants::Single { index } if index == variant_index => this.details,
}
}
- fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
+ fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
let tcx = cx.tcx();
cx.layout_of(match this.ty.sty {
ty::Bool |
Variants::Tagged { tag: ref discr, .. } |
Variants::NicheFilling { niche: ref discr, .. } => {
assert_eq!(i, 0);
- let layout = LayoutDetails::scalar(tcx, discr.clone());
+ let layout = LayoutDetails::scalar(cx, discr.clone());
return MaybeResult::from_ok(TyLayout {
details: tcx.intern_layout(layout),
ty: discr.value.to_ty(tcx)
}
}
- ty::Projection(_) | ty::UnnormalizedProjection(..) |
+ ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
}
impl Niche {
fn reserve<'a, 'tcx>(
&self,
- cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
+ cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
count: u128,
) -> Option<(u128, Scalar)> {
if count > self.available {
/// Find the offset of a niche leaf field, starting from
/// the given type and recursing through aggregates.
// FIXME(eddyb) traverse already optimized enums.
- fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
+ fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
let scalar_niche = |scalar: &Scalar, offset| {
let Scalar { value, valid_range: ref v } = *scalar;
use hir;
-pub use self::sty::{Binder, BoundTy, BoundTyIndex, DebruijnIndex, INNERMOST};
+pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar, DebruijnIndex, INNERMOST};
pub use self::sty::{FnSig, GenSig, CanonicalPolyFnSig, PolyFnSig, PolyGenSig};
pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate};
pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut};
// Currently we can't normalize projections w/ bound regions.
const HAS_NORMALIZABLE_PROJECTION = 1 << 12;
- // Set if this includes a "canonical" type or region var --
- // ought to be true only for the results of canonicalization.
- const HAS_CANONICAL_VARS = 1 << 13;
-
/// Does this have any `ReLateBound` regions? Used to check
/// if a global bound is safe to evaluate.
- const HAS_RE_LATE_BOUND = 1 << 14;
+ const HAS_RE_LATE_BOUND = 1 << 13;
const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits |
TypeFlags::HAS_SELF.bits |
TypeFlags::HAS_TY_CLOSURE.bits |
TypeFlags::HAS_FREE_LOCAL_NAMES.bits |
TypeFlags::KEEP_IN_LOCAL_TCX.bits |
- TypeFlags::HAS_CANONICAL_VARS.bits |
TypeFlags::HAS_RE_LATE_BOUND.bits;
}
}
/// would be the type parameters.
Trait(PolyTraitPredicate<'tcx>),
- /// where 'a : 'b
+ /// where `'a : 'b`
RegionOutlives(PolyRegionOutlivesPredicate<'tcx>),
- /// where T : 'a
+ /// where `T : 'a`
TypeOutlives(PolyTypeOutlivesPredicate<'tcx>),
- /// where <T as TraitRef>::Name == X, approximately.
- /// See `ProjectionPredicate` struct for details.
+ /// where `<T as TraitRef>::Name == X`, approximately.
+ /// See the `ProjectionPredicate` struct for details.
Projection(PolyProjectionPredicate<'tcx>),
- /// no syntax: T WF
+ /// no syntax: `T` well-formed
WellFormed(Ty<'tcx>),
/// trait must be object-safe
ObjectSafe(DefId),
/// No direct syntax. May be thought of as `where T : FnFoo<...>`
- /// for some substitutions `...` and T being a closure type.
+ /// for some substitutions `...` and `T` being a closure type.
/// Satisfied (or refuted) once we know the closure's kind.
ClosureKind(DefId, ClosureSubsts<'tcx>, ClosureKind),
/// True if `self` can name a name from `other` -- in other words,
/// if the set of names in `self` is a superset of those in
- /// `other`.
+ /// `other` (`self >= other`).
pub fn can_name(self, other: UniverseIndex) -> bool {
self.private >= other.private
}
+
+ /// True if `self` cannot name some names from `other` -- in other
+ /// words, if the set of names in `self` is a strict subset of
+ /// those in `other` (`self < other`).
+ pub fn cannot_name(self, other: UniverseIndex) -> bool {
+ self.private < other.private
+ }
}
/// The "placeholder index" fully defines a placeholder region.
pub name: BoundRegion,
}
+impl_stable_hash_for!(struct Placeholder { universe, name });
+
/// When type checking, we use the `ParamEnv` to track
/// details about the set of where-clauses that are in scope at this
/// particular point.
}
}
+ Bound(..) |
Infer(..) => {
bug!("unexpected type `{:?}` in sized_constraint_for_ty",
ty)
}
}
- /// Determine whether an item is annotated with an attribute
+ /// Determine whether an item is annotated with an attribute.
pub fn has_attr(self, did: DefId, attr: &str) -> bool {
attr::contains_name(&self.get_attrs(did), attr)
}
self.optimized_mir(def_id).generator_layout.as_ref().unwrap()
}
- /// Given the def_id of an impl, return the def_id of the trait it implements.
+ /// Given the def-id of an impl, return the def_id of the trait it implements.
/// If it implements no trait, return `None`.
pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
self.impl_trait_ref(def_id).map(|tr| tr.def_id)
}
- /// If the given def ID describes a method belonging to an impl, return the
- /// ID of the impl that the method belongs to. Otherwise, return `None`.
+ /// If the given defid describes a method belonging to an impl, return the
+ /// def-id of the impl that the method belongs to. Otherwise, return `None`.
pub fn impl_of_method(self, def_id: DefId) -> Option<DefId> {
let item = if def_id.krate != LOCAL_CRATE {
if let Some(Def::Method(_)) = self.describe_def(def_id) {
})
}
-/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition
+/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition.
pub fn is_impl_trait_defn(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option<DefId> {
if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
if let Node::Item(item) = tcx.hir.get(node_id) {
None
}
-/// See `ParamEnv` struct def'n for details.
+/// Returns `true` if `def_id` is a trait alias.
+pub fn is_trait_alias(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> bool {
+ if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
+ if let Node::Item(item) = tcx.hir.get(node_id) {
+ if let hir::ItemKind::TraitAlias(..) = item.node {
+ return true;
+ }
+ }
+ }
+ false
+}
+
+/// See `ParamEnv` struct definition for details.
fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> ParamEnv<'tcx>
// we simply fallback to the most restrictive rule, which
// requires that `Pi: 'a` for all `i`.
ty::Projection(ref data) => {
- if !data.has_escaping_regions() {
+ if !data.has_escaping_bound_vars() {
// best case: no escaping regions, so push the
// projection and skip the subtree (thus generating no
// constraints for Pi). This defers the choice between
ty::FnDef(..) | // OutlivesFunction (*)
ty::FnPtr(_) | // OutlivesFunction (*)
ty::Dynamic(..) | // OutlivesObject, OutlivesFragment (*)
+ ty::Bound(..) |
ty::Error => {
// (*) Bare functions and traits are both binders. In the
// RFC, this means we would add the bound regions to the
use session::{CrateDisambiguator, Session};
use std::mem;
use syntax::ast::NodeId;
-use syntax::source_map::{SourceMap, StableFilemapId};
+use syntax::source_map::{SourceMap, StableSourceFileId};
use syntax_pos::{BytePos, Span, DUMMY_SP, SourceFile};
use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo};
use ty;
cnum_map: Once<IndexVec<CrateNum, Option<CrateNum>>>,
source_map: &'sess SourceMap,
- file_index_to_stable_id: FxHashMap<SourceFileIndex, StableFilemapId>,
+ file_index_to_stable_id: FxHashMap<SourceFileIndex, StableSourceFileId>,
// These two fields caches that are populated lazily during decoding.
file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
// This type is used only for (de-)serialization.
#[derive(RustcEncodable, RustcDecodable)]
struct Footer {
- file_index_to_stable_id: FxHashMap<SourceFileIndex, StableFilemapId>,
+ file_index_to_stable_id: FxHashMap<SourceFileIndex, StableSourceFileId>,
prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
query_result_index: EncodedQueryResultIndex,
diagnostics_index: EncodedQueryResultIndex,
tcx.dep_graph.with_ignore(|| {
// Allocate SourceFileIndices
let (file_to_file_index, file_index_to_stable_id) = {
- let mut file_to_file_index = FxHashMap::default();
- let mut file_index_to_stable_id = FxHashMap::default();
+ let files = tcx.sess.source_map().files();
+ let mut file_to_file_index = FxHashMap::with_capacity_and_hasher(
+ files.len(), Default::default());
+ let mut file_index_to_stable_id = FxHashMap::with_capacity_and_hasher(
+ files.len(), Default::default());
- for (index, file) in tcx.sess.source_map().files().iter().enumerate() {
+ for (index, file) in files.iter().enumerate() {
let index = SourceFileIndex(index as u32);
let file_ptr: *const SourceFile = &**file as *const _;
file_to_file_index.insert(file_ptr, index);
- file_index_to_stable_id.insert(index, StableFilemapId::new(&file));
+ file_index_to_stable_id.insert(index, StableSourceFileId::new(&file));
}
(file_to_file_index, file_index_to_stable_id)
cnum_map: &'x IndexVec<CrateNum, Option<CrateNum>>,
synthetic_expansion_infos: &'x Lock<FxHashMap<AbsoluteBytePos, SyntaxContext>>,
file_index_to_file: &'x Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
- file_index_to_stable_id: &'x FxHashMap<SourceFileIndex, StableFilemapId>,
+ file_index_to_stable_id: &'x FxHashMap<SourceFileIndex, StableSourceFileId>,
alloc_decoding_session: AllocDecodingSession<'x>,
}
// except according to those terms.
use rustc_data_structures::sync::{RwLock, ReadGuard, MappedReadGuard};
-use std::mem;
/// The `Steal` struct is intended to used as the value for a query.
/// Specifically, we sometimes have queries (*cough* MIR *cough*)
pub fn steal(&self) -> T {
let value_ref = &mut *self.value.try_write().expect("stealing value which is locked");
- let value = mem::replace(value_ref, None);
+ let value = value_ref.take();
value.expect("attempt to read from stolen value")
}
}
ProjectionMismatched(x) => ProjectionMismatched(x),
ProjectionBoundsLength(x) => ProjectionBoundsLength(x),
Sorts(ref x) => return tcx.lift(x).map(Sorts),
- OldStyleLUB(ref x) => return tcx.lift(x).map(OldStyleLUB),
ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch)
})
}
ty::UnnormalizedProjection(data.fold_with(folder))
}
ty::Opaque(did, substs) => ty::Opaque(did, substs.fold_with(folder)),
- ty::Bool | ty::Char | ty::Str | ty::Int(_) |
- ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
- ty::Param(..) | ty::Never | ty::Foreign(..) => return self
+
+ ty::Bool |
+ ty::Char |
+ ty::Str |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Error |
+ ty::Infer(_) |
+ ty::Param(..) |
+ ty::Bound(..) |
+ ty::Never |
+ ty::Foreign(..) => return self
};
if self.sty == sty {
data.visit_with(visitor)
}
ty::Opaque(_, ref substs) => substs.visit_with(visitor),
- ty::Bool | ty::Char | ty::Str | ty::Int(_) |
- ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
- ty::Param(..) | ty::Never | ty::Foreign(..) => false,
+
+ ty::Bool |
+ ty::Char |
+ ty::Str |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Error |
+ ty::Infer(_) |
+ ty::Bound(..) |
+ ty::Param(..) |
+ ty::Never |
+ ty::Foreign(..) => false,
}
}
(ty::error::TypeError::ProjectionBoundsLength)(x),
(ty::error::TypeError::Sorts)(x),
(ty::error::TypeError::ExistentialMismatch)(x),
- (ty::error::TypeError::OldStyleLUB)(x),
}
}
_ => false,
}
}
+
+ /// When canonicalizing, we replace unbound inference variables and free
+ /// regions with anonymous late bound regions. This method asserts that
+ /// we have an anonymous late bound region, which hence may refer to
+ /// a canonical variable.
+ pub fn assert_bound_var(&self) -> BoundVar {
+ match *self {
+ BoundRegion::BrAnon(var) => BoundVar::from_u32(var),
+ _ => bug!("bound region is not anonymous"),
+ }
+ }
}
/// N.B., If you change this, you'll probably want to change the corresponding
/// A type parameter; for example, `T` in `fn f<T>(x: T) {}
Param(ParamTy),
+ /// Bound type variable, used only when preparing a trait query.
+ Bound(BoundTy),
+
/// A type variable used during type checking.
Infer(InferTy),
TraitRef { def_id: def_id, substs: substs }
}
- /// Returns a TraitRef of the form `P0: Foo<P1..Pn>` where `Pi`
+ /// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
/// are the parameters defined on trait.
pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> {
TraitRef {
/// or some placeholder type.
pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>)
-> ty::TraitRef<'tcx> {
- // otherwise the escaping regions would be captured by the binder
- // debug_assert!(!self_ty.has_escaping_regions());
+ // otherwise the escaping vars would be captured by the binder
+ // debug_assert!(!self_ty.has_escaping_bound_vars());
ty::TraitRef {
def_id: self.def_id,
}
}
-/// Binder is a binder for higher-ranked lifetimes. It is part of the
+/// Binder is a binder for higher-ranked lifetimes or types. It is part of the
/// compiler's representation for things like `for<'a> Fn(&'a isize)`
/// (which would be represented by the type `PolyTraitRef ==
/// Binder<TraitRef>`). Note that when we instantiate,
-/// erase, or otherwise "discharge" these bound regions, we change the
+/// erase, or otherwise "discharge" these bound vars, we change the
/// type from `Binder<T>` to just `T` (see
/// e.g. `liberate_late_bound_regions`).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
impl<T> Binder<T> {
/// Wraps `value` in a binder, asserting that `value` does not
- /// contain any bound regions that would be bound by the
+ /// contain any bound vars that would be bound by the
/// binder. This is commonly used to 'inject' a value T into a
/// different binding level.
pub fn dummy<'tcx>(value: T) -> Binder<T>
where T: TypeFoldable<'tcx>
{
- debug_assert!(!value.has_escaping_regions());
+ debug_assert!(!value.has_escaping_bound_vars());
Binder(value)
}
- /// Wraps `value` in a binder, binding late-bound regions (if any).
- pub fn bind<'tcx>(value: T) -> Binder<T>
- {
+ /// Wraps `value` in a binder, binding higher-ranked vars (if any).
+ pub fn bind<'tcx>(value: T) -> Binder<T> {
Binder(value)
}
/// Skips the binder and returns the "bound" value. This is a
/// risky thing to do because it's easy to get confused about
/// debruijn indices and the like. It is usually better to
- /// discharge the binder using `no_late_bound_regions` or
+ /// discharge the binder using `no_bound_vars` or
/// `replace_late_bound_regions` or something like
/// that. `skip_binder` is only valid when you are either
- /// extracting data that has nothing to do with bound regions, you
+ /// extracting data that has nothing to do with bound vars, you
/// are doing some sort of test that does not involve bound
/// regions, or you are being very careful about your depth
/// accounting.
///
/// - extracting the def-id from a PolyTraitRef;
/// - comparing the self type of a PolyTraitRef to see if it is equal to
- /// a type parameter `X`, since the type `X` does not reference any regions
+ /// a type parameter `X`, since the type `X` does not reference any regions
pub fn skip_binder(&self) -> &T {
&self.0
}
}
/// Unwraps and returns the value within, but only if it contains
- /// no bound regions at all. (In other words, if this binder --
+ /// no bound vars at all. (In other words, if this binder --
/// and indeed any enclosing binder -- doesn't bind anything at
/// all.) Otherwise, returns `None`.
///
/// (One could imagine having a method that just unwraps a single
- /// binder, but permits late-bound regions bound by enclosing
+ /// binder, but permits late-bound vars bound by enclosing
/// binders, but that would require adjusting the debruijn
/// indices, and given the shallow binding structure we often use,
/// would not be that useful.)
- pub fn no_late_bound_regions<'tcx>(self) -> Option<T>
- where T : TypeFoldable<'tcx>
+ pub fn no_bound_vars<'tcx>(self) -> Option<T>
+ where T: TypeFoldable<'tcx>
{
- if self.skip_binder().has_escaping_regions() {
+ if self.skip_binder().has_escaping_bound_vars() {
None
} else {
Some(self.skip_binder().clone())
/// `ClosureRegionRequirements` that are produced by MIR borrowck.
/// See `ClosureRegionRequirements` for more details.
ReClosureBound(RegionVid),
-
- /// Canonicalized region, used only when preparing a trait query.
- ReCanonical(BoundTyIndex),
}
impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {}
FreshTy(u32),
FreshIntTy(u32),
FreshFloatTy(u32),
-
- /// Bound type variable, used only when preparing a trait query.
- BoundTy(BoundTy),
}
newtype_index! {
- pub struct BoundTyIndex { .. }
+ pub struct BoundVar { .. }
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct BoundTy {
- pub level: DebruijnIndex,
- pub var: BoundTyIndex,
+ pub index: DebruijnIndex,
+ pub var: BoundVar,
+ pub kind: BoundTyKind,
}
-impl_stable_hash_for!(struct BoundTy { level, var });
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub enum BoundTyKind {
+ Anon,
+ Param(InternedString),
+}
+
+impl_stable_hash_for!(struct BoundTy { index, var, kind });
+impl_stable_hash_for!(enum self::BoundTyKind { Anon, Param(a) });
+
+impl BoundTy {
+ pub fn new(index: DebruijnIndex, var: BoundVar) -> Self {
+ BoundTy {
+ index,
+ var,
+ kind: BoundTyKind::Anon,
+ }
+ }
+}
/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
-> ty::ProjectionPredicate<'tcx>
{
// otherwise the escaping regions would be captured by the binders
- debug_assert!(!self_ty.has_escaping_regions());
+ debug_assert!(!self_ty.has_escaping_bound_vars());
ty::ProjectionPredicate {
projection_ty: ty::ProjectionTy {
RegionKind::ReEmpty => false,
RegionKind::ReErased => false,
RegionKind::ReClosureBound(..) => false,
- RegionKind::ReCanonical(..) => false,
}
}
}
ty::ReErased => {
}
- ty::ReCanonical(..) => {
- flags = flags | TypeFlags::HAS_FREE_REGIONS;
- flags = flags | TypeFlags::HAS_CANONICAL_VARS;
- }
ty::ReClosureBound(..) => {
flags = flags | TypeFlags::HAS_FREE_REGIONS;
}
Tuple(..) |
Foreign(..) |
Param(_) |
+ Bound(..) |
Infer(_) |
Error => {
vec![]
ty::Infer(ty::TyVar(_)) => false,
- ty::Infer(ty::BoundTy(_)) |
+ ty::Bound(_) |
ty::Infer(ty::FreshTy(_)) |
ty::Infer(ty::FreshIntTy(_)) |
ty::Infer(ty::FreshFloatTy(_)) =>
use hir::def_id::DefId;
use infer::canonical::Canonical;
-use ty::{self, BoundTyIndex, Lift, List, Ty, TyCtxt};
+use ty::{self, BoundVar, Lift, List, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use serialize::{self, Encodable, Encoder, Decodable, Decoder};
span,
root_ty: None,
ty_stack_depth: 0,
- region_binders_passed: 0 };
+ binders_passed: 0 };
(*self).fold_with(&mut folder)
}
}
ty_stack_depth: usize,
// Number of region binders we have passed through while doing the substitution
- region_binders_passed: u32,
+ binders_passed: u32,
}
impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: &ty::Binder<T>) -> ty::Binder<T> {
- self.region_binders_passed += 1;
+ self.binders_passed += 1;
let t = t.super_fold_with(self);
- self.region_binders_passed -= 1;
+ self.binders_passed -= 1;
t
}
}
};
- self.shift_regions_through_binders(ty)
+ self.shift_vars_through_binders(ty)
}
/// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
- /// when we are substituting a type with escaping regions into a context where we have passed
- /// through region binders. That's quite a mouthful. Let's see an example:
+ /// when we are substituting a type with escaping bound vars into a context where we have
+ /// passed through binders. That's quite a mouthful. Let's see an example:
///
/// ```
/// type Func<A> = fn(A);
/// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
/// first case we do not increase the Debruijn index and in the second case we do. The reason
/// is that only in the second case have we passed through a fn binder.
- fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
- debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})",
- ty, self.region_binders_passed, ty.has_escaping_regions());
+ fn shift_vars_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ debug!("shift_vars(ty={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})",
+ ty, self.binders_passed, ty.has_escaping_bound_vars());
- if self.region_binders_passed == 0 || !ty.has_escaping_regions() {
+ if self.binders_passed == 0 || !ty.has_escaping_bound_vars() {
return ty;
}
- let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty);
- debug!("shift_regions: shifted result = {:?}", result);
+ let result = ty::fold::shift_vars(self.tcx(), &ty, self.binders_passed);
+ debug!("shift_vars: shifted result = {:?}", result);
result
}
fn shift_region_through_binders(&self, region: ty::Region<'tcx>) -> ty::Region<'tcx> {
- if self.region_binders_passed == 0 || !region.has_escaping_regions() {
+ if self.binders_passed == 0 || !region.has_escaping_bound_vars() {
return region;
}
- self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed))
+ ty::fold::shift_region(self.tcx, region, self.binders_passed)
}
}
return false;
}
- self.value.substs.iter().zip(BoundTyIndex::new(0)..).all(|(kind, cvar)| {
+ self.value.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| {
match kind.unpack() {
UnpackedKind::Type(ty) => match ty.sty {
- ty::Infer(ty::BoundTy(ref b)) => cvar == b.var,
+ ty::Bound(b) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(b.index, ty::INNERMOST);
+ cvar == b.var
+ }
_ => false,
},
UnpackedKind::Lifetime(r) => match r {
- ty::ReCanonical(cvar1) => cvar == *cvar1,
+ ty::ReLateBound(index, br) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(*index, ty::INNERMOST);
+ cvar == br.assert_bound_var()
+ }
_ => false,
},
}
match self.ty.sty {
ty::Int(ity) => {
let bits = ty::tls::with(|tcx| {
- Integer::from_attr(tcx, SignedInt(ity)).size().bits()
+ Integer::from_attr(&tcx, SignedInt(ity)).size().bits()
});
let x = self.val as i128;
// sign extend the raw representation to be an i128
}
pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) {
let (int, signed) = match self.ty.sty {
- Int(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
- Uint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
+ Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
+ Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
_ => bug!("non integer discriminant"),
};
erased_self_ty,
predicates);
- assert!(!erased_self_ty.has_escaping_regions());
+ assert!(!erased_self_ty.has_escaping_bound_vars());
traits::elaborate_predicates(self, predicates)
.filter_map(|predicate| {
// construct such an object, but this seems
// correct even if that code changes).
let ty::OutlivesPredicate(ref t, ref r) = predicate.skip_binder();
- if t == &erased_self_ty && !r.has_escaping_regions() {
+ if t == &erased_self_ty && !r.has_escaping_bound_vars() {
Some(*r)
} else {
None
self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr
}
- /// True if `def_id` refers to a trait (e.g., `trait Foo { ... }`).
+ /// True if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
pub fn is_trait(self, def_id: DefId) -> bool {
if let DefPathData::Trait(_) = self.def_key(def_id).disambiguated_data.data {
true
// Can refer to a type which may drop.
// FIXME(eddyb) check this against a ParamEnv.
- ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) |
+ ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) | ty::Bound(..) |
ty::Opaque(..) | ty::Infer(_) | ty::Error => true,
ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"),
match parent_ty.sty {
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) |
ty::Str | ty::Infer(_) | ty::Param(_) | ty::Never | ty::Error |
- ty::Foreign(..) => {
+ ty::Bound(..) | ty::Foreign(..) => {
}
ty::Array(ty, len) => {
push_const(stack, len);
let infcx = &mut self.infcx;
let param_env = self.param_env;
self.out.iter()
- .inspect(|pred| assert!(!pred.has_escaping_regions()))
+ .inspect(|pred| assert!(!pred.has_escaping_bound_vars()))
.flat_map(|pred| {
let mut selcx = traits::SelectionContext::new(infcx);
let pred = traits::normalize(&mut selcx, param_env, cause.clone(), pred);
self.out.extend(
trait_ref.substs.types()
- .filter(|ty| !ty.has_escaping_regions())
+ .filter(|ty| !ty.has_escaping_bound_vars())
.map(|ty| traits::Obligation::new(cause.clone(),
param_env,
ty::Predicate::WellFormed(ty))));
let trait_ref = data.trait_ref(self.infcx.tcx);
self.compute_trait_ref(&trait_ref, Elaborate::None);
- if !data.has_escaping_regions() {
+ if !data.has_escaping_bound_vars() {
let predicate = trait_ref.to_predicate();
let cause = self.cause(traits::ProjectionWf(data));
self.out.push(traits::Obligation::new(cause, self.param_env, predicate));
}
fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) {
- if !subty.has_escaping_regions() {
+ if !subty.has_escaping_bound_vars() {
let cause = self.cause(cause);
let trait_ref = ty::TraitRef {
def_id: self.infcx.tcx.require_lang_item(lang_items::SizedTraitLangItem),
ty::GeneratorWitness(..) |
ty::Never |
ty::Param(_) |
+ ty::Bound(..) |
ty::Foreign(..) => {
// WfScalar, WfParameter, etc
}
ty::Ref(r, rty, _) => {
// WfReference
- if !r.has_escaping_regions() && !rty.has_escaping_regions() {
+ if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() {
let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
self.out.push(
traits::Obligation::new(
.map(|pred| traits::Obligation::new(cause.clone(),
self.param_env,
pred))
- .filter(|pred| !pred.has_escaping_regions())
+ .filter(|pred| !pred.has_escaping_bound_vars())
.collect()
}
// Note: in fact we only permit builtin traits, not `Bar<'d>`, I
// am looking forward to the future here.
- if !data.has_escaping_regions() {
+ if !data.has_escaping_bound_vars() {
let implicit_bounds =
object_region_bounds(self.infcx.tcx, data);
use ty::{BrAnon, BrEnv, BrFresh, BrNamed};
use ty::{Bool, Char, Adt};
use ty::{Error, Str, Array, Slice, Float, FnDef, FnPtr};
-use ty::{Param, RawPtr, Ref, Never, Tuple};
+use ty::{Param, Bound, RawPtr, Ref, Never, Tuple};
use ty::{Closure, Generator, GeneratorWitness, Foreign, Projection, Opaque};
use ty::{UnnormalizedProjection, Dynamic, Int, Uint, Infer};
use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind};
ty::ReEarlyBound(ref data) => {
write!(f, "{}", data.name)
}
- ty::ReCanonical(_) => {
- write!(f, "'_")
- }
ty::ReLateBound(_, br) |
ty::ReFree(ty::FreeRegion { bound_region: br, .. }) |
ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
write!(f, "{:?}", vid)
}
- ty::ReCanonical(c) => {
- write!(f, "'?{}", c.index())
- }
-
ty::RePlaceholder(placeholder) => {
write!(f, "RePlaceholder({:?})", placeholder)
}
ty::TyVar(_) => write!(f, "_"),
ty::IntVar(_) => write!(f, "{}", "{integer}"),
ty::FloatVar(_) => write!(f, "{}", "{float}"),
- ty::BoundTy(_) => write!(f, "_"),
ty::FreshTy(v) => write!(f, "FreshTy({})", v),
ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v),
ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v)
ty::TyVar(ref v) => write!(f, "{:?}", v),
ty::IntVar(ref v) => write!(f, "{:?}", v),
ty::FloatVar(ref v) => write!(f, "{:?}", v),
- ty::BoundTy(v) => write!(f, "?{:?}", v.var.index()),
ty::FreshTy(v) => write!(f, "FreshTy({:?})", v),
ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v),
ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v)
Infer(infer_ty) => write!(f, "{}", infer_ty),
Error => write!(f, "[type error]"),
Param(ref param_ty) => write!(f, "{}", param_ty),
+ Bound(bound_ty) => {
+ match bound_ty.kind {
+ ty::BoundTyKind::Anon => {
+ if bound_ty.index == ty::INNERMOST {
+ write!(f, "?{}", bound_ty.var.index())
+ } else {
+ write!(f, "?{}_{}", bound_ty.index.index(), bound_ty.var.index())
+ }
+ }
+
+ ty::BoundTyKind::Param(p) => write!(f, "{}", p),
+ }
+ }
Adt(def, substs) => cx.parameterized(f, substs, def.did, &[]),
Dynamic(data, r) => {
let r = r.print_to_string(cx);
// These cannot exist in borrowck
RegionKind::ReVar(..) |
- RegionKind::ReCanonical(..) |
RegionKind::RePlaceholder(..) |
RegionKind::ReClosureBound(..) |
RegionKind::ReErased => span_bug!(borrow_span,
ty::ReStatic => self.item_ub,
- ty::ReCanonical(_) |
ty::ReEmpty |
ty::ReClosureBound(..) |
ty::ReLateBound(..) |
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
-use rustc_target::abi::{LayoutOf, Size, TyLayout};
+use rustc_target::abi::{LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::layout;
FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
let mut layout = cx.layout_of(ty);
// Don't pass the vtable, it's not an argument of the virtual fn.
- // Instead, pass just the (thin pointer) first field of `*dyn Trait`.
+ // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+ // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
if arg_idx == Some(0) {
- // FIXME(eddyb) `layout.field(cx, 0)` is not enough because e.g.
- // `Box<dyn Trait>` has a few newtype wrappers around the raw
- // pointer, so we'd have to "dig down" to find `*dyn Trait`.
- let pointee = if layout.is_unsized() {
- layout.ty
+ let fat_pointer_ty = if layout.is_unsized() {
+ // unsized `self` is passed as a pointer to `self`
+ // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+ cx.tcx.mk_mut_ptr(layout.ty)
} else {
- layout.ty.builtin_deref(true)
- .unwrap_or_else(|| {
- bug!("FnType::new_vtable: non-pointer self {:?}", layout)
- }).ty
+ match layout.abi {
+ LayoutAbi::ScalarPair(..) => (),
+ _ => bug!("receiver type has unsupported layout: {:?}", layout)
+ }
+
+ // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
+ // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
+ // elsewhere in the compiler as a method on a `dyn Trait`.
+ // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
+ // get a built-in pointer type
+ let mut fat_pointer_layout = layout;
+ 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+ && !fat_pointer_layout.ty.is_region_ptr()
+ {
+ 'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
+ let field_layout = fat_pointer_layout.field(cx, i);
+
+ if !field_layout.is_zst() {
+ fat_pointer_layout = field_layout;
+ continue 'descend_newtypes
+ }
+ }
+
+ bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+ }
+
+ fat_pointer_layout.ty
};
- let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee);
- layout = cx.layout_of(fat_ptr_ty).field(cx, 0);
+
+ // we now have a type like `*mut RcBox<dyn Trait>`
+ // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
+ // this is understood as a special case elsewhere in the compiler
+ let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
+ layout = cx.layout_of(unit_pointer_ty);
+ layout.ty = fat_pointer_ty;
}
ArgType::new(layout)
})
},
}
-pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
- -> PathBuf {
- // On Windows, static libraries sometimes show up as libfoo.a and other
- // times show up as foo.lib
- let oslibname = format!("{}{}{}",
- sess.target.target.options.staticlib_prefix,
- name,
- sess.target.target.options.staticlib_suffix);
- let unixlibname = format!("lib{}.a", name);
-
- for path in search_paths {
- debug!("looking for {} inside {:?}", name, path);
- let test = path.join(&oslibname);
- if test.exists() { return test }
- if oslibname != unixlibname {
- let test = path.join(&unixlibname);
- if test.exists() { return test }
- }
- }
- sess.fatal(&format!("could not find native static library `{}`, \
- perhaps an -L flag is missing?", name));
-}
fn is_relevant_child(c: &Child) -> bool {
match c.name() {
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
pub fn add_native_library(&mut self, name: &str) {
- let location = find_library(name, &self.config.lib_search_paths,
+ let location = ::rustc_codegen_utils::find_library(name, &self.config.lib_search_paths,
self.config.sess);
self.add_archive(&location, |_| false).unwrap_or_else(|e| {
self.config.sess.fatal(&format!("failed to add native library {}: {}",
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A thin wrapper around `Command` in the standard library which allows us to
-//! read the arguments that are built up.
-
-use std::ffi::{OsStr, OsString};
-use std::fmt;
-use std::io;
-use std::mem;
-use std::process::{self, Output};
-
-use rustc_target::spec::LldFlavor;
-
-#[derive(Clone)]
-pub struct Command {
- program: Program,
- args: Vec<OsString>,
- env: Vec<(OsString, OsString)>,
-}
-
-#[derive(Clone)]
-enum Program {
- Normal(OsString),
- CmdBatScript(OsString),
- Lld(OsString, LldFlavor)
-}
-
-impl Command {
- pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
- Command::_new(Program::Normal(program.as_ref().to_owned()))
- }
-
- pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
- Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
- }
-
- pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
- Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
- }
-
- fn _new(program: Program) -> Command {
- Command {
- program,
- args: Vec::new(),
- env: Vec::new(),
- }
- }
-
- pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
- self._arg(arg.as_ref());
- self
- }
-
- pub fn args<I>(&mut self, args: I) -> &mut Command
- where I: IntoIterator,
- I::Item: AsRef<OsStr>,
- {
- for arg in args {
- self._arg(arg.as_ref());
- }
- self
- }
-
- fn _arg(&mut self, arg: &OsStr) {
- self.args.push(arg.to_owned());
- }
-
- pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
- where K: AsRef<OsStr>,
- V: AsRef<OsStr>
- {
- self._env(key.as_ref(), value.as_ref());
- self
- }
-
- fn _env(&mut self, key: &OsStr, value: &OsStr) {
- self.env.push((key.to_owned(), value.to_owned()));
- }
-
- pub fn output(&mut self) -> io::Result<Output> {
- self.command().output()
- }
-
- pub fn command(&self) -> process::Command {
- let mut ret = match self.program {
- Program::Normal(ref p) => process::Command::new(p),
- Program::CmdBatScript(ref p) => {
- let mut c = process::Command::new("cmd");
- c.arg("/c").arg(p);
- c
- }
- Program::Lld(ref p, flavor) => {
- let mut c = process::Command::new(p);
- c.arg("-flavor").arg(match flavor {
- LldFlavor::Wasm => "wasm",
- LldFlavor::Ld => "gnu",
- LldFlavor::Link => "link",
- LldFlavor::Ld64 => "darwin",
- });
- c
- }
- };
- ret.args(&self.args);
- ret.envs(self.env.clone());
- return ret
- }
-
- // extensions
-
- pub fn get_args(&self) -> &[OsString] {
- &self.args
- }
-
- pub fn take_args(&mut self) -> Vec<OsString> {
- mem::replace(&mut self.args, Vec::new())
- }
-
- /// Returns a `true` if we're pretty sure that this'll blow OS spawn limits,
- /// or `false` if we should attempt to spawn and see what the OS says.
- pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
- // We mostly only care about Windows in this method, on Unix the limits
- // can be gargantuan anyway so we're pretty unlikely to hit them
- if cfg!(unix) {
- return false
- }
-
- // Right now LLD doesn't support the `@` syntax of passing an argument
- // through files, so regardless of the platform we try to go to the OS
- // on this one.
- if let Program::Lld(..) = self.program {
- return false
- }
-
- // Ok so on Windows to spawn a process is 32,768 characters in its
- // command line [1]. Unfortunately we don't actually have access to that
- // as it's calculated just before spawning. Instead we perform a
- // poor-man's guess as to how long our command line will be. We're
- // assuming here that we don't have to escape every character...
- //
- // Turns out though that `cmd.exe` has even smaller limits, 8192
- // characters [2]. Linkers can often be batch scripts (for example
- // Emscripten, Gecko's current build system) which means that we're
- // running through batch scripts. These linkers often just forward
- // arguments elsewhere (and maybe tack on more), so if we blow 8192
- // bytes we'll typically cause them to blow as well.
- //
- // Basically as a result just perform an inflated estimate of what our
- // command line will look like and test if it's > 8192 (we actually
- // test against 6k to artificially inflate our estimate). If all else
- // fails we'll fall back to the normal unix logic of testing the OS
- // error code if we fail to spawn and automatically re-spawning the
- // linker with smaller arguments.
- //
- // [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx
- // [2]: https://blogs.msdn.microsoft.com/oldnewthing/20031210-00/?p=41553
-
- let estimated_command_line_len =
- self.args.iter().map(|a| a.len()).sum::<usize>();
- estimated_command_line_len > 1024 * 6
- }
-}
-
-impl fmt::Debug for Command {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- self.command().fmt(f)
- }
-}
use cc::windows_registry;
use super::archive::{ArchiveBuilder, ArchiveConfig};
use super::bytecode::RLIB_BYTECODE_EXTENSION;
-use super::linker::Linker;
-use super::command::Command;
use super::rpath::RPathConfig;
use super::rpath;
use metadata::METADATA_FILENAME;
use tempfile::{Builder as TempFileBuilder, TempDir};
use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor};
use rustc_data_structures::fx::FxHashSet;
+use rustc_codegen_utils::linker::Linker;
+use rustc_codegen_utils::command::Command;
use context::get_reloc_model;
use llvm;
}
{
- let mut linker = codegen_results.linker_info.to_linker(cmd, &sess, flavor);
+ let target_cpu = ::llvm_util::target_cpu(sess);
+ let mut linker = codegen_results.linker_info.to_linker(cmd, &sess, flavor, target_cpu);
link_args(&mut *linker, flavor, sess, crate_type, tmpdir,
out_filename, codegen_results);
cmd = linker.finalize();
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc_data_structures::fx::FxHashMap;
-use std::ffi::{OsStr, OsString};
-use std::fs::{self, File};
-use std::io::prelude::*;
-use std::io::{self, BufWriter};
-use std::path::{Path, PathBuf};
-
-use back::archive;
-use back::command::Command;
-use back::symbol_export;
-use rustc::hir::def_id::{LOCAL_CRATE, CrateNum};
-use rustc::middle::dependency_format::Linkage;
-use rustc::session::Session;
-use rustc::session::config::{self, CrateType, OptLevel, DebugInfo,
- CrossLangLto};
-use rustc::ty::TyCtxt;
-use rustc_target::spec::{LinkerFlavor, LldFlavor};
-use serialize::{json, Encoder};
-use llvm_util;
-
-/// For all the linkers we support, and information they might
-/// need out of the shared crate context before we get rid of it.
-pub struct LinkerInfo {
- exports: FxHashMap<CrateType, Vec<String>>,
-}
-
-impl LinkerInfo {
- pub fn new(tcx: TyCtxt) -> LinkerInfo {
- LinkerInfo {
- exports: tcx.sess.crate_types.borrow().iter().map(|&c| {
- (c, exported_symbols(tcx, c))
- }).collect(),
- }
- }
-
- pub fn to_linker<'a>(&'a self,
- cmd: Command,
- sess: &'a Session,
- flavor: LinkerFlavor) -> Box<dyn Linker+'a> {
- match flavor {
- LinkerFlavor::Lld(LldFlavor::Link) |
- LinkerFlavor::Msvc => {
- Box::new(MsvcLinker {
- cmd,
- sess,
- info: self
- }) as Box<dyn Linker>
- }
- LinkerFlavor::Em => {
- Box::new(EmLinker {
- cmd,
- sess,
- info: self
- }) as Box<dyn Linker>
- }
- LinkerFlavor::Gcc => {
- Box::new(GccLinker {
- cmd,
- sess,
- info: self,
- hinted_static: false,
- is_ld: false,
- }) as Box<dyn Linker>
- }
-
- LinkerFlavor::Lld(LldFlavor::Ld) |
- LinkerFlavor::Lld(LldFlavor::Ld64) |
- LinkerFlavor::Ld => {
- Box::new(GccLinker {
- cmd,
- sess,
- info: self,
- hinted_static: false,
- is_ld: true,
- }) as Box<dyn Linker>
- }
-
- LinkerFlavor::Lld(LldFlavor::Wasm) => {
- Box::new(WasmLd {
- cmd,
- sess,
- info: self
- }) as Box<dyn Linker>
- }
- }
- }
-}
-
-/// Linker abstraction used by back::link to build up the command to invoke a
-/// linker.
-///
-/// This trait is the total list of requirements needed by `back::link` and
-/// represents the meaning of each option being passed down. This trait is then
-/// used to dispatch on whether a GNU-like linker (generally `ld.exe`) or an
-/// MSVC linker (e.g. `link.exe`) is being used.
-pub trait Linker {
- fn link_dylib(&mut self, lib: &str);
- fn link_rust_dylib(&mut self, lib: &str, path: &Path);
- fn link_framework(&mut self, framework: &str);
- fn link_staticlib(&mut self, lib: &str);
- fn link_rlib(&mut self, lib: &Path);
- fn link_whole_rlib(&mut self, lib: &Path);
- fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]);
- fn include_path(&mut self, path: &Path);
- fn framework_path(&mut self, path: &Path);
- fn output_filename(&mut self, path: &Path);
- fn add_object(&mut self, path: &Path);
- fn gc_sections(&mut self, keep_metadata: bool);
- fn position_independent_executable(&mut self);
- fn no_position_independent_executable(&mut self);
- fn full_relro(&mut self);
- fn partial_relro(&mut self);
- fn no_relro(&mut self);
- fn optimize(&mut self);
- fn pgo_gen(&mut self);
- fn debuginfo(&mut self);
- fn no_default_libraries(&mut self);
- fn build_dylib(&mut self, out_filename: &Path);
- fn build_static_executable(&mut self);
- fn args(&mut self, args: &[String]);
- fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType);
- fn subsystem(&mut self, subsystem: &str);
- fn group_start(&mut self);
- fn group_end(&mut self);
- fn cross_lang_lto(&mut self);
- // Should have been finalize(self), but we don't support self-by-value on trait objects (yet?).
- fn finalize(&mut self) -> Command;
-}
-
-pub struct GccLinker<'a> {
- cmd: Command,
- sess: &'a Session,
- info: &'a LinkerInfo,
- hinted_static: bool, // Keeps track of the current hinting mode.
- // Link as ld
- is_ld: bool,
-}
-
-impl<'a> GccLinker<'a> {
- /// Argument that must be passed *directly* to the linker
- ///
- /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used
- fn linker_arg<S>(&mut self, arg: S) -> &mut Self
- where S: AsRef<OsStr>
- {
- if !self.is_ld {
- let mut os = OsString::from("-Wl,");
- os.push(arg.as_ref());
- self.cmd.arg(os);
- } else {
- self.cmd.arg(arg);
- }
- self
- }
-
- fn takes_hints(&self) -> bool {
- !self.sess.target.target.options.is_like_osx
- }
-
- // Some platforms take hints about whether a library is static or dynamic.
- // For those that support this, we ensure we pass the option if the library
- // was flagged "static" (most defaults are dynamic) to ensure that if
- // libfoo.a and libfoo.so both exist that the right one is chosen.
- fn hint_static(&mut self) {
- if !self.takes_hints() { return }
- if !self.hinted_static {
- self.linker_arg("-Bstatic");
- self.hinted_static = true;
- }
- }
-
- fn hint_dynamic(&mut self) {
- if !self.takes_hints() { return }
- if self.hinted_static {
- self.linker_arg("-Bdynamic");
- self.hinted_static = false;
- }
- }
-
- fn push_cross_lang_lto_args(&mut self, plugin_path: Option<&OsStr>) {
- if let Some(plugin_path) = plugin_path {
- let mut arg = OsString::from("-plugin=");
- arg.push(plugin_path);
- self.linker_arg(&arg);
- }
-
- let opt_level = match self.sess.opts.optimize {
- config::OptLevel::No => "O0",
- config::OptLevel::Less => "O1",
- config::OptLevel::Default => "O2",
- config::OptLevel::Aggressive => "O3",
- config::OptLevel::Size => "Os",
- config::OptLevel::SizeMin => "Oz",
- };
-
- self.linker_arg(&format!("-plugin-opt={}", opt_level));
- self.linker_arg(&format!("-plugin-opt=mcpu={}", llvm_util::target_cpu(self.sess)));
-
- match self.sess.lto() {
- config::Lto::Thin |
- config::Lto::ThinLocal => {
- self.linker_arg("-plugin-opt=thin");
- }
- config::Lto::Fat |
- config::Lto::No => {
- // default to regular LTO
- }
- }
- }
-}
-
-impl<'a> Linker for GccLinker<'a> {
- fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); }
- fn link_staticlib(&mut self, lib: &str) {
- self.hint_static(); self.cmd.arg(format!("-l{}",lib));
- }
- fn link_rlib(&mut self, lib: &Path) { self.hint_static(); self.cmd.arg(lib); }
- fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); }
- fn framework_path(&mut self, path: &Path) { self.cmd.arg("-F").arg(path); }
- fn output_filename(&mut self, path: &Path) { self.cmd.arg("-o").arg(path); }
- fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
- fn position_independent_executable(&mut self) { self.cmd.arg("-pie"); }
- fn no_position_independent_executable(&mut self) { self.cmd.arg("-no-pie"); }
- fn full_relro(&mut self) { self.linker_arg("-zrelro"); self.linker_arg("-znow"); }
- fn partial_relro(&mut self) { self.linker_arg("-zrelro"); }
- fn no_relro(&mut self) { self.linker_arg("-znorelro"); }
- fn build_static_executable(&mut self) { self.cmd.arg("-static"); }
- fn args(&mut self, args: &[String]) { self.cmd.args(args); }
-
- fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
- self.hint_dynamic();
- self.cmd.arg(format!("-l{}",lib));
- }
-
- fn link_framework(&mut self, framework: &str) {
- self.hint_dynamic();
- self.cmd.arg("-framework").arg(framework);
- }
-
- // Here we explicitly ask that the entire archive is included into the
- // result artifact. For more details see #15460, but the gist is that
- // the linker will strip away any unused objects in the archive if we
- // don't otherwise explicitly reference them. This can occur for
- // libraries which are just providing bindings, libraries with generic
- // functions, etc.
- fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]) {
- self.hint_static();
- let target = &self.sess.target.target;
- if !target.options.is_like_osx {
- self.linker_arg("--whole-archive").cmd.arg(format!("-l{}",lib));
- self.linker_arg("--no-whole-archive");
- } else {
- // -force_load is the macOS equivalent of --whole-archive, but it
- // involves passing the full path to the library to link.
- self.linker_arg("-force_load");
- let lib = archive::find_library(lib, search_path, &self.sess);
- self.linker_arg(&lib);
- }
- }
-
- fn link_whole_rlib(&mut self, lib: &Path) {
- self.hint_static();
- if self.sess.target.target.options.is_like_osx {
- self.linker_arg("-force_load");
- self.linker_arg(&lib);
- } else {
- self.linker_arg("--whole-archive").cmd.arg(lib);
- self.linker_arg("--no-whole-archive");
- }
- }
-
- fn gc_sections(&mut self, keep_metadata: bool) {
- // The dead_strip option to the linker specifies that functions and data
- // unreachable by the entry point will be removed. This is quite useful
- // with Rust's compilation model of compiling libraries at a time into
- // one object file. For example, this brings hello world from 1.7MB to
- // 458K.
- //
- // Note that this is done for both executables and dynamic libraries. We
- // won't get much benefit from dylibs because LLVM will have already
- // stripped away as much as it could. This has not been seen to impact
- // link times negatively.
- //
- // -dead_strip can't be part of the pre_link_args because it's also used
- // for partial linking when using multiple codegen units (-r). So we
- // insert it here.
- if self.sess.target.target.options.is_like_osx {
- self.linker_arg("-dead_strip");
- } else if self.sess.target.target.options.is_like_solaris {
- self.linker_arg("-zignore");
-
- // If we're building a dylib, we don't use --gc-sections because LLVM
- // has already done the best it can do, and we also don't want to
- // eliminate the metadata. If we're building an executable, however,
- // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
- // reduction.
- } else if !keep_metadata {
- self.linker_arg("--gc-sections");
- }
- }
-
- fn optimize(&mut self) {
- if !self.sess.target.target.options.linker_is_gnu { return }
-
- // GNU-style linkers support optimization with -O. GNU ld doesn't
- // need a numeric argument, but other linkers do.
- if self.sess.opts.optimize == config::OptLevel::Default ||
- self.sess.opts.optimize == config::OptLevel::Aggressive {
- self.linker_arg("-O1");
- }
- }
-
- fn pgo_gen(&mut self) {
- if !self.sess.target.target.options.linker_is_gnu { return }
-
- // If we're doing PGO generation stuff and on a GNU-like linker, use the
- // "-u" flag to properly pull in the profiler runtime bits.
- //
- // This is because LLVM otherwise won't add the needed initialization
- // for us on Linux (though the extra flag should be harmless if it
- // does).
- //
- // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
- //
- // Though it may be worth to try to revert those changes upstream, since
- // the overhead of the initialization should be minor.
- self.cmd.arg("-u");
- self.cmd.arg("__llvm_profile_runtime");
- }
-
- fn debuginfo(&mut self) {
- match self.sess.opts.debuginfo {
- DebugInfo::None => {
- // If we are building without debuginfo enabled and we were called with
- // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo
- // found when linking to get rid of symbols from libstd.
- match self.sess.opts.debugging_opts.strip_debuginfo_if_disabled {
- Some(true) => { self.linker_arg("-S"); },
- _ => {},
- }
- },
- _ => {},
- };
- }
-
- fn no_default_libraries(&mut self) {
- if !self.is_ld {
- self.cmd.arg("-nodefaultlibs");
- }
- }
-
- fn build_dylib(&mut self, out_filename: &Path) {
- // On mac we need to tell the linker to let this library be rpathed
- if self.sess.target.target.options.is_like_osx {
- self.cmd.arg("-dynamiclib");
- self.linker_arg("-dylib");
-
- // Note that the `osx_rpath_install_name` option here is a hack
- // purely to support rustbuild right now, we should get a more
- // principled solution at some point to force the compiler to pass
- // the right `-Wl,-install_name` with an `@rpath` in it.
- if self.sess.opts.cg.rpath ||
- self.sess.opts.debugging_opts.osx_rpath_install_name {
- self.linker_arg("-install_name");
- let mut v = OsString::from("@rpath/");
- v.push(out_filename.file_name().unwrap());
- self.linker_arg(&v);
- }
- } else {
- self.cmd.arg("-shared");
- }
- }
-
- fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
- // If we're compiling a dylib, then we let symbol visibility in object
- // files to take care of whether they're exported or not.
- //
- // If we're compiling a cdylib, however, we manually create a list of
- // exported symbols to ensure we don't expose any more. The object files
- // have far more public symbols than we actually want to export, so we
- // hide them all here.
- if crate_type == CrateType::Dylib ||
- crate_type == CrateType::ProcMacro {
- return
- }
-
- let mut arg = OsString::new();
- let path = tmpdir.join("list");
-
- debug!("EXPORTED SYMBOLS:");
-
- if self.sess.target.target.options.is_like_osx {
- // Write a plain, newline-separated list of symbols
- let res = (|| -> io::Result<()> {
- let mut f = BufWriter::new(File::create(&path)?);
- for sym in self.info.exports[&crate_type].iter() {
- debug!(" _{}", sym);
- writeln!(f, "_{}", sym)?;
- }
- Ok(())
- })();
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write lib.def file: {}", e));
- }
- } else {
- // Write an LD version script
- let res = (|| -> io::Result<()> {
- let mut f = BufWriter::new(File::create(&path)?);
- writeln!(f, "{{\n global:")?;
- for sym in self.info.exports[&crate_type].iter() {
- debug!(" {};", sym);
- writeln!(f, " {};", sym)?;
- }
- writeln!(f, "\n local:\n *;\n}};")?;
- Ok(())
- })();
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write version script: {}", e));
- }
- }
-
- if self.sess.target.target.options.is_like_osx {
- if !self.is_ld {
- arg.push("-Wl,")
- }
- arg.push("-exported_symbols_list,");
- } else if self.sess.target.target.options.is_like_solaris {
- if !self.is_ld {
- arg.push("-Wl,")
- }
- arg.push("-M,");
- } else {
- if !self.is_ld {
- arg.push("-Wl,")
- }
- arg.push("--version-script=");
- }
-
- arg.push(&path);
- self.cmd.arg(arg);
- }
-
- fn subsystem(&mut self, subsystem: &str) {
- self.linker_arg("--subsystem");
- self.linker_arg(&subsystem);
- }
-
- fn finalize(&mut self) -> Command {
- self.hint_dynamic(); // Reset to default before returning the composed command line.
- let mut cmd = Command::new("");
- ::std::mem::swap(&mut cmd, &mut self.cmd);
- cmd
- }
-
- fn group_start(&mut self) {
- if !self.sess.target.target.options.is_like_osx {
- self.linker_arg("--start-group");
- }
- }
-
- fn group_end(&mut self) {
- if !self.sess.target.target.options.is_like_osx {
- self.linker_arg("--end-group");
- }
- }
-
- fn cross_lang_lto(&mut self) {
- match self.sess.opts.debugging_opts.cross_lang_lto {
- CrossLangLto::Disabled => {
- // Nothing to do
- }
- CrossLangLto::LinkerPluginAuto => {
- self.push_cross_lang_lto_args(None);
- }
- CrossLangLto::LinkerPlugin(ref path) => {
- self.push_cross_lang_lto_args(Some(path.as_os_str()));
- }
- }
- }
-}
-
-pub struct MsvcLinker<'a> {
- cmd: Command,
- sess: &'a Session,
- info: &'a LinkerInfo
-}
-
-impl<'a> Linker for MsvcLinker<'a> {
- fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); }
- fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
- fn args(&mut self, args: &[String]) { self.cmd.args(args); }
-
- fn build_dylib(&mut self, out_filename: &Path) {
- self.cmd.arg("/DLL");
- let mut arg: OsString = "/IMPLIB:".into();
- arg.push(out_filename.with_extension("dll.lib"));
- self.cmd.arg(arg);
- }
-
- fn build_static_executable(&mut self) {
- // noop
- }
-
- fn gc_sections(&mut self, _keep_metadata: bool) {
- // MSVC's ICF (Identical COMDAT Folding) link optimization is
- // slow for Rust and thus we disable it by default when not in
- // optimization build.
- if self.sess.opts.optimize != config::OptLevel::No {
- self.cmd.arg("/OPT:REF,ICF");
- } else {
- // It is necessary to specify NOICF here, because /OPT:REF
- // implies ICF by default.
- self.cmd.arg("/OPT:REF,NOICF");
- }
- }
-
- fn link_dylib(&mut self, lib: &str) {
- self.cmd.arg(&format!("{}.lib", lib));
- }
-
- fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
- // When producing a dll, the MSVC linker may not actually emit a
- // `foo.lib` file if the dll doesn't actually export any symbols, so we
- // check to see if the file is there and just omit linking to it if it's
- // not present.
- let name = format!("{}.dll.lib", lib);
- if fs::metadata(&path.join(&name)).is_ok() {
- self.cmd.arg(name);
- }
- }
-
- fn link_staticlib(&mut self, lib: &str) {
- self.cmd.arg(&format!("{}.lib", lib));
- }
-
- fn position_independent_executable(&mut self) {
- // noop
- }
-
- fn no_position_independent_executable(&mut self) {
- // noop
- }
-
- fn full_relro(&mut self) {
- // noop
- }
-
- fn partial_relro(&mut self) {
- // noop
- }
-
- fn no_relro(&mut self) {
- // noop
- }
-
- fn no_default_libraries(&mut self) {
- // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC
- // as there's been trouble in the past of linking the C++ standard
- // library required by LLVM. This likely needs to happen one day, but
- // in general Windows is also a more controlled environment than
- // Unix, so it's not necessarily as critical that this be implemented.
- //
- // Note that there are also some licensing worries about statically
- // linking some libraries which require a specific agreement, so it may
- // not ever be possible for us to pass this flag.
- }
-
- fn include_path(&mut self, path: &Path) {
- let mut arg = OsString::from("/LIBPATH:");
- arg.push(path);
- self.cmd.arg(&arg);
- }
-
- fn output_filename(&mut self, path: &Path) {
- let mut arg = OsString::from("/OUT:");
- arg.push(path);
- self.cmd.arg(&arg);
- }
-
- fn framework_path(&mut self, _path: &Path) {
- bug!("frameworks are not supported on windows")
- }
- fn link_framework(&mut self, _framework: &str) {
- bug!("frameworks are not supported on windows")
- }
-
- fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
- // not supported?
- self.link_staticlib(lib);
- }
- fn link_whole_rlib(&mut self, path: &Path) {
- // not supported?
- self.link_rlib(path);
- }
- fn optimize(&mut self) {
- // Needs more investigation of `/OPT` arguments
- }
-
- fn pgo_gen(&mut self) {
- // Nothing needed here.
- }
-
- fn debuginfo(&mut self) {
- // This will cause the Microsoft linker to generate a PDB file
- // from the CodeView line tables in the object files.
- self.cmd.arg("/DEBUG");
-
- // This will cause the Microsoft linker to embed .natvis info into the the PDB file
- let sysroot = self.sess.sysroot();
- let natvis_dir_path = sysroot.join("lib\\rustlib\\etc");
- if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
- // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes
- // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore
- // them, eventually mooting this workaround, per this landed patch:
- // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100
- if let Some(ref linker_path) = self.sess.opts.cg.linker {
- if let Some(linker_name) = Path::new(&linker_path).file_stem() {
- if linker_name.to_str().unwrap().to_lowercase() == "lld-link" {
- self.sess.warn("not embedding natvis: lld-link may not support the flag");
- return;
- }
- }
- }
- for entry in natvis_dir {
- match entry {
- Ok(entry) => {
- let path = entry.path();
- if path.extension() == Some("natvis".as_ref()) {
- let mut arg = OsString::from("/NATVIS:");
- arg.push(path);
- self.cmd.arg(arg);
- }
- },
- Err(err) => {
- self.sess.warn(&format!("error enumerating natvis directory: {}", err));
- },
- }
- }
- }
- }
-
- // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
- // export symbols from a dynamic library. When building a dynamic library,
- // however, we're going to want some symbols exported, so this function
- // generates a DEF file which lists all the symbols.
- //
- // The linker will read this `*.def` file and export all the symbols from
- // the dynamic library. Note that this is not as simple as just exporting
- // all the symbols in the current crate (as specified by `codegen.reachable`)
- // but rather we also need to possibly export the symbols of upstream
- // crates. Upstream rlibs may be linked statically to this dynamic library,
- // in which case they may continue to transitively be used and hence need
- // their symbols exported.
- fn export_symbols(&mut self,
- tmpdir: &Path,
- crate_type: CrateType) {
- let path = tmpdir.join("lib.def");
- let res = (|| -> io::Result<()> {
- let mut f = BufWriter::new(File::create(&path)?);
-
- // Start off with the standard module name header and then go
- // straight to exports.
- writeln!(f, "LIBRARY")?;
- writeln!(f, "EXPORTS")?;
- for symbol in self.info.exports[&crate_type].iter() {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
- }
- Ok(())
- })();
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write lib.def file: {}", e));
- }
- let mut arg = OsString::from("/DEF:");
- arg.push(path);
- self.cmd.arg(&arg);
- }
-
- fn subsystem(&mut self, subsystem: &str) {
- // Note that previous passes of the compiler validated this subsystem,
- // so we just blindly pass it to the linker.
- self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
-
- // Windows has two subsystems we're interested in right now, the console
- // and windows subsystems. These both implicitly have different entry
- // points (starting symbols). The console entry point starts with
- // `mainCRTStartup` and the windows entry point starts with
- // `WinMainCRTStartup`. These entry points, defined in system libraries,
- // will then later probe for either `main` or `WinMain`, respectively to
- // start the application.
- //
- // In Rust we just always generate a `main` function so we want control
- // to always start there, so we force the entry point on the windows
- // subsystem to be `mainCRTStartup` to get everything booted up
- // correctly.
- //
- // For more information see RFC #1665
- if subsystem == "windows" {
- self.cmd.arg("/ENTRY:mainCRTStartup");
- }
- }
-
- fn finalize(&mut self) -> Command {
- let mut cmd = Command::new("");
- ::std::mem::swap(&mut cmd, &mut self.cmd);
- cmd
- }
-
- // MSVC doesn't need group indicators
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
- fn cross_lang_lto(&mut self) {
- // Do nothing
- }
-}
-
-pub struct EmLinker<'a> {
- cmd: Command,
- sess: &'a Session,
- info: &'a LinkerInfo
-}
-
-impl<'a> Linker for EmLinker<'a> {
- fn include_path(&mut self, path: &Path) {
- self.cmd.arg("-L").arg(path);
- }
-
- fn link_staticlib(&mut self, lib: &str) {
- self.cmd.arg("-l").arg(lib);
- }
-
- fn output_filename(&mut self, path: &Path) {
- self.cmd.arg("-o").arg(path);
- }
-
- fn add_object(&mut self, path: &Path) {
- self.cmd.arg(path);
- }
-
- fn link_dylib(&mut self, lib: &str) {
- // Emscripten always links statically
- self.link_staticlib(lib);
- }
-
- fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
- // not supported?
- self.link_staticlib(lib);
- }
-
- fn link_whole_rlib(&mut self, lib: &Path) {
- // not supported?
- self.link_rlib(lib);
- }
-
- fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
- self.link_dylib(lib);
- }
-
- fn link_rlib(&mut self, lib: &Path) {
- self.add_object(lib);
- }
-
- fn position_independent_executable(&mut self) {
- // noop
- }
-
- fn no_position_independent_executable(&mut self) {
- // noop
- }
-
- fn full_relro(&mut self) {
- // noop
- }
-
- fn partial_relro(&mut self) {
- // noop
- }
-
- fn no_relro(&mut self) {
- // noop
- }
-
- fn args(&mut self, args: &[String]) {
- self.cmd.args(args);
- }
-
- fn framework_path(&mut self, _path: &Path) {
- bug!("frameworks are not supported on Emscripten")
- }
-
- fn link_framework(&mut self, _framework: &str) {
- bug!("frameworks are not supported on Emscripten")
- }
-
- fn gc_sections(&mut self, _keep_metadata: bool) {
- // noop
- }
-
- fn optimize(&mut self) {
- // Emscripten performs own optimizations
- self.cmd.arg(match self.sess.opts.optimize {
- OptLevel::No => "-O0",
- OptLevel::Less => "-O1",
- OptLevel::Default => "-O2",
- OptLevel::Aggressive => "-O3",
- OptLevel::Size => "-Os",
- OptLevel::SizeMin => "-Oz"
- });
- // Unusable until https://github.com/rust-lang/rust/issues/38454 is resolved
- self.cmd.args(&["--memory-init-file", "0"]);
- }
-
- fn pgo_gen(&mut self) {
- // noop, but maybe we need something like the gnu linker?
- }
-
- fn debuginfo(&mut self) {
- // Preserve names or generate source maps depending on debug info
- self.cmd.arg(match self.sess.opts.debuginfo {
- DebugInfo::None => "-g0",
- DebugInfo::Limited => "-g3",
- DebugInfo::Full => "-g4"
- });
- }
-
- fn no_default_libraries(&mut self) {
- self.cmd.args(&["-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[]"]);
- }
-
- fn build_dylib(&mut self, _out_filename: &Path) {
- bug!("building dynamic library is unsupported on Emscripten")
- }
-
- fn build_static_executable(&mut self) {
- // noop
- }
-
- fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
- let symbols = &self.info.exports[&crate_type];
-
- debug!("EXPORTED SYMBOLS:");
-
- self.cmd.arg("-s");
-
- let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
- let mut encoded = String::new();
-
- {
- let mut encoder = json::Encoder::new(&mut encoded);
- let res = encoder.emit_seq(symbols.len(), |encoder| {
- for (i, sym) in symbols.iter().enumerate() {
- encoder.emit_seq_elt(i, |encoder| {
- encoder.emit_str(&("_".to_string() + sym))
- })?;
- }
- Ok(())
- });
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to encode exported symbols: {}", e));
- }
- }
- debug!("{}", encoded);
- arg.push(encoded);
-
- self.cmd.arg(arg);
- }
-
- fn subsystem(&mut self, _subsystem: &str) {
- // noop
- }
-
- fn finalize(&mut self) -> Command {
- let mut cmd = Command::new("");
- ::std::mem::swap(&mut cmd, &mut self.cmd);
- cmd
- }
-
- // Appears not necessary on Emscripten
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
- fn cross_lang_lto(&mut self) {
- // Do nothing
- }
-}
-
-fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec<String> {
- let mut symbols = Vec::new();
-
- let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
- for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() {
- if level.is_below_threshold(export_threshold) {
- symbols.push(symbol.symbol_name(tcx).to_string());
- }
- }
-
- let formats = tcx.sess.dependency_formats.borrow();
- let deps = formats[&crate_type].iter();
-
- for (index, dep_format) in deps.enumerate() {
- let cnum = CrateNum::new(index + 1);
- // For each dependency that we are linking to statically ...
- if *dep_format == Linkage::Static {
- // ... we add its symbol list to our export list.
- for &(symbol, level) in tcx.exported_symbols(cnum).iter() {
- if level.is_below_threshold(export_threshold) {
- symbols.push(symbol.symbol_name(tcx).to_string());
- }
- }
- }
- }
-
- symbols
-}
-
-pub struct WasmLd<'a> {
- cmd: Command,
- sess: &'a Session,
- info: &'a LinkerInfo,
-}
-
-impl<'a> Linker for WasmLd<'a> {
- fn link_dylib(&mut self, lib: &str) {
- self.cmd.arg("-l").arg(lib);
- }
-
- fn link_staticlib(&mut self, lib: &str) {
- self.cmd.arg("-l").arg(lib);
- }
-
- fn link_rlib(&mut self, lib: &Path) {
- self.cmd.arg(lib);
- }
-
- fn include_path(&mut self, path: &Path) {
- self.cmd.arg("-L").arg(path);
- }
-
- fn framework_path(&mut self, _path: &Path) {
- panic!("frameworks not supported")
- }
-
- fn output_filename(&mut self, path: &Path) {
- self.cmd.arg("-o").arg(path);
- }
-
- fn add_object(&mut self, path: &Path) {
- self.cmd.arg(path);
- }
-
- fn position_independent_executable(&mut self) {
- }
-
- fn full_relro(&mut self) {
- }
-
- fn partial_relro(&mut self) {
- }
-
- fn no_relro(&mut self) {
- }
-
- fn build_static_executable(&mut self) {
- }
-
- fn args(&mut self, args: &[String]) {
- self.cmd.args(args);
- }
-
- fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
- self.cmd.arg("-l").arg(lib);
- }
-
- fn link_framework(&mut self, _framework: &str) {
- panic!("frameworks not supported")
- }
-
- fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
- self.cmd.arg("-l").arg(lib);
- }
-
- fn link_whole_rlib(&mut self, lib: &Path) {
- self.cmd.arg(lib);
- }
-
- fn gc_sections(&mut self, _keep_metadata: bool) {
- self.cmd.arg("--gc-sections");
- }
-
- fn optimize(&mut self) {
- self.cmd.arg(match self.sess.opts.optimize {
- OptLevel::No => "-O0",
- OptLevel::Less => "-O1",
- OptLevel::Default => "-O2",
- OptLevel::Aggressive => "-O3",
- // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2`
- // instead.
- OptLevel::Size => "-O2",
- OptLevel::SizeMin => "-O2"
- });
- }
-
- fn pgo_gen(&mut self) {
- }
-
- fn debuginfo(&mut self) {
- }
-
- fn no_default_libraries(&mut self) {
- }
-
- fn build_dylib(&mut self, _out_filename: &Path) {
- }
-
- fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
- for sym in self.info.exports[&crate_type].iter() {
- self.cmd.arg("--export").arg(&sym);
- }
- }
-
- fn subsystem(&mut self, _subsystem: &str) {
- }
-
- fn no_position_independent_executable(&mut self) {
- }
-
- fn finalize(&mut self) -> Command {
- // There have been reports in the wild (rustwasm/wasm-bindgen#119) of
- // using threads causing weird hangs and bugs. Disable it entirely as
- // this isn't yet the bottleneck of compilation at all anyway.
- self.cmd.arg("--no-threads");
-
- // By default LLD only gives us one page of stack (64k) which is a
- // little small. Default to a larger stack closer to other PC platforms
- // (1MB) and users can always inject their own link-args to override this.
- self.cmd.arg("-z").arg("stack-size=1048576");
-
- // By default LLD's memory layout is:
- //
- // 1. First, a blank page
- // 2. Next, all static data
- // 3. Finally, the main stack (which grows down)
- //
- // This has the unfortunate consequence that on stack overflows you
- // corrupt static data and can cause some exceedingly weird bugs. To
- // help detect this a little sooner we instead request that the stack is
- // placed before static data.
- //
- // This means that we'll generate slightly larger binaries as references
- // to static data will take more bytes in the ULEB128 encoding, but
- // stack overflow will be guaranteed to trap as it underflows instead of
- // corrupting static data.
- self.cmd.arg("--stack-first");
-
- // FIXME we probably shouldn't pass this but instead pass an explicit
- // whitelist of symbols we'll allow to be undefined. Unfortunately
- // though we can't handle symbols like `log10` that LLVM injects at a
- // super late date without actually parsing object files. For now let's
- // stick to this and hopefully fix it before stabilization happens.
- self.cmd.arg("--allow-undefined");
-
- // For now we just never have an entry symbol
- self.cmd.arg("--no-entry");
-
- // Make the default table accessible
- self.cmd.arg("--export-table");
-
- // Rust code should never have warnings, and warnings are often
- // indicative of bugs, let's prevent them.
- self.cmd.arg("--fatal-warnings");
-
- let mut cmd = Command::new("");
- ::std::mem::swap(&mut cmd, &mut self.cmd);
- cmd
- }
-
- // Not needed for now with LLD
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
- fn cross_lang_lto(&mut self) {
- // Do nothing for now
- }
-}
// except according to those terms.
use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION};
-use back::symbol_export;
use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext};
use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename};
use errors::{FatalError, Handler};
use rustc::session::config::{self, Lto};
use rustc::util::common::time_ext;
use rustc_data_structures::fx::FxHashMap;
+use rustc_codegen_utils::symbol_export;
use time_graph::Timeline;
use {ModuleCodegen, ModuleLlvm, ModuleKind};
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc_data_structures::sync::Lrc;
-use std::sync::Arc;
-
-use monomorphize::Instance;
-use rustc::hir;
-use rustc::hir::Node;
-use rustc::hir::CodegenFnAttrFlags;
-use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
-use rustc_data_structures::fingerprint::Fingerprint;
-use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadata_symbol_name};
-use rustc::session::config;
-use rustc::ty::{TyCtxt, SymbolName};
-use rustc::ty::query::Providers;
-use rustc::ty::subst::Substs;
-use rustc::util::nodemap::{FxHashMap, DefIdMap};
-use rustc_allocator::ALLOCATOR_METHODS;
-use rustc_data_structures::indexed_vec::IndexVec;
-use std::collections::hash_map::Entry::*;
-
-pub type ExportedSymbols = FxHashMap<
- CrateNum,
- Arc<Vec<(String, SymbolExportLevel)>>,
->;
-
-pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel {
- crates_export_threshold(&tcx.sess.crate_types.borrow())
-}
-
-fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel {
- match crate_type {
- config::CrateType::Executable |
- config::CrateType::Staticlib |
- config::CrateType::ProcMacro |
- config::CrateType::Cdylib => SymbolExportLevel::C,
- config::CrateType::Rlib |
- config::CrateType::Dylib => SymbolExportLevel::Rust,
- }
-}
-
-pub fn crates_export_threshold(crate_types: &[config::CrateType])
- -> SymbolExportLevel {
- if crate_types.iter().any(|&crate_type| {
- crate_export_threshold(crate_type) == SymbolExportLevel::Rust
- }) {
- SymbolExportLevel::Rust
- } else {
- SymbolExportLevel::C
- }
-}
-
-fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- cnum: CrateNum)
- -> Lrc<DefIdMap<SymbolExportLevel>>
-{
- assert_eq!(cnum, LOCAL_CRATE);
-
- if !tcx.sess.opts.output_types.should_codegen() {
- return Lrc::new(DefIdMap())
- }
-
- // Check to see if this crate is a "special runtime crate". These
- // crates, implementation details of the standard library, typically
- // have a bunch of `pub extern` and `#[no_mangle]` functions as the
- // ABI between them. We don't want their symbols to have a `C`
- // export level, however, as they're just implementation details.
- // Down below we'll hardwire all of the symbols to the `Rust` export
- // level instead.
- let special_runtime_crate = tcx.is_panic_runtime(LOCAL_CRATE) ||
- tcx.is_compiler_builtins(LOCAL_CRATE);
-
- let mut reachable_non_generics: DefIdMap<_> = tcx.reachable_set(LOCAL_CRATE).0
- .iter()
- .filter_map(|&node_id| {
- // We want to ignore some FFI functions that are not exposed from
- // this crate. Reachable FFI functions can be lumped into two
- // categories:
- //
- // 1. Those that are included statically via a static library
- // 2. Those included otherwise (e.g. dynamically or via a framework)
- //
- // Although our LLVM module is not literally emitting code for the
- // statically included symbols, it's an export of our library which
- // needs to be passed on to the linker and encoded in the metadata.
- //
- // As a result, if this id is an FFI item (foreign item) then we only
- // let it through if it's included statically.
- match tcx.hir.get(node_id) {
- Node::ForeignItem(..) => {
- let def_id = tcx.hir.local_def_id(node_id);
- if tcx.is_statically_included_foreign_item(def_id) {
- Some(def_id)
- } else {
- None
- }
- }
-
- // Only consider nodes that actually have exported symbols.
- Node::Item(&hir::Item {
- node: hir::ItemKind::Static(..),
- ..
- }) |
- Node::Item(&hir::Item {
- node: hir::ItemKind::Fn(..), ..
- }) |
- Node::ImplItem(&hir::ImplItem {
- node: hir::ImplItemKind::Method(..),
- ..
- }) => {
- let def_id = tcx.hir.local_def_id(node_id);
- let generics = tcx.generics_of(def_id);
- if !generics.requires_monomorphization(tcx) &&
- // Functions marked with #[inline] are only ever codegened
- // with "internal" linkage and are never exported.
- !Instance::mono(tcx, def_id).def.requires_local(tcx) {
- Some(def_id)
- } else {
- None
- }
- }
-
- _ => None
- }
- })
- .map(|def_id| {
- let export_level = if special_runtime_crate {
- let name = tcx.symbol_name(Instance::mono(tcx, def_id)).as_str();
- // We can probably do better here by just ensuring that
- // it has hidden visibility rather than public
- // visibility, as this is primarily here to ensure it's
- // not stripped during LTO.
- //
- // In general though we won't link right if these
- // symbols are stripped, and LTO currently strips them.
- if &*name == "rust_eh_personality" ||
- &*name == "rust_eh_register_frames" ||
- &*name == "rust_eh_unregister_frames" {
- SymbolExportLevel::C
- } else {
- SymbolExportLevel::Rust
- }
- } else {
- symbol_export_level(tcx, def_id)
- };
- debug!("EXPORTED SYMBOL (local): {} ({:?})",
- tcx.symbol_name(Instance::mono(tcx, def_id)),
- export_level);
- (def_id, export_level)
- })
- .collect();
-
- if let Some(id) = *tcx.sess.derive_registrar_fn.get() {
- let def_id = tcx.hir.local_def_id(id);
- reachable_non_generics.insert(def_id, SymbolExportLevel::C);
- }
-
- if let Some(id) = *tcx.sess.plugin_registrar_fn.get() {
- let def_id = tcx.hir.local_def_id(id);
- reachable_non_generics.insert(def_id, SymbolExportLevel::C);
- }
-
- Lrc::new(reachable_non_generics)
-}
-
-fn is_reachable_non_generic_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> bool {
- let export_threshold = threshold(tcx);
-
- if let Some(&level) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
- level.is_below_threshold(export_threshold)
- } else {
- false
- }
-}
-
-fn is_reachable_non_generic_provider_extern<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> bool {
- tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
-}
-
-fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- cnum: CrateNum)
- -> Arc<Vec<(ExportedSymbol<'tcx>,
- SymbolExportLevel)>>
-{
- assert_eq!(cnum, LOCAL_CRATE);
-
- if !tcx.sess.opts.output_types.should_codegen() {
- return Arc::new(vec![])
- }
-
- let mut symbols: Vec<_> = tcx.reachable_non_generics(LOCAL_CRATE)
- .iter()
- .map(|(&def_id, &level)| {
- (ExportedSymbol::NonGeneric(def_id), level)
- })
- .collect();
-
- if tcx.sess.entry_fn.borrow().is_some() {
- let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new("main"));
-
- symbols.push((exported_symbol, SymbolExportLevel::C));
- }
-
- if tcx.sess.allocator_kind.get().is_some() {
- for method in ALLOCATOR_METHODS {
- let symbol_name = format!("__rust_{}", method.name);
- let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));
-
- symbols.push((exported_symbol, SymbolExportLevel::Rust));
- }
- }
-
- if tcx.sess.opts.debugging_opts.pgo_gen.is_some() {
- // These are weak symbols that point to the profile version and the
- // profile name, which need to be treated as exported so LTO doesn't nix
- // them.
- const PROFILER_WEAK_SYMBOLS: [&'static str; 2] = [
- "__llvm_profile_raw_version",
- "__llvm_profile_filename",
- ];
- for sym in &PROFILER_WEAK_SYMBOLS {
- let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym));
- symbols.push((exported_symbol, SymbolExportLevel::C));
- }
- }
-
- if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) {
- let symbol_name = metadata_symbol_name(tcx);
- let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));
-
- symbols.push((exported_symbol, SymbolExportLevel::Rust));
- }
-
- if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() {
- use rustc::mir::mono::{Linkage, Visibility, MonoItem};
- use rustc::ty::InstanceDef;
-
- // Normally, we require that shared monomorphizations are not hidden,
- // because if we want to re-use a monomorphization from a Rust dylib, it
- // needs to be exported.
- // However, on platforms that don't allow for Rust dylibs, having
- // external linkage is enough for monomorphization to be linked to.
- let need_visibility = tcx.sess.target.target.options.dynamic_linking &&
- !tcx.sess.target.target.options.only_cdylib;
-
- let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
-
- for (mono_item, &(linkage, visibility)) in cgus.iter()
- .flat_map(|cgu| cgu.items().iter()) {
- if linkage != Linkage::External {
- // We can only re-use things with external linkage, otherwise
- // we'll get a linker error
- continue
- }
-
- if need_visibility && visibility == Visibility::Hidden {
- // If we potentially share things from Rust dylibs, they must
- // not be hidden
- continue
- }
-
- if let &MonoItem::Fn(Instance {
- def: InstanceDef::Item(def_id),
- substs,
- }) = mono_item {
- if substs.types().next().is_some() {
- symbols.push((ExportedSymbol::Generic(def_id, substs),
- SymbolExportLevel::Rust));
- }
- }
- }
- }
-
- // Sort so we get a stable incr. comp. hash.
- symbols.sort_unstable_by(|&(ref symbol1, ..), &(ref symbol2, ..)| {
- symbol1.compare_stable(tcx, symbol2)
- });
-
- Arc::new(symbols)
-}
-
-fn upstream_monomorphizations_provider<'a, 'tcx>(
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- cnum: CrateNum)
- -> Lrc<DefIdMap<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>>
-{
- debug_assert!(cnum == LOCAL_CRATE);
-
- let cnums = tcx.all_crate_nums(LOCAL_CRATE);
-
- let mut instances: DefIdMap<FxHashMap<_, _>> = DefIdMap();
-
- let cnum_stable_ids: IndexVec<CrateNum, Fingerprint> = {
- let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO,
- cnums.len() + 1);
-
- for &cnum in cnums.iter() {
- cnum_stable_ids[cnum] = tcx.def_path_hash(DefId {
- krate: cnum,
- index: CRATE_DEF_INDEX,
- }).0;
- }
-
- cnum_stable_ids
- };
-
- for &cnum in cnums.iter() {
- for &(ref exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
- if let &ExportedSymbol::Generic(def_id, substs) = exported_symbol {
- let substs_map = instances.entry(def_id).or_default();
-
- match substs_map.entry(substs) {
- Occupied(mut e) => {
- // If there are multiple monomorphizations available,
- // we select one deterministically.
- let other_cnum = *e.get();
- if cnum_stable_ids[other_cnum] > cnum_stable_ids[cnum] {
- e.insert(cnum);
- }
- }
- Vacant(e) => {
- e.insert(cnum);
- }
- }
- }
- }
- }
-
- Lrc::new(instances.into_iter()
- .map(|(key, value)| (key, Lrc::new(value)))
- .collect())
-}
-
-fn upstream_monomorphizations_for_provider<'a, 'tcx>(
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> Option<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>
-{
- debug_assert!(!def_id.is_local());
- tcx.upstream_monomorphizations(LOCAL_CRATE)
- .get(&def_id)
- .cloned()
-}
-
-fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool {
- if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
- !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id)
- } else {
- bug!("is_unreachable_local_definition called with non-local DefId: {:?}",
- def_id)
- }
-}
-
-pub fn provide(providers: &mut Providers) {
- providers.reachable_non_generics = reachable_non_generics_provider;
- providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
- providers.exported_symbols = exported_symbols_provider_local;
- providers.upstream_monomorphizations = upstream_monomorphizations_provider;
- providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
-}
-
-pub fn provide_extern(providers: &mut Providers) {
- providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
- providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
-}
-
-fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel {
- // We export anything that's not mangled at the "C" layer as it probably has
- // to do with ABI concerns. We do not, however, apply such treatment to
- // special symbols in the standard library for various plumbing between
- // core/std/allocators/etc. For example symbols used to hook up allocation
- // are not considered for export
- let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
- let is_extern = codegen_fn_attrs.contains_extern_indicator();
- let std_internal =
- codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
-
- if is_extern && !std_internal {
- SymbolExportLevel::C
- } else {
- SymbolExportLevel::Rust
- }
-}
use attributes;
use back::bytecode::{self, RLIB_BYTECODE_EXTENSION};
-use back::lto::{self, ModuleBuffer, ThinBuffer, SerializedModule};
+use back::lto::{self, ThinBuffer, SerializedModule};
use back::link::{self, get_linker, remove};
-use back::command::Command;
-use back::linker::LinkerInfo;
-use back::symbol_export::ExportedSymbols;
use base;
use consts;
use memmap;
use rustc_fs_util::{path2cstr, link_or_copy};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::svh::Svh;
+use rustc_codegen_utils::command::Command;
+use rustc_codegen_utils::linker::LinkerInfo;
+use rustc_codegen_utils::symbol_export::ExportedSymbols;
use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId};
use errors::emitter::{Emitter};
use syntax::attr;
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
- let using_thin_buffers = llvm::LLVMRustThinLTOAvailable() && (config.emit_bc
- || config.obj_is_bitcode || config.emit_bc_compressed || config.embed_bitcode);
+ let using_thin_buffers = config.emit_bc || config.obj_is_bitcode
+ || config.emit_bc_compressed || config.embed_bitcode;
let mut have_name_anon_globals_pass = false;
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
- let thin;
- let old;
- let data = if llvm::LLVMRustThinLTOAvailable() {
- thin = ThinBuffer::new(llmod);
- thin.data()
- } else {
- old = ModuleBuffer::new(llmod);
- old.data()
- };
+ let thin = ThinBuffer::new(llmod);
+ let data = thin.data();
timeline.record("make-bc");
if write_bc {
// builds we don't actually want to LTO the allocator modules if
// it shows up. This is due to various linker shenanigans that
// we'll encounter later.
- //
- // Additionally here's where we also factor in the current LLVM
- // version. If it doesn't support ThinLTO we skip this.
Lto::ThinLocal => {
- module.kind != ModuleKind::Allocator &&
- unsafe { llvm::LLVMRustThinLTOAvailable() }
+ module.kind != ModuleKind::Allocator
}
};
},
CodegenComplete,
CodegenItem,
+ CodegenAborted,
}
struct Diagnostic {
let mut needs_lto = Vec::new();
let mut lto_import_only_modules = Vec::new();
let mut started_lto = false;
+ let mut codegen_aborted = false;
// This flag tracks whether all items have gone through codegens
let mut codegen_done = false;
let mut llvm_start_time = None;
// Run the message loop while there's still anything that needs message
- // processing:
+ // processing. Note that as soon as codegen is aborted we simply want to
+ // wait for all existing work to finish, so many of the conditions here
+ // only apply if codegen hasn't been aborted as they represent pending
+ // work to be done.
while !codegen_done ||
- work_items.len() > 0 ||
running > 0 ||
- needs_lto.len() > 0 ||
- lto_import_only_modules.len() > 0 ||
- main_thread_worker_state != MainThreadWorkerState::Idle {
+ (!codegen_aborted && (
+ work_items.len() > 0 ||
+ needs_lto.len() > 0 ||
+ lto_import_only_modules.len() > 0 ||
+ main_thread_worker_state != MainThreadWorkerState::Idle
+ ))
+ {
// While there are still CGUs to be codegened, the coordinator has
// to decide how to utilize the compiler processes implicit Token:
spawn_work(cgcx, item);
}
}
+ } else if codegen_aborted {
+ // don't queue up any more work if codegen was aborted, we're
+ // just waiting for our existing children to finish
} else {
// If we've finished everything related to normal codegen
// then it must be the case that we've got some LTO work to do.
// Spin up what work we can, only doing this while we've got available
// parallelism slots and work left to spawn.
- while work_items.len() > 0 && running < tokens.len() {
+ while !codegen_aborted && work_items.len() > 0 && running < tokens.len() {
let (item, _) = work_items.pop().unwrap();
maybe_start_llvm_timer(cgcx.config(item.module_kind()),
if !cgcx.opts.debugging_opts.no_parallel_llvm {
helper.request_token();
}
+ assert!(!codegen_aborted);
assert_eq!(main_thread_worker_state,
MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
Message::CodegenComplete => {
codegen_done = true;
+ assert!(!codegen_aborted);
assert_eq!(main_thread_worker_state,
MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
}
+ // If codegen is aborted that means translation was aborted due
+ // to some normal-ish compiler error. In this situation we want
+ // to exit as soon as possible, but we want to make sure all
+ // existing work has finished. Flag codegen as being done, and
+ // then conditions above will ensure no more work is spawned but
+ // we'll keep executing this loop until `running` hits 0.
+ Message::CodegenAborted => {
+ assert!(!codegen_aborted);
+ codegen_done = true;
+ codegen_aborted = true;
+ assert_eq!(main_thread_worker_state,
+ MainThreadWorkerState::Codegenning);
+ }
+
// If a thread exits successfully then we drop a token associated
// with that worker and update our `running` count. We may later
// re-acquire a token to continue running more work. We may also not
drop(self.coordinator_send.send(Box::new(Message::CodegenComplete)));
}
+ /// Consume this context indicating that codegen was entirely aborted, and
+ /// we need to exit as quickly as possible.
+ ///
+ /// This method blocks the current thread until all worker threads have
+ /// finished, and all worker threads should have exited or be real close to
+ /// exiting at this point.
+ pub fn codegen_aborted(self) {
+ // Signal to the coordinator it should spawn no more work and start
+ // shutdown.
+ drop(self.coordinator_send.send(Box::new(Message::CodegenAborted)));
+ drop(self.future.join());
+ }
+
pub fn check_for_errors(&self, sess: &Session) {
self.shared_emitter_main.check(sess, false);
}
}
}
+// impl Drop for OngoingCodegen {
+// fn drop(&mut self) {
+// }
+// }
+
pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt,
module: ModuleCodegen,
cost: u64) {
use builder::{Builder, MemFlags};
use callee;
use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
-use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode};
use rustc_mir::monomorphize::item::DefPathBasedNames;
use common::{C_struct_in_context, C_array, val_ty};
use consts;
use meth;
use mir;
use monomorphize::Instance;
-use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt};
+use monomorphize::partitioning::{CodegenUnit, CodegenUnitExt};
use rustc_codegen_utils::symbol_names_test;
use time_graph;
-use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt};
+use mono_item::{MonoItem, MonoItemExt};
use type_::Type;
use type_of::LayoutLlvmExt;
-use rustc::util::nodemap::{FxHashMap, DefIdSet};
+use rustc::util::nodemap::FxHashMap;
use CrateInfo;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::sync::Lrc;
use std::any::Any;
+use std::cmp;
use std::ffi::CString;
-use std::sync::Arc;
-use std::time::{Instant, Duration};
use std::i32;
-use std::cmp;
+use std::ops::{Deref, DerefMut};
use std::sync::mpsc;
+use std::time::{Instant, Duration};
use syntax_pos::Span;
use syntax_pos::symbol::InternedString;
use syntax::attr;
// regions must appear in the argument
// listing.
let main_ret_ty = cx.tcx.erase_regions(
- &main_ret_ty.no_late_bound_regions().unwrap(),
+ &main_ret_ty.no_bound_vars().unwrap(),
);
if declare::get_defined_value(cx, "main").is_some() {
{
check_for_rustc_errors_attr(tcx);
- if let Some(true) = tcx.sess.opts.debugging_opts.thinlto {
- if unsafe { !llvm::LLVMRustThinLTOAvailable() } {
- tcx.sess.fatal("this compiler's LLVM does not support ThinLTO");
- }
- }
-
- if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() ||
- !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) &&
- unsafe { !llvm::LLVMRustPGOAvailable() }
- {
- tcx.sess.fatal("this compiler's LLVM does not support PGO");
- }
-
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Codegen the metadata.
metadata,
rx,
codegen_units.len());
+ let ongoing_codegen = AbortCodegenOnDrop(Some(ongoing_codegen));
// Codegen an allocator shim, if necessary.
//
ongoing_codegen.check_for_errors(tcx.sess);
assert_and_save_dep_graph(tcx);
- ongoing_codegen
+ ongoing_codegen.into_inner()
}
-fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- time(tcx.sess,
- "assert dep graph",
- || rustc_incremental::assert_dep_graph(tcx));
-
- time(tcx.sess,
- "serialize dep graph",
- || rustc_incremental::save_dep_graph(tcx));
+/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
+/// when it's dropped abnormally.
+///
+/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
+/// stumbled upon. The segfault was never reproduced locally, but it was
+/// suspected to be releated to the fact that codegen worker threads were
+/// sticking around by the time the main thread was exiting, causing issues.
+///
+/// This structure is an attempt to fix that issue where the `codegen_aborted`
+/// message will block until all workers have finished. This should ensure that
+/// even if the main codegen thread panics we'll wait for pending work to
+/// complete before returning from the main thread, hopefully avoiding
+/// segfaults.
+///
+/// If you see this comment in the code, then it means that this workaround
+/// worked! We may yet one day track down the mysterious cause of that
+/// segfault...
+struct AbortCodegenOnDrop(Option<OngoingCodegen>);
+
+impl AbortCodegenOnDrop {
+ fn into_inner(mut self) -> OngoingCodegen {
+ self.0.take().unwrap()
+ }
}
-fn collect_and_partition_mono_items<'a, 'tcx>(
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- cnum: CrateNum,
-) -> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>)
-{
- assert_eq!(cnum, LOCAL_CRATE);
-
- let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
- Some(ref s) => {
- let mode_string = s.to_lowercase();
- let mode_string = mode_string.trim();
- if mode_string == "eager" {
- MonoItemCollectionMode::Eager
- } else {
- if mode_string != "lazy" {
- let message = format!("Unknown codegen-item collection mode '{}'. \
- Falling back to 'lazy' mode.",
- mode_string);
- tcx.sess.warn(&message);
- }
-
- MonoItemCollectionMode::Lazy
- }
- }
- None => {
- if tcx.sess.opts.cg.link_dead_code {
- MonoItemCollectionMode::Eager
- } else {
- MonoItemCollectionMode::Lazy
- }
- }
- };
-
- let (items, inlining_map) =
- time(tcx.sess, "monomorphization collection", || {
- collector::collect_crate_mono_items(tcx, collection_mode)
- });
-
- tcx.sess.abort_if_errors();
-
- ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter());
-
- let strategy = if tcx.sess.opts.incremental.is_some() {
- PartitioningStrategy::PerModule
- } else {
- PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units())
- };
-
- let codegen_units = time(tcx.sess, "codegen unit partitioning", || {
- partitioning::partition(tcx,
- items.iter().cloned(),
- strategy,
- &inlining_map)
- .into_iter()
- .map(Arc::new)
- .collect::<Vec<_>>()
- });
-
- let mono_items: DefIdSet = items.iter().filter_map(|mono_item| {
- match *mono_item {
- MonoItem::Fn(ref instance) => Some(instance.def_id()),
- MonoItem::Static(def_id) => Some(def_id),
- _ => None,
- }
- }).collect();
-
- if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
- let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
-
- for cgu in &codegen_units {
- for (&mono_item, &linkage) in cgu.items() {
- item_to_cgus.entry(mono_item)
- .or_default()
- .push((cgu.name().clone(), linkage));
- }
- }
+impl Deref for AbortCodegenOnDrop {
+ type Target = OngoingCodegen;
- let mut item_keys: Vec<_> = items
- .iter()
- .map(|i| {
- let mut output = i.to_string(tcx);
- output.push_str(" @@");
- let mut empty = Vec::new();
- let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
- cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone());
- cgus.dedup();
- for &(ref cgu_name, (linkage, _)) in cgus.iter() {
- output.push_str(" ");
- output.push_str(&cgu_name.as_str());
-
- let linkage_abbrev = match linkage {
- Linkage::External => "External",
- Linkage::AvailableExternally => "Available",
- Linkage::LinkOnceAny => "OnceAny",
- Linkage::LinkOnceODR => "OnceODR",
- Linkage::WeakAny => "WeakAny",
- Linkage::WeakODR => "WeakODR",
- Linkage::Appending => "Appending",
- Linkage::Internal => "Internal",
- Linkage::Private => "Private",
- Linkage::ExternalWeak => "ExternalWeak",
- Linkage::Common => "Common",
- };
-
- output.push_str("[");
- output.push_str(linkage_abbrev);
- output.push_str("]");
- }
- output
- })
- .collect();
+ fn deref(&self) -> &OngoingCodegen {
+ self.0.as_ref().unwrap()
+ }
+}
- item_keys.sort();
+impl DerefMut for AbortCodegenOnDrop {
+ fn deref_mut(&mut self) -> &mut OngoingCodegen {
+ self.0.as_mut().unwrap()
+ }
+}
- for item in item_keys {
- println!("MONO_ITEM {}", item);
+impl Drop for AbortCodegenOnDrop {
+ fn drop(&mut self) {
+ if let Some(codegen) = self.0.take() {
+ codegen.codegen_aborted();
}
}
+}
- (Arc::new(mono_items), Arc::new(codegen_units))
+fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+ time(tcx.sess,
+ "assert dep graph",
+ || rustc_incremental::assert_dep_graph(tcx));
+
+ time(tcx.sess,
+ "serialize dep graph",
+ || rustc_incremental::save_dep_graph(tcx));
}
impl CrateInfo {
}
}
-fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool {
- let (all_mono_items, _) =
- tcx.collect_and_partition_mono_items(LOCAL_CRATE);
- all_mono_items.contains(&id)
-}
-
fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats {
}
}
-pub fn provide(providers: &mut Providers) {
- providers.collect_and_partition_mono_items =
- collect_and_partition_mono_items;
-
- providers.is_codegened_item = is_codegened_item;
-
- providers.codegen_unit = |tcx, name| {
- let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
- all.iter()
- .find(|cgu| *cgu.name() == name)
- .cloned()
- .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
- };
-
- provide_extern(providers);
-}
-
-pub fn provide_extern(providers: &mut Providers) {
+pub fn provide_both(providers: &mut Providers) {
providers.dllimport_foreign_items = |tcx, krate| {
let module_map = tcx.foreign_modules(krate);
let module_map = module_map.iter()
}
}
- pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value {
+ pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, size: Size) -> &'ll Value {
self.count_insn("load.atomic");
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order);
- // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here?
- // However, 64-bit atomic loads on `i686-apple-darwin` appear to
- // require `___atomic_load` with ABI-alignment, so it's staying.
- llvm::LLVMSetAlignment(load, align.pref() as c_uint);
+ // LLVM requires the alignment of atomic loads to be at least the size of the type.
+ llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
load
}
}
}
pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value,
- order: AtomicOrdering, align: Align) {
+ order: AtomicOrdering, size: Size) {
debug!("Store {:?} -> {:?}", val, ptr);
self.count_insn("store.atomic");
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order);
- // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here?
- // Also see `atomic_load` for more context.
- llvm::LLVMSetAlignment(store, align.pref() as c_uint);
+ // LLVM requires the alignment of atomic stores to be at least the size of the type.
+ llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
}
}
debug!("get_fn(instance={:?})", instance);
assert!(!instance.substs.needs_infer());
- assert!(!instance.substs.has_escaping_regions());
+ assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types());
let sig = instance.fn_sig(cx.tcx);
}
}
-impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> {
+impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
fn data_layout(&self) -> &ty::layout::TargetDataLayout {
&self.tcx.data_layout
}
}
-impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> {
+impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target.target
}
}
-impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> {
- fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
+impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.tcx
}
}
-impl LayoutOf for &'a CodegenCx<'ll, 'tcx> {
+impl LayoutOf for CodegenCx<'ll, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = TyLayout<'tcx>;
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))
.unwrap_or_else(|e| if let LayoutError::SizeOverflow(_) = e {
self.sess().fatal(&e.to_string())
use rustc::ich::NodeIdHashingMode;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc::ty::Instance;
-use common::CodegenCx;
+use common::{CodegenCx, C_u64};
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, LayoutOf, PrimitiveExt, Size, TyLayout};
+use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf,
+ PrimitiveExt, Size, TyLayout};
use rustc::session::config;
use rustc::util::nodemap::FxHashMap;
use rustc_fs_util::path2cstr;
unfinished_type: Ty<'tcx>,
unique_type_id: UniqueTypeId,
metadata_stub: &'ll DICompositeType,
+ member_holding_stub: &'ll DICompositeType,
member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
},
FinalMetadata(&'ll DICompositeType)
unfinished_type: Ty<'tcx>,
unique_type_id: UniqueTypeId,
metadata_stub: &'ll DICompositeType,
+ member_holding_stub: &'ll DICompositeType,
member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
) -> RecursiveTypeDescription<'ll, 'tcx> {
unfinished_type,
unique_type_id,
metadata_stub,
+ member_holding_stub,
member_description_factory,
}
}
unfinished_type,
unique_type_id,
metadata_stub,
+ member_holding_stub,
ref member_description_factory,
} => {
// Make sure that we have a forward declaration of the type in
// ... and attach them to the stub to complete it.
set_members_of_composite_type(cx,
- metadata_stub,
+ member_holding_stub,
member_descriptions);
return MetadataCreationResult::new(metadata_stub, true);
}
size: pointer_size,
align: pointer_align,
flags: DIFlags::FlagZero,
+ discriminant: None,
},
MemberDescription {
name: "length".to_owned(),
size: usize_size,
align: usize_align,
flags: DIFlags::FlagZero,
+ discriminant: None,
},
];
size: data_ptr_field.size,
align: data_ptr_field.align,
flags: DIFlags::FlagArtificial,
+ discriminant: None,
},
MemberDescription {
name: "vtable".to_owned(),
size: vtable_field.size,
align: vtable_field.align,
flags: DIFlags::FlagArtificial,
+ discriminant: None,
},
];
size: Size,
align: Align,
flags: DIFlags,
+ discriminant: Option<u64>,
}
// A factory for MemberDescriptions. It produces a list of member descriptions
size,
align,
flags: DIFlags::FlagZero,
+ discriminant: None,
}
}).collect()
}
struct_type,
unique_type_id,
struct_metadata_stub,
+ struct_metadata_stub,
StructMDF(StructMemberDescriptionFactory {
ty: struct_type,
variant,
size,
align,
flags: DIFlags::FlagZero,
+ discriminant: None,
}
}).collect()
}
) -> RecursiveTypeDescription<'ll, 'tcx> {
let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
+ let struct_stub = create_struct_stub(cx,
+ tuple_type,
+ &tuple_name[..],
+ unique_type_id,
+ NO_SCOPE_METADATA);
+
create_and_register_recursive_type_forward_declaration(
cx,
tuple_type,
unique_type_id,
- create_struct_stub(cx,
- tuple_type,
- &tuple_name[..],
- unique_type_id,
- NO_SCOPE_METADATA),
+ struct_stub,
+ struct_stub,
TupleMDF(TupleMemberDescriptionFactory {
ty: tuple_type,
component_types: component_types.to_vec(),
size,
align,
flags: DIFlags::FlagZero,
+ discriminant: None,
}
}).collect()
}
union_type,
unique_type_id,
union_metadata_stub,
+ union_metadata_stub,
UnionMDF(UnionMemberDescriptionFactory {
layout: cx.layout_of(union_type),
variant,
// Enums
//=-----------------------------------------------------------------------------
+// DWARF variant support is only available starting in LLVM 7.
+// Although the earlier enum debug info output did not work properly
+// in all situations, it is better for the time being to continue to
+// sometimes emit the old style rather than emit something completely
+// useless when rust is compiled against LLVM 6 or older. This
+// function decides which representation will be emitted.
+fn use_enum_fallback(cx: &CodegenCx) -> bool {
+ // On MSVC we have to use the fallback mode, because LLVM doesn't
+ // lower variant parts to PDB.
+ return cx.sess().target.target.options.is_like_msvc || unsafe {
+ llvm::LLVMRustVersionMajor() < 7
+ };
+}
+
// Describes the members of an enum value: An enum is described as a union of
// structs in DWARF. This MemberDescriptionFactory provides the description for
// the members of this union; so for every variant of the given enum, this
fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
-> Vec<MemberDescription<'ll>> {
let adt = &self.enum_type.ty_adt_def().unwrap();
+
+ // This will always find the metadata in the type map.
+ let fallback = use_enum_fallback(cx);
+ let self_metadata = if fallback {
+ self.containing_scope
+ } else {
+ type_metadata(cx, self.enum_type, self.span)
+ };
+
match self.layout.variants {
layout::Variants::Single { .. } if adt.variants.is_empty() => vec![],
layout::Variants::Single { index } => {
self.layout,
&adt.variants[index],
NoDiscriminant,
- self.containing_scope,
+ self_metadata,
self.span);
let member_descriptions =
member_descriptions);
vec![
MemberDescription {
- name: String::new(),
+ name: if fallback {
+ String::new()
+ } else {
+ adt.variants[index].name.as_str().to_string()
+ },
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
align: self.layout.align,
- flags: DIFlags::FlagZero
+ flags: DIFlags::FlagZero,
+ discriminant: None,
}
]
}
layout::Variants::Tagged { ref variants, .. } => {
- let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
- .expect(""));
+ let discriminant_info = if fallback {
+ RegularDiscriminant(self.discriminant_type_metadata
+ .expect(""))
+ } else {
+ // This doesn't matter in this case.
+ NoDiscriminant
+ };
(0..variants.len()).map(|i| {
let variant = self.layout.for_variant(cx, i);
let (variant_type_metadata, member_desc_factory) =
variant,
&adt.variants[i],
discriminant_info,
- self.containing_scope,
+ self_metadata,
self.span);
let member_descriptions = member_desc_factory
variant_type_metadata,
member_descriptions);
MemberDescription {
- name: String::new(),
+ name: if fallback {
+ String::new()
+ } else {
+ adt.variants[i].name.as_str().to_string()
+ },
type_metadata: variant_type_metadata,
offset: Size::ZERO,
- size: variant.size,
- align: variant.align,
- flags: DIFlags::FlagZero
+ size: self.layout.size,
+ align: self.layout.align,
+ flags: DIFlags::FlagZero,
+ discriminant: Some(self.layout.ty.ty_adt_def().unwrap()
+ .discriminant_for_variant(cx.tcx, i)
+ .val as u64),
}
}).collect()
}
- layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. } => {
- let variant = self.layout.for_variant(cx, dataful_variant);
- // Create a description of the non-null variant
- let (variant_type_metadata, member_description_factory) =
- describe_enum_variant(cx,
- variant,
- &adt.variants[dataful_variant],
- OptimizedDiscriminant,
- self.containing_scope,
- self.span);
+ layout::Variants::NicheFilling {
+ ref niche_variants,
+ niche_start,
+ ref variants,
+ dataful_variant,
+ ..
+ } => {
+ if fallback {
+ let variant = self.layout.for_variant(cx, dataful_variant);
+ // Create a description of the non-null variant
+ let (variant_type_metadata, member_description_factory) =
+ describe_enum_variant(cx,
+ variant,
+ &adt.variants[dataful_variant],
+ OptimizedDiscriminant,
+ self.containing_scope,
+ self.span);
- let variant_member_descriptions =
- member_description_factory.create_member_descriptions(cx);
+ let variant_member_descriptions =
+ member_description_factory.create_member_descriptions(cx);
- set_members_of_composite_type(cx,
- variant_type_metadata,
- variant_member_descriptions);
-
- // Encode the information about the null variant in the union
- // member's name.
- let mut name = String::from("RUST$ENCODED$ENUM$");
- // HACK(eddyb) the debuggers should just handle offset+size
- // of discriminant instead of us having to recover its path.
- // Right now it's not even going to work for `niche_start > 0`,
- // and for multiple niche variants it only supports the first.
- fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- name: &mut String,
- layout: TyLayout<'tcx>,
- offset: Size,
- size: Size) {
- for i in 0..layout.fields.count() {
- let field_offset = layout.fields.offset(i);
- if field_offset > offset {
- continue;
- }
- let inner_offset = offset - field_offset;
- let field = layout.field(cx, i);
- if inner_offset + size <= field.size {
- write!(name, "{}$", i).unwrap();
- compute_field_path(cx, name, field, inner_offset, size);
+ set_members_of_composite_type(cx,
+ variant_type_metadata,
+ variant_member_descriptions);
+
+ // Encode the information about the null variant in the union
+ // member's name.
+ let mut name = String::from("RUST$ENCODED$ENUM$");
+ // Right now it's not even going to work for `niche_start > 0`,
+ // and for multiple niche variants it only supports the first.
+ fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
+ name: &mut String,
+ layout: TyLayout<'tcx>,
+ offset: Size,
+ size: Size) {
+ for i in 0..layout.fields.count() {
+ let field_offset = layout.fields.offset(i);
+ if field_offset > offset {
+ continue;
+ }
+ let inner_offset = offset - field_offset;
+ let field = layout.field(cx, i);
+ if inner_offset + size <= field.size {
+ write!(name, "{}$", i).unwrap();
+ compute_field_path(cx, name, field, inner_offset, size);
+ }
}
}
+ compute_field_path(cx, &mut name,
+ self.layout,
+ self.layout.fields.offset(0),
+ self.layout.field(cx, 0).size);
+ name.push_str(&adt.variants[*niche_variants.start()].name.as_str());
+
+ // Create the (singleton) list of descriptions of union members.
+ vec![
+ MemberDescription {
+ name,
+ type_metadata: variant_type_metadata,
+ offset: Size::ZERO,
+ size: variant.size,
+ align: variant.align,
+ flags: DIFlags::FlagZero,
+ discriminant: None,
+ }
+ ]
+ } else {
+ (0..variants.len()).map(|i| {
+ let variant = self.layout.for_variant(cx, i);
+ let (variant_type_metadata, member_desc_factory) =
+ describe_enum_variant(cx,
+ variant,
+ &adt.variants[i],
+ OptimizedDiscriminant,
+ self_metadata,
+ self.span);
+
+ let member_descriptions = member_desc_factory
+ .create_member_descriptions(cx);
+
+ set_members_of_composite_type(cx,
+ variant_type_metadata,
+ member_descriptions);
+
+ let niche_value = if i == dataful_variant {
+ None
+ } else {
+ let niche = (i as u128)
+ .wrapping_sub(*niche_variants.start() as u128)
+ .wrapping_add(niche_start);
+ assert_eq!(niche as u64 as u128, niche);
+ Some(niche as u64)
+ };
+
+ MemberDescription {
+ name: adt.variants[i].name.as_str().to_string(),
+ type_metadata: variant_type_metadata,
+ offset: Size::ZERO,
+ size: self.layout.size,
+ align: self.layout.align,
+ flags: DIFlags::FlagZero,
+ discriminant: niche_value,
+ }
+ }).collect()
}
- compute_field_path(cx, &mut name,
- self.layout,
- self.layout.fields.offset(0),
- self.layout.field(cx, 0).size);
- name.push_str(&adt.variants[*niche_variants.start()].name.as_str());
-
- // Create the (singleton) list of descriptions of union members.
- vec![
- MemberDescription {
- name,
- type_metadata: variant_type_metadata,
- offset: Size::ZERO,
- size: variant.size,
- align: variant.align,
- flags: DIFlags::FlagZero
- }
- ]
}
}
}
let (size, align) = cx.size_and_align_of(ty);
MemberDescription {
name: name.to_string(),
- type_metadata: match self.discriminant_type_metadata {
- Some(metadata) if i == 0 => metadata,
- _ => type_metadata(cx, ty, self.span)
+ type_metadata: if use_enum_fallback(cx) {
+ match self.discriminant_type_metadata {
+ Some(metadata) if i == 0 => metadata,
+ _ => type_metadata(cx, ty, self.span)
+ }
+ } else {
+ type_metadata(cx, ty, self.span)
},
offset: self.offsets[i],
size,
align,
- flags: DIFlags::FlagZero
+ flags: DIFlags::FlagZero,
+ discriminant: None,
}
}).collect()
}
NoDiscriminant
}
-// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type
-// of the variant, and (3) a MemberDescriptionFactory for producing the
-// descriptions of the fields of the variant. This is a rudimentary version of a
-// full RecursiveTypeDescription.
+// Returns a tuple of (1) type_metadata_stub of the variant, (2) a
+// MemberDescriptionFactory for producing the descriptions of the
+// fields of the variant. This is a rudimentary version of a full
+// RecursiveTypeDescription.
fn describe_enum_variant(
cx: &CodegenCx<'ll, 'tcx>,
layout: layout::TyLayout<'tcx>,
unique_type_id,
Some(containing_scope));
- // If this is not a univariant enum, there is also the discriminant field.
- let (discr_offset, discr_arg) = match discriminant_info {
- RegularDiscriminant(_) => {
- // We have the layout of an enum variant, we need the layout of the outer enum
- let enum_layout = cx.layout_of(layout.ty);
- (Some(enum_layout.fields.offset(0)),
- Some(("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, 0).ty)))
- }
- _ => (None, None),
- };
- let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| {
- layout.fields.offset(i)
- })).collect();
-
// Build an array of (field name, field type) pairs to be captured in the factory closure.
- let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| {
- let name = if variant.ctor_kind == CtorKind::Fn {
- format!("__{}", i)
- } else {
- variant.fields[i].ident.to_string()
+ let (offsets, args) = if use_enum_fallback(cx) {
+ // If this is not a univariant enum, there is also the discriminant field.
+ let (discr_offset, discr_arg) = match discriminant_info {
+ RegularDiscriminant(_) => {
+ // We have the layout of an enum variant, we need the layout of the outer enum
+ let enum_layout = cx.layout_of(layout.ty);
+ (Some(enum_layout.fields.offset(0)),
+ Some(("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, 0).ty)))
+ }
+ _ => (None, None),
};
- (name, layout.field(cx, i).ty)
- })).collect();
+ (
+ discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| {
+ layout.fields.offset(i)
+ })).collect(),
+ discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| {
+ let name = if variant.ctor_kind == CtorKind::Fn {
+ format!("__{}", i)
+ } else {
+ variant.fields[i].ident.to_string()
+ };
+ (name, layout.field(cx, i).ty)
+ })).collect()
+ )
+ } else {
+ (
+ (0..layout.fields.count()).map(|i| {
+ layout.fields.offset(i)
+ }).collect(),
+ (0..layout.fields.count()).map(|i| {
+ let name = if variant.ctor_kind == CtorKind::Fn {
+ format!("__{}", i)
+ } else {
+ variant.fields[i].ident.to_string()
+ };
+ (name, layout.field(cx, i).ty)
+ }).collect()
+ )
+ };
let member_description_factory =
VariantMDF(VariantMemberDescriptionFactory {
// <unknown>
let file_metadata = unknown_file_metadata(cx);
- let def = enum_type.ty_adt_def().unwrap();
- let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx)
- .zip(&def.variants)
- .map(|(discr, v)| {
- let name = SmallCStr::new(&v.name.as_str());
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateEnumerator(
- DIB(cx),
- name.as_ptr(),
- // FIXME: what if enumeration has i128 discriminant?
- discr.val as u64))
- }
- })
- .collect();
-
let discriminant_type_metadata = |discr: layout::Primitive| {
+ let def = enum_type.ty_adt_def().unwrap();
+ let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx)
+ .zip(&def.variants)
+ .map(|(discr, v)| {
+ let name = SmallCStr::new(&v.name.as_str());
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+ DIB(cx),
+ name.as_ptr(),
+ // FIXME: what if enumeration has i128 discriminant?
+ discr.val as u64))
+ }
+ })
+ .collect();
+
let disr_type_key = (enum_def_id, discr);
let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
.borrow()
discriminant_size.bits(),
discriminant_align.abi_bits() as u32,
create_DIArray(DIB(cx), &enumerators_metadata),
- discriminant_base_type_metadata)
+ discriminant_base_type_metadata, true)
};
debug_context(cx).created_enum_disr_types
let layout = cx.layout_of(enum_type);
- let discriminant_type_metadata = match layout.variants {
- layout::Variants::Single { .. } |
- layout::Variants::NicheFilling { .. } => None,
- layout::Variants::Tagged { ref tag, .. } => {
- Some(discriminant_type_metadata(tag.value))
- }
- };
-
- if let (&layout::Abi::Scalar(_), Some(discr)) = (&layout.abi, discriminant_type_metadata) {
- return FinalMetadata(discr);
+ match (&layout.abi, &layout.variants) {
+ (&layout::Abi::Scalar(_), &layout::Variants::Tagged {ref tag, .. }) =>
+ return FinalMetadata(discriminant_type_metadata(tag.value)),
+ _ => {}
}
let (enum_type_size, enum_type_align) = layout.size_and_align();
let unique_type_id_str = SmallCStr::new(
debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
);
- let enum_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateUnionType(
- DIB(cx),
- containing_scope,
- enum_name.as_ptr(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- enum_type_size.bits(),
- enum_type_align.abi_bits() as u32,
- DIFlags::FlagZero,
- None,
- 0, // RuntimeLang
- unique_type_id_str.as_ptr())
+
+ if use_enum_fallback(cx) {
+ let discriminant_type_metadata = match layout.variants {
+ layout::Variants::Single { .. } |
+ layout::Variants::NicheFilling { .. } => None,
+ layout::Variants::Tagged { ref tag, .. } => {
+ Some(discriminant_type_metadata(tag.value))
+ }
+ };
+
+ let enum_metadata = unsafe {
+ llvm::LLVMRustDIBuilderCreateUnionType(
+ DIB(cx),
+ containing_scope,
+ enum_name.as_ptr(),
+ file_metadata,
+ UNKNOWN_LINE_NUMBER,
+ enum_type_size.bits(),
+ enum_type_align.abi_bits() as u32,
+ DIFlags::FlagZero,
+ None,
+ 0, // RuntimeLang
+ unique_type_id_str.as_ptr())
+ };
+
+ return create_and_register_recursive_type_forward_declaration(
+ cx,
+ enum_type,
+ unique_type_id,
+ enum_metadata,
+ enum_metadata,
+ EnumMDF(EnumMemberDescriptionFactory {
+ enum_type,
+ layout,
+ discriminant_type_metadata,
+ containing_scope,
+ span,
+ }),
+ );
+ }
+
+ let discriminator_metadata = match &layout.variants {
+ // A single-variant enum has no discriminant.
+ &layout::Variants::Single { .. } => None,
+
+ &layout::Variants::NicheFilling { ref niche, .. } => {
+ // Find the integer type of the correct size.
+ let size = niche.value.size(cx);
+ let align = niche.value.align(cx);
+
+ let discr_type = match niche.value {
+ layout::Int(t, _) => t,
+ layout::Float(layout::FloatTy::F32) => Integer::I32,
+ layout::Float(layout::FloatTy::F64) => Integer::I64,
+ layout::Pointer => cx.data_layout().ptr_sized_integer(),
+ }.to_ty(cx.tcx, false);
+
+ let discr_metadata = basic_type_metadata(cx, discr_type);
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ containing_scope,
+ ptr::null_mut(),
+ file_metadata,
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.abi_bits() as u32,
+ layout.fields.offset(0).bits(),
+ DIFlags::FlagArtificial,
+ discr_metadata))
+ }
+ },
+
+ &layout::Variants::Tagged { ref tag, .. } => {
+ let discr_type = tag.value.to_ty(cx.tcx);
+ let (size, align) = cx.size_and_align_of(discr_type);
+
+ let discr_metadata = basic_type_metadata(cx, discr_type);
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ containing_scope,
+ ptr::null_mut(),
+ file_metadata,
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.abi_bits() as u32,
+ layout.fields.offset(0).bits(),
+ DIFlags::FlagArtificial,
+ discr_metadata))
+ }
+ },
+ };
+
+ let empty_array = create_DIArray(DIB(cx), &[]);
+ let variant_part = unsafe {
+ llvm::LLVMRustDIBuilderCreateVariantPart(
+ DIB(cx),
+ containing_scope,
+ ptr::null_mut(),
+ file_metadata,
+ UNKNOWN_LINE_NUMBER,
+ enum_type_size.bits(),
+ enum_type_align.abi_bits() as u32,
+ DIFlags::FlagZero,
+ discriminator_metadata,
+ empty_array,
+ unique_type_id_str.as_ptr())
+ };
+
+ // The variant part must be wrapped in a struct according to DWARF.
+ let type_array = create_DIArray(DIB(cx), &[Some(variant_part)]);
+ let struct_wrapper = unsafe {
+ llvm::LLVMRustDIBuilderCreateStructType(
+ DIB(cx),
+ Some(containing_scope),
+ enum_name.as_ptr(),
+ file_metadata,
+ UNKNOWN_LINE_NUMBER,
+ enum_type_size.bits(),
+ enum_type_align.abi_bits() as u32,
+ DIFlags::FlagZero,
+ None,
+ type_array,
+ 0,
+ None,
+ unique_type_id_str.as_ptr())
};
return create_and_register_recursive_type_forward_declaration(
cx,
enum_type,
unique_type_id,
- enum_metadata,
+ struct_wrapper,
+ variant_part,
EnumMDF(EnumMemberDescriptionFactory {
enum_type,
layout,
- discriminant_type_metadata,
+ discriminant_type_metadata: None,
containing_scope,
span,
}),
.map(|member_description| {
let member_name = CString::new(member_description.name).unwrap();
unsafe {
- Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ Some(llvm::LLVMRustDIBuilderCreateVariantMemberType(
DIB(cx),
composite_type_metadata,
member_name.as_ptr(),
member_description.size.bits(),
member_description.align.abi_bits() as u32,
member_description.offset.bits(),
+ match member_description.discriminant {
+ None => None,
+ Some(value) => Some(C_u64(cx, value)),
+ },
member_description.flags,
member_description.type_metadata))
}
ty::Infer(_) |
ty::UnnormalizedProjection(..) |
ty::Projection(..) |
+ ty::Bound(..) |
ty::Opaque(..) |
ty::GeneratorWitness(..) |
ty::Param(_) => {
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
- let align = cx.align_of(ty);
- bx.atomic_load(args[0].immediate(), order, align)
+ let size = cx.size_of(ty);
+ bx.atomic_load(args[0].immediate(), order, size)
} else {
return invalid_monomorphization(ty);
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
- let align = cx.align_of(ty);
- bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
+ let size = cx.size_of(ty);
+ bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
return;
} else {
return invalid_monomorphization(ty);
pub use llvm_util::target_features;
use std::any::Any;
-use std::path::{PathBuf};
use std::sync::mpsc;
use rustc_data_structures::sync::Lrc;
use rustc::util::nodemap::{FxHashSet, FxHashMap};
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
+use rustc_codegen_utils::{CompiledModule, ModuleKind};
use rustc_codegen_utils::codegen_backend::CodegenBackend;
use rustc_data_structures::svh::Svh;
mod diagnostics;
mod back {
- pub use rustc_codegen_utils::symbol_names;
mod archive;
pub mod bytecode;
- mod command;
- pub mod linker;
pub mod link;
pub mod lto;
- pub mod symbol_export;
pub mod write;
mod rpath;
pub mod wasm;
}
fn provide(&self, providers: &mut ty::query::Providers) {
- back::symbol_names::provide(providers);
- back::symbol_export::provide(providers);
- base::provide(providers);
+ rustc_codegen_utils::symbol_export::provide(providers);
+ rustc_codegen_utils::symbol_names::provide(providers);
+ base::provide_both(providers);
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
- back::symbol_export::provide_extern(providers);
- base::provide_extern(providers);
+ rustc_codegen_utils::symbol_export::provide_extern(providers);
+ base::provide_both(providers);
attributes::provide_extern(providers);
}
source: WorkProduct,
}
-#[derive(Copy, Clone, Debug, PartialEq)]
-enum ModuleKind {
- Regular,
- Metadata,
- Allocator,
-}
-
impl ModuleCodegen {
fn into_compiled_module(self,
emit_obj: bool,
}
}
-#[derive(Debug)]
-struct CompiledModule {
- name: String,
- kind: ModuleKind,
- object: Option<PathBuf>,
- bytecode: Option<PathBuf>,
- bytecode_compressed: Option<PathBuf>,
-}
-
struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
crate_hash: Svh,
metadata: rustc::middle::cstore::EncodedMetadata,
windows_subsystem: Option<String>,
- linker_info: back::linker::LinkerInfo,
+ linker_info: rustc_codegen_utils::linker::LinkerInfo,
crate_info: CrateInfo,
}
Ty: &'a DIType)
-> &'a DIDerivedType;
+ pub fn LLVMRustDIBuilderCreateVariantMemberType(Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Discriminant: Option<&'a Value>,
+ Flags: DIFlags,
+ Ty: &'a DIType)
+ -> &'a DIType;
+
pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
File: &'a DIFile,
SizeInBits: u64,
AlignInBits: u32,
Elements: &'a DIArray,
- ClassType: &'a DIType)
+ ClassType: &'a DIType,
+ IsFixed: bool)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateUnionType(Builder: &DIBuilder<'a>,
UniqueId: *const c_char)
-> &'a DIType;
+ pub fn LLVMRustDIBuilderCreateVariantPart(Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Discriminator: Option<&'a DIDerivedType>,
+ Elements: &'a DIArray,
+ UniqueId: *const c_char)
+ -> &'a DIDerivedType;
+
pub fn LLVMSetUnnamedAddr(GlobalVar: &Value, UnnamedAddr: Bool);
pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: &DIBuilder<'a>,
pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
pub fn LLVMRustModuleCost(M: &Module) -> u64;
- pub fn LLVMRustThinLTOAvailable() -> bool;
- pub fn LLVMRustPGOAvailable() -> bool;
pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer;
pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
self.assign(local, location);
}
- PlaceContext::NonUse(_) => {}
+ PlaceContext::NonUse(_) |
+ PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => {
(&args[..], None)
};
- for (i, arg) in first_args.iter().enumerate() {
+ 'make_args: for (i, arg) in first_args.iter().enumerate() {
let mut op = self.codegen_operand(&bx, arg);
+
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
- if let Pair(data_ptr, meta) = op.val {
- llfn = Some(meth::VirtualIndex::from_index(idx)
- .get_fn(&bx, meta, &fn_ty));
- llargs.push(data_ptr);
- continue;
+ if let Pair(..) = op.val {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ 'iter_fields: for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes
+ }
+ }
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ match op.val {
+ Pair(data_ptr, meta) => {
+ llfn = Some(meth::VirtualIndex::from_index(idx)
+ .get_fn(&bx, meta, &fn_ty));
+ llargs.push(data_ptr);
+ continue 'make_args
+ }
+ other => bug!("expected a Pair, got {:?}", other)
+ }
} else if let Ref(data_ptr, Some(meta), _) = op.val {
// by-value dynamic dispatch
llfn = Some(meth::VirtualIndex::from_index(idx)
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
- let layout = cx.data_layout();
- let pointer_size = layout.pointer_size.bytes() as usize;
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
- layout.endian,
+ dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
}).collect();
let input_vals = inputs.iter()
- .try_fold(Vec::with_capacity(inputs.len()), |mut acc, input| {
+ .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| {
let op = self.codegen_operand(&bx, input);
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
- Ok(acc)
} else {
- Err(op)
+ span_err!(bx.sess(), span.to_owned(), E0669,
+ "invalid value for constraint in inline assembly");
}
+ acc
});
- if input_vals.is_err() {
- span_err!(bx.sess(), statement.source_info.span, E0669,
- "invalid value for constraint in inline assembly");
- } else {
- let input_vals = input_vals.unwrap();
+ if input_vals.len() == inputs.len() {
let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals);
if !res {
span_err!(bx.sess(), statement.source_info.span, E0668,
}
mir::StatementKind::FakeRead(..) |
mir::StatementKind::EndRegion(_) |
- mir::StatementKind::Validate(..) |
+ mir::StatementKind::Retag { .. } |
mir::StatementKind::AscribeUserType(..) |
mir::StatementKind::Nop => bx,
}
debug!("llvm_type({:#?})", self);
- assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty);
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
// Make sure lifetimes are erased, to avoid generating distinct LLVM
// types for Rust types that only differ in the choice of lifetimes.
flate2 = "1.0"
log = "0.4"
+serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
rustc = { path = "../librustc" }
+rustc_allocator = { path = "../librustc_allocator" }
rustc_target = { path = "../librustc_target" }
rustc_data_structures = { path = "../librustc_data_structures" }
+rustc_metadata = { path = "../librustc_metadata" }
rustc_mir = { path = "../librustc_mir" }
rustc_incremental = { path = "../librustc_incremental" }
-rustc_metadata_utils = { path = "../librustc_metadata_utils" }
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A thin wrapper around `Command` in the standard library which allows us to
+//! read the arguments that are built up.
+
+use std::ffi::{OsStr, OsString};
+use std::fmt;
+use std::io;
+use std::mem;
+use std::process::{self, Output};
+
+use rustc_target::spec::LldFlavor;
+
+#[derive(Clone)]
+pub struct Command {
+ program: Program,
+ args: Vec<OsString>,
+ env: Vec<(OsString, OsString)>,
+}
+
+#[derive(Clone)]
+enum Program {
+ Normal(OsString),
+ CmdBatScript(OsString),
+ Lld(OsString, LldFlavor)
+}
+
+impl Command {
+ pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::Normal(program.as_ref().to_owned()))
+ }
+
+ pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
+ }
+
+ pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
+ Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
+ }
+
+ fn _new(program: Program) -> Command {
+ Command {
+ program,
+ args: Vec::new(),
+ env: Vec::new(),
+ }
+ }
+
+ pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
+ self._arg(arg.as_ref());
+ self
+ }
+
+ pub fn args<I>(&mut self, args: I) -> &mut Command
+ where I: IntoIterator,
+ I::Item: AsRef<OsStr>,
+ {
+ for arg in args {
+ self._arg(arg.as_ref());
+ }
+ self
+ }
+
+ fn _arg(&mut self, arg: &OsStr) {
+ self.args.push(arg.to_owned());
+ }
+
+ pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
+ where K: AsRef<OsStr>,
+ V: AsRef<OsStr>
+ {
+ self._env(key.as_ref(), value.as_ref());
+ self
+ }
+
+ fn _env(&mut self, key: &OsStr, value: &OsStr) {
+ self.env.push((key.to_owned(), value.to_owned()));
+ }
+
+ pub fn output(&mut self) -> io::Result<Output> {
+ self.command().output()
+ }
+
+ pub fn command(&self) -> process::Command {
+ let mut ret = match self.program {
+ Program::Normal(ref p) => process::Command::new(p),
+ Program::CmdBatScript(ref p) => {
+ let mut c = process::Command::new("cmd");
+ c.arg("/c").arg(p);
+ c
+ }
+ Program::Lld(ref p, flavor) => {
+ let mut c = process::Command::new(p);
+ c.arg("-flavor").arg(match flavor {
+ LldFlavor::Wasm => "wasm",
+ LldFlavor::Ld => "gnu",
+ LldFlavor::Link => "link",
+ LldFlavor::Ld64 => "darwin",
+ });
+ c
+ }
+ };
+ ret.args(&self.args);
+ ret.envs(self.env.clone());
+ return ret
+ }
+
+ // extensions
+
+ pub fn get_args(&self) -> &[OsString] {
+ &self.args
+ }
+
+ pub fn take_args(&mut self) -> Vec<OsString> {
+ mem::replace(&mut self.args, Vec::new())
+ }
+
+ /// Returns a `true` if we're pretty sure that this'll blow OS spawn limits,
+ /// or `false` if we should attempt to spawn and see what the OS says.
+ pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
+ // We mostly only care about Windows in this method, on Unix the limits
+ // can be gargantuan anyway so we're pretty unlikely to hit them
+ if cfg!(unix) {
+ return false
+ }
+
+ // Right now LLD doesn't support the `@` syntax of passing an argument
+ // through files, so regardless of the platform we try to go to the OS
+ // on this one.
+ if let Program::Lld(..) = self.program {
+ return false
+ }
+
+ // Ok so on Windows to spawn a process is 32,768 characters in its
+ // command line [1]. Unfortunately we don't actually have access to that
+ // as it's calculated just before spawning. Instead we perform a
+ // poor-man's guess as to how long our command line will be. We're
+ // assuming here that we don't have to escape every character...
+ //
+ // Turns out though that `cmd.exe` has even smaller limits, 8192
+ // characters [2]. Linkers can often be batch scripts (for example
+ // Emscripten, Gecko's current build system) which means that we're
+ // running through batch scripts. These linkers often just forward
+ // arguments elsewhere (and maybe tack on more), so if we blow 8192
+ // bytes we'll typically cause them to blow as well.
+ //
+ // Basically as a result just perform an inflated estimate of what our
+ // command line will look like and test if it's > 8192 (we actually
+ // test against 6k to artificially inflate our estimate). If all else
+ // fails we'll fall back to the normal unix logic of testing the OS
+ // error code if we fail to spawn and automatically re-spawning the
+ // linker with smaller arguments.
+ //
+ // [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx
+ // [2]: https://blogs.msdn.microsoft.com/oldnewthing/20031210-00/?p=41553
+
+ let estimated_command_line_len =
+ self.args.iter().map(|a| a.len()).sum::<usize>();
+ estimated_command_line_len > 1024 * 6
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.command().fmt(f)
+ }
+}
#[macro_use]
extern crate log;
+extern crate serialize;
#[macro_use]
extern crate rustc;
+extern crate rustc_allocator;
extern crate rustc_target;
+extern crate rustc_metadata;
extern crate rustc_mir;
extern crate rustc_incremental;
extern crate syntax;
extern crate syntax_pos;
#[macro_use] extern crate rustc_data_structures;
-extern crate rustc_metadata_utils;
+use std::path::PathBuf;
+
+use rustc::session::Session;
use rustc::ty::TyCtxt;
+pub mod command;
pub mod link;
+pub mod linker;
pub mod codegen_backend;
+pub mod symbol_export;
pub mod symbol_names;
pub mod symbol_names_test;
}
}
/// The role a compiled module plays in the crate's final artifact.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ModuleKind {
    /// An ordinary codegen unit containing the crate's own code.
    Regular,
    /// The module that carries the encoded crate metadata.
    Metadata,
    /// The module with allocator-related code (cf. the `rustc_allocator`
    /// crate imported by this crate).
    Allocator,
}
+
/// Output artifacts produced for a single compiled module.
///
/// Each output kind is optional because which files get emitted depends on
/// the requested outputs; `None` means that artifact was not produced.
#[derive(Debug)]
pub struct CompiledModule {
    /// Name of the module these outputs belong to.
    pub name: String,
    /// Role of the module (regular code, metadata, or allocator).
    pub kind: ModuleKind,
    /// Path to the emitted object file, if any.
    pub object: Option<PathBuf>,
    /// Path to the emitted bytecode file, if any.
    pub bytecode: Option<PathBuf>,
    /// Path to the emitted compressed bytecode file, if any.
    pub bytecode_compressed: Option<PathBuf>,
}
+
+pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
+ -> PathBuf {
+ // On Windows, static libraries sometimes show up as libfoo.a and other
+ // times show up as foo.lib
+ let oslibname = format!("{}{}{}",
+ sess.target.target.options.staticlib_prefix,
+ name,
+ sess.target.target.options.staticlib_suffix);
+ let unixlibname = format!("lib{}.a", name);
+
+ for path in search_paths {
+ debug!("looking for {} inside {:?}", name, path);
+ let test = path.join(&oslibname);
+ if test.exists() { return test }
+ if oslibname != unixlibname {
+ let test = path.join(&unixlibname);
+ if test.exists() { return test }
+ }
+ }
+ sess.fatal(&format!("could not find native static library `{}`, \
+ perhaps an -L flag is missing?", name));
+}
+
__build_diagnostic_array! { librustc_codegen_utils, DIAGNOSTICS }
use std::path::{Path, PathBuf};
use syntax::{ast, attr};
use syntax_pos::Span;
-use rustc_metadata_utils::validate_crate_name;
pub fn out_filename(sess: &Session,
crate_type: config::CrateType,
attrs: &[ast::Attribute],
input: &Input) -> String {
let validate = |s: String, span: Option<Span>| {
- validate_crate_name(sess, &s, span);
+ ::rustc_metadata::validate_crate_name(sess, &s, span);
s
};
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::fx::FxHashMap;
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::path::{Path, PathBuf};
+
+use command::Command;
+use rustc::hir::def_id::{LOCAL_CRATE, CrateNum};
+use rustc::middle::dependency_format::Linkage;
+use rustc::session::Session;
+use rustc::session::config::{self, CrateType, OptLevel, DebugInfo,
+ CrossLangLto};
+use rustc::ty::TyCtxt;
+use rustc_target::spec::{LinkerFlavor, LldFlavor};
+use serialize::{json, Encoder};
+
/// For all the linkers we support, and information they might
/// need out of the shared crate context before we get rid of it.
pub struct LinkerInfo {
    // Symbols to export, precomputed once per requested crate type so the
    // link step no longer needs the `TyCtxt`.
    exports: FxHashMap<CrateType, Vec<String>>,
}
+
+impl LinkerInfo {
+ pub fn new(tcx: TyCtxt) -> LinkerInfo {
+ LinkerInfo {
+ exports: tcx.sess.crate_types.borrow().iter().map(|&c| {
+ (c, exported_symbols(tcx, c))
+ }).collect(),
+ }
+ }
+
+ pub fn to_linker<'a>(
+ &'a self,
+ cmd: Command,
+ sess: &'a Session,
+ flavor: LinkerFlavor,
+ target_cpu: &'a str,
+ ) -> Box<dyn Linker+'a> {
+ match flavor {
+ LinkerFlavor::Lld(LldFlavor::Link) |
+ LinkerFlavor::Msvc => {
+ Box::new(MsvcLinker {
+ cmd,
+ sess,
+ info: self
+ }) as Box<dyn Linker>
+ }
+ LinkerFlavor::Em => {
+ Box::new(EmLinker {
+ cmd,
+ sess,
+ info: self
+ }) as Box<dyn Linker>
+ }
+ LinkerFlavor::Gcc => {
+ Box::new(GccLinker {
+ cmd,
+ sess,
+ info: self,
+ hinted_static: false,
+ is_ld: false,
+ target_cpu,
+ }) as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Ld) |
+ LinkerFlavor::Lld(LldFlavor::Ld64) |
+ LinkerFlavor::Ld => {
+ Box::new(GccLinker {
+ cmd,
+ sess,
+ info: self,
+ hinted_static: false,
+ is_ld: true,
+ target_cpu,
+ }) as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Wasm) => {
+ Box::new(WasmLd {
+ cmd,
+ sess,
+ info: self
+ }) as Box<dyn Linker>
+ }
+ }
+ }
+}
+
/// Linker abstraction used by back::link to build up the command to invoke a
/// linker.
///
/// This trait is the total list of requirements needed by `back::link` and
/// represents the meaning of each option being passed down. This trait is then
/// used to dispatch on whether a GNU-like linker (generally `ld.exe`) or an
/// MSVC linker (e.g. `link.exe`) is being used.
pub trait Linker {
    /// Link against the named dynamic library.
    fn link_dylib(&mut self, lib: &str);
    /// Link against a Rust dylib found in `path`.
    fn link_rust_dylib(&mut self, lib: &str, path: &Path);
    /// Link against a framework (only meaningful on Apple platforms).
    fn link_framework(&mut self, framework: &str);
    /// Link against the named static library.
    fn link_staticlib(&mut self, lib: &str);
    /// Link against an rlib given by path.
    fn link_rlib(&mut self, lib: &Path);
    /// Link an rlib while forcing every object in it to be included.
    fn link_whole_rlib(&mut self, lib: &Path);
    /// Link a static library while forcing every object in it to be included.
    fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]);
    /// Add a library search path.
    fn include_path(&mut self, path: &Path);
    /// Add a framework search path (Apple platforms).
    fn framework_path(&mut self, path: &Path);
    /// Set the output file name.
    fn output_filename(&mut self, path: &Path);
    /// Add an object file to the link.
    fn add_object(&mut self, path: &Path);
    /// Strip unused sections, unless crate metadata must be preserved.
    fn gc_sections(&mut self, keep_metadata: bool);
    /// Request a position-independent executable.
    fn position_independent_executable(&mut self);
    /// Explicitly disable position independence for the executable.
    fn no_position_independent_executable(&mut self);
    /// Full, partial, or no read-only-relocations hardening.
    fn full_relro(&mut self);
    fn partial_relro(&mut self);
    fn no_relro(&mut self);
    /// Apply linker-level optimization appropriate for the session's opt level.
    fn optimize(&mut self);
    /// Add whatever the linker needs for profile-guided-optimization codegen.
    fn pgo_gen(&mut self);
    /// Configure debuginfo handling according to session options.
    fn debuginfo(&mut self);
    /// Do not link the platform's default libraries.
    fn no_default_libraries(&mut self);
    /// Produce a dynamic library named `out_filename`.
    fn build_dylib(&mut self, out_filename: &Path);
    /// Produce a statically linked executable.
    fn build_static_executable(&mut self);
    /// Append arbitrary user-supplied arguments verbatim.
    fn args(&mut self, args: &[String]);
    /// Restrict exported symbols for `crate_type`, writing any auxiliary
    /// file (version script, `.def` file, ...) into `tmpdir`.
    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType);
    /// Select the binary's subsystem (Windows-specific concept).
    fn subsystem(&mut self, subsystem: &str);
    /// Begin/end a group of archives searched repeatedly to resolve
    /// circular references (GNU ld's `--start-group`/`--end-group`).
    fn group_start(&mut self);
    fn group_end(&mut self);
    /// Enable cross-language LTO if the session requests it.
    fn cross_lang_lto(&mut self);
    // Should have been finalize(self), but we don't support self-by-value on trait objects (yet?).
    fn finalize(&mut self) -> Command;
}
+
/// `Linker` implementation for gcc-style compiler drivers and for bare
/// ld-compatible linkers (selected by `is_ld`).
pub struct GccLinker<'a> {
    cmd: Command,
    sess: &'a Session,
    info: &'a LinkerInfo,
    hinted_static: bool, // Keeps track of the current hinting mode.
    // Link as ld
    is_ld: bool,
    // CPU name handed to the LTO plugin via `-plugin-opt=mcpu=...`.
    target_cpu: &'a str,
}
+
+impl<'a> GccLinker<'a> {
+ /// Argument that must be passed *directly* to the linker
+ ///
+ /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used
+ fn linker_arg<S>(&mut self, arg: S) -> &mut Self
+ where S: AsRef<OsStr>
+ {
+ if !self.is_ld {
+ let mut os = OsString::from("-Wl,");
+ os.push(arg.as_ref());
+ self.cmd.arg(os);
+ } else {
+ self.cmd.arg(arg);
+ }
+ self
+ }
+
+ fn takes_hints(&self) -> bool {
+ !self.sess.target.target.options.is_like_osx
+ }
+
+ // Some platforms take hints about whether a library is static or dynamic.
+ // For those that support this, we ensure we pass the option if the library
+ // was flagged "static" (most defaults are dynamic) to ensure that if
+ // libfoo.a and libfoo.so both exist that the right one is chosen.
+ fn hint_static(&mut self) {
+ if !self.takes_hints() { return }
+ if !self.hinted_static {
+ self.linker_arg("-Bstatic");
+ self.hinted_static = true;
+ }
+ }
+
+ fn hint_dynamic(&mut self) {
+ if !self.takes_hints() { return }
+ if self.hinted_static {
+ self.linker_arg("-Bdynamic");
+ self.hinted_static = false;
+ }
+ }
+
+ fn push_cross_lang_lto_args(&mut self, plugin_path: Option<&OsStr>) {
+ if let Some(plugin_path) = plugin_path {
+ let mut arg = OsString::from("-plugin=");
+ arg.push(plugin_path);
+ self.linker_arg(&arg);
+ }
+
+ let opt_level = match self.sess.opts.optimize {
+ config::OptLevel::No => "O0",
+ config::OptLevel::Less => "O1",
+ config::OptLevel::Default => "O2",
+ config::OptLevel::Aggressive => "O3",
+ config::OptLevel::Size => "Os",
+ config::OptLevel::SizeMin => "Oz",
+ };
+
+ self.linker_arg(&format!("-plugin-opt={}", opt_level));
+ let target_cpu = self.target_cpu;
+ self.linker_arg(&format!("-plugin-opt=mcpu={}", target_cpu));
+
+ match self.sess.lto() {
+ config::Lto::Thin |
+ config::Lto::ThinLocal => {
+ self.linker_arg("-plugin-opt=thin");
+ }
+ config::Lto::Fat |
+ config::Lto::No => {
+ // default to regular LTO
+ }
+ }
+ }
+}
+
impl<'a> Linker for GccLinker<'a> {
    fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); }
    fn link_staticlib(&mut self, lib: &str) {
        self.hint_static(); self.cmd.arg(format!("-l{}",lib));
    }
    fn link_rlib(&mut self, lib: &Path) { self.hint_static(); self.cmd.arg(lib); }
    fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); }
    fn framework_path(&mut self, path: &Path) { self.cmd.arg("-F").arg(path); }
    fn output_filename(&mut self, path: &Path) { self.cmd.arg("-o").arg(path); }
    fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
    fn position_independent_executable(&mut self) { self.cmd.arg("-pie"); }
    fn no_position_independent_executable(&mut self) { self.cmd.arg("-no-pie"); }
    fn full_relro(&mut self) { self.linker_arg("-zrelro"); self.linker_arg("-znow"); }
    fn partial_relro(&mut self) { self.linker_arg("-zrelro"); }
    fn no_relro(&mut self) { self.linker_arg("-znorelro"); }
    fn build_static_executable(&mut self) { self.cmd.arg("-static"); }
    fn args(&mut self, args: &[String]) { self.cmd.args(args); }

    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
        self.hint_dynamic();
        self.cmd.arg(format!("-l{}",lib));
    }

    fn link_framework(&mut self, framework: &str) {
        self.hint_dynamic();
        self.cmd.arg("-framework").arg(framework);
    }

    // Here we explicitly ask that the entire archive is included into the
    // result artifact. For more details see #15460, but the gist is that
    // the linker will strip away any unused objects in the archive if we
    // don't otherwise explicitly reference them. This can occur for
    // libraries which are just providing bindings, libraries with generic
    // functions, etc.
    fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]) {
        self.hint_static();
        let target = &self.sess.target.target;
        if !target.options.is_like_osx {
            self.linker_arg("--whole-archive").cmd.arg(format!("-l{}",lib));
            self.linker_arg("--no-whole-archive");
        } else {
            // -force_load is the macOS equivalent of --whole-archive, but it
            // involves passing the full path to the library to link.
            self.linker_arg("-force_load");
            let lib = ::find_library(lib, search_path, &self.sess);
            self.linker_arg(&lib);
        }
    }

    fn link_whole_rlib(&mut self, lib: &Path) {
        self.hint_static();
        if self.sess.target.target.options.is_like_osx {
            self.linker_arg("-force_load");
            self.linker_arg(&lib);
        } else {
            self.linker_arg("--whole-archive").cmd.arg(lib);
            self.linker_arg("--no-whole-archive");
        }
    }

    fn gc_sections(&mut self, keep_metadata: bool) {
        // The dead_strip option to the linker specifies that functions and data
        // unreachable by the entry point will be removed. This is quite useful
        // with Rust's compilation model of compiling libraries at a time into
        // one object file. For example, this brings hello world from 1.7MB to
        // 458K.
        //
        // Note that this is done for both executables and dynamic libraries. We
        // won't get much benefit from dylibs because LLVM will have already
        // stripped away as much as it could. This has not been seen to impact
        // link times negatively.
        //
        // -dead_strip can't be part of the pre_link_args because it's also used
        // for partial linking when using multiple codegen units (-r). So we
        // insert it here.
        if self.sess.target.target.options.is_like_osx {
            self.linker_arg("-dead_strip");
        } else if self.sess.target.target.options.is_like_solaris {
            self.linker_arg("-zignore");

        // If we're building a dylib, we don't use --gc-sections because LLVM
        // has already done the best it can do, and we also don't want to
        // eliminate the metadata. If we're building an executable, however,
        // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
        // reduction.
        } else if !keep_metadata {
            self.linker_arg("--gc-sections");
        }
    }

    fn optimize(&mut self) {
        if !self.sess.target.target.options.linker_is_gnu { return }

        // GNU-style linkers support optimization with -O. GNU ld doesn't
        // need a numeric argument, but other linkers do.
        if self.sess.opts.optimize == config::OptLevel::Default ||
           self.sess.opts.optimize == config::OptLevel::Aggressive {
            self.linker_arg("-O1");
        }
    }

    fn pgo_gen(&mut self) {
        if !self.sess.target.target.options.linker_is_gnu { return }

        // If we're doing PGO generation stuff and on a GNU-like linker, use the
        // "-u" flag to properly pull in the profiler runtime bits.
        //
        // This is because LLVM otherwise won't add the needed initialization
        // for us on Linux (though the extra flag should be harmless if it
        // does).
        //
        // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
        //
        // Though it may be worth to try to revert those changes upstream, since
        // the overhead of the initialization should be minor.
        self.cmd.arg("-u");
        self.cmd.arg("__llvm_profile_runtime");
    }

    fn debuginfo(&mut self) {
        match self.sess.opts.debuginfo {
            DebugInfo::None => {
                // If we are building without debuginfo enabled and we were called with
                // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo
                // found when linking to get rid of symbols from libstd.
                match self.sess.opts.debugging_opts.strip_debuginfo_if_disabled {
                    Some(true) => { self.linker_arg("-S"); },
                    _ => {},
                }
            },
            _ => {},
        };
    }

    fn no_default_libraries(&mut self) {
        // `-nodefaultlibs` is a driver flag; a bare `ld` never links default
        // libraries in the first place, so there is nothing to pass.
        if !self.is_ld {
            self.cmd.arg("-nodefaultlibs");
        }
    }

    fn build_dylib(&mut self, out_filename: &Path) {
        // On mac we need to tell the linker to let this library be rpathed
        if self.sess.target.target.options.is_like_osx {
            self.cmd.arg("-dynamiclib");
            self.linker_arg("-dylib");

            // Note that the `osx_rpath_install_name` option here is a hack
            // purely to support rustbuild right now, we should get a more
            // principled solution at some point to force the compiler to pass
            // the right `-Wl,-install_name` with an `@rpath` in it.
            if self.sess.opts.cg.rpath ||
               self.sess.opts.debugging_opts.osx_rpath_install_name {
                self.linker_arg("-install_name");
                let mut v = OsString::from("@rpath/");
                v.push(out_filename.file_name().unwrap());
                self.linker_arg(&v);
            }
        } else {
            self.cmd.arg("-shared");
        }
    }

    fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) {
        // If we're compiling a dylib, then we let symbol visibility in object
        // files to take care of whether they're exported or not.
        //
        // If we're compiling a cdylib, however, we manually create a list of
        // exported symbols to ensure we don't expose any more. The object files
        // have far more public symbols than we actually want to export, so we
        // hide them all here.
        if crate_type == CrateType::Dylib ||
           crate_type == CrateType::ProcMacro {
            return
        }

        let mut arg = OsString::new();
        let path = tmpdir.join("list");

        debug!("EXPORTED SYMBOLS:");

        if self.sess.target.target.options.is_like_osx {
            // Write a plain, newline-separated list of symbols
            let res = (|| -> io::Result<()> {
                let mut f = BufWriter::new(File::create(&path)?);
                for sym in self.info.exports[&crate_type].iter() {
                    debug!("  _{}", sym);
                    writeln!(f, "_{}", sym)?;
                }
                Ok(())
            })();
            if let Err(e) = res {
                self.sess.fatal(&format!("failed to write lib.def file: {}", e));
            }
        } else {
            // Write an LD version script
            let res = (|| -> io::Result<()> {
                let mut f = BufWriter::new(File::create(&path)?);
                writeln!(f, "{{\n  global:")?;
                for sym in self.info.exports[&crate_type].iter() {
                    debug!("    {};", sym);
                    writeln!(f, "    {};", sym)?;
                }
                writeln!(f, "\n  local:\n    *;\n}};")?;
                Ok(())
            })();
            if let Err(e) = res {
                self.sess.fatal(&format!("failed to write version script: {}", e));
            }
        }

        // Hand the symbol file to the linker with the platform's flag; the
        // `-Wl,` prefix must be spelled out here because the file path is
        // fused onto the same argument.
        if self.sess.target.target.options.is_like_osx {
            if !self.is_ld {
                arg.push("-Wl,")
            }
            arg.push("-exported_symbols_list,");
        } else if self.sess.target.target.options.is_like_solaris {
            if !self.is_ld {
                arg.push("-Wl,")
            }
            arg.push("-M,");
        } else {
            if !self.is_ld {
                arg.push("-Wl,")
            }
            arg.push("--version-script=");
        }

        arg.push(&path);
        self.cmd.arg(arg);
    }

    fn subsystem(&mut self, subsystem: &str) {
        self.linker_arg("--subsystem");
        self.linker_arg(&subsystem);
    }

    fn finalize(&mut self) -> Command {
        self.hint_dynamic(); // Reset to default before returning the composed command line.
        let mut cmd = Command::new("");
        ::std::mem::swap(&mut cmd, &mut self.cmd);
        cmd
    }

    // macOS's linker resolves archives in one pass, so grouping is neither
    // needed nor supported there.
    fn group_start(&mut self) {
        if !self.sess.target.target.options.is_like_osx {
            self.linker_arg("--start-group");
        }
    }

    fn group_end(&mut self) {
        if !self.sess.target.target.options.is_like_osx {
            self.linker_arg("--end-group");
        }
    }

    fn cross_lang_lto(&mut self) {
        match self.sess.opts.debugging_opts.cross_lang_lto {
            CrossLangLto::Disabled => {
                // Nothing to do
            }
            CrossLangLto::LinkerPluginAuto => {
                self.push_cross_lang_lto_args(None);
            }
            CrossLangLto::LinkerPlugin(ref path) => {
                self.push_cross_lang_lto_args(Some(path.as_os_str()));
            }
        }
    }
}
+
/// `Linker` implementation for Microsoft's `link.exe` (and `lld-link`,
/// which accepts the same flags).
pub struct MsvcLinker<'a> {
    cmd: Command,
    sess: &'a Session,
    info: &'a LinkerInfo
}
+
impl<'a> Linker for MsvcLinker<'a> {
    fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); }
    fn add_object(&mut self, path: &Path) { self.cmd.arg(path); }
    fn args(&mut self, args: &[String]) { self.cmd.args(args); }

    fn build_dylib(&mut self, out_filename: &Path) {
        self.cmd.arg("/DLL");
        // Also ask for the import library to be placed next to the dll.
        let mut arg: OsString = "/IMPLIB:".into();
        arg.push(out_filename.with_extension("dll.lib"));
        self.cmd.arg(arg);
    }

    fn build_static_executable(&mut self) {
        // noop
    }

    fn gc_sections(&mut self, _keep_metadata: bool) {
        // MSVC's ICF (Identical COMDAT Folding) link optimization is
        // slow for Rust and thus we disable it by default when not in
        // optimization build.
        if self.sess.opts.optimize != config::OptLevel::No {
            self.cmd.arg("/OPT:REF,ICF");
        } else {
            // It is necessary to specify NOICF here, because /OPT:REF
            // implies ICF by default.
            self.cmd.arg("/OPT:REF,NOICF");
        }
    }

    fn link_dylib(&mut self, lib: &str) {
        self.cmd.arg(&format!("{}.lib", lib));
    }

    fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
        // When producing a dll, the MSVC linker may not actually emit a
        // `foo.lib` file if the dll doesn't actually export any symbols, so we
        // check to see if the file is there and just omit linking to it if it's
        // not present.
        let name = format!("{}.dll.lib", lib);
        if fs::metadata(&path.join(&name)).is_ok() {
            self.cmd.arg(name);
        }
    }

    fn link_staticlib(&mut self, lib: &str) {
        self.cmd.arg(&format!("{}.lib", lib));
    }

    fn position_independent_executable(&mut self) {
        // noop
    }

    fn no_position_independent_executable(&mut self) {
        // noop
    }

    fn full_relro(&mut self) {
        // noop
    }

    fn partial_relro(&mut self) {
        // noop
    }

    fn no_relro(&mut self) {
        // noop
    }

    fn no_default_libraries(&mut self) {
        // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC
        // as there's been trouble in the past of linking the C++ standard
        // library required by LLVM. This likely needs to happen one day, but
        // in general Windows is also a more controlled environment than
        // Unix, so it's not necessarily as critical that this be implemented.
        //
        // Note that there are also some licensing worries about statically
        // linking some libraries which require a specific agreement, so it may
        // not ever be possible for us to pass this flag.
    }

    fn include_path(&mut self, path: &Path) {
        let mut arg = OsString::from("/LIBPATH:");
        arg.push(path);
        self.cmd.arg(&arg);
    }

    fn output_filename(&mut self, path: &Path) {
        let mut arg = OsString::from("/OUT:");
        arg.push(path);
        self.cmd.arg(&arg);
    }

    fn framework_path(&mut self, _path: &Path) {
        bug!("frameworks are not supported on windows")
    }
    fn link_framework(&mut self, _framework: &str) {
        bug!("frameworks are not supported on windows")
    }

    fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
        // not supported?
        self.link_staticlib(lib);
    }
    fn link_whole_rlib(&mut self, path: &Path) {
        // not supported?
        self.link_rlib(path);
    }
    fn optimize(&mut self) {
        // Needs more investigation of `/OPT` arguments
    }

    fn pgo_gen(&mut self) {
        // Nothing needed here.
    }

    fn debuginfo(&mut self) {
        // This will cause the Microsoft linker to generate a PDB file
        // from the CodeView line tables in the object files.
        self.cmd.arg("/DEBUG");

        // This will cause the Microsoft linker to embed .natvis info into the PDB file
        let sysroot = self.sess.sysroot();
        let natvis_dir_path = sysroot.join("lib\\rustlib\\etc");
        if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
            // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes
            // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore
            // them, eventually mooting this workaround, per this landed patch:
            // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100
            if let Some(ref linker_path) = self.sess.opts.cg.linker {
                if let Some(linker_name) = Path::new(&linker_path).file_stem() {
                    if linker_name.to_str().unwrap().to_lowercase() == "lld-link" {
                        self.sess.warn("not embedding natvis: lld-link may not support the flag");
                        return;
                    }
                }
            }
            for entry in natvis_dir {
                match entry {
                    Ok(entry) => {
                        let path = entry.path();
                        if path.extension() == Some("natvis".as_ref()) {
                            let mut arg = OsString::from("/NATVIS:");
                            arg.push(path);
                            self.cmd.arg(arg);
                        }
                    },
                    Err(err) => {
                        self.sess.warn(&format!("error enumerating natvis directory: {}", err));
                    },
                }
            }
        }
    }

    // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
    // export symbols from a dynamic library. When building a dynamic library,
    // however, we're going to want some symbols exported, so this function
    // generates a DEF file which lists all the symbols.
    //
    // The linker will read this `*.def` file and export all the symbols from
    // the dynamic library. Note that this is not as simple as just exporting
    // all the symbols in the current crate (as specified by `codegen.reachable`)
    // but rather we also need to possibly export the symbols of upstream
    // crates. Upstream rlibs may be linked statically to this dynamic library,
    // in which case they may continue to transitively be used and hence need
    // their symbols exported.
    fn export_symbols(&mut self,
                      tmpdir: &Path,
                      crate_type: CrateType) {
        let path = tmpdir.join("lib.def");
        let res = (|| -> io::Result<()> {
            let mut f = BufWriter::new(File::create(&path)?);

            // Start off with the standard module name header and then go
            // straight to exports.
            writeln!(f, "LIBRARY")?;
            writeln!(f, "EXPORTS")?;
            for symbol in self.info.exports[&crate_type].iter() {
                debug!("  _{}", symbol);
                writeln!(f, "  {}", symbol)?;
            }
            Ok(())
        })();
        if let Err(e) = res {
            self.sess.fatal(&format!("failed to write lib.def file: {}", e));
        }
        let mut arg = OsString::from("/DEF:");
        arg.push(path);
        self.cmd.arg(&arg);
    }

    fn subsystem(&mut self, subsystem: &str) {
        // Note that previous passes of the compiler validated this subsystem,
        // so we just blindly pass it to the linker.
        self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));

        // Windows has two subsystems we're interested in right now, the console
        // and windows subsystems. These both implicitly have different entry
        // points (starting symbols). The console entry point starts with
        // `mainCRTStartup` and the windows entry point starts with
        // `WinMainCRTStartup`. These entry points, defined in system libraries,
        // will then later probe for either `main` or `WinMain`, respectively to
        // start the application.
        //
        // In Rust we just always generate a `main` function so we want control
        // to always start there, so we force the entry point on the windows
        // subsystem to be `mainCRTStartup` to get everything booted up
        // correctly.
        //
        // For more information see RFC #1665
        if subsystem == "windows" {
            self.cmd.arg("/ENTRY:mainCRTStartup");
        }
    }

    fn finalize(&mut self) -> Command {
        // Hand back the composed command, leaving a fresh empty one behind.
        let mut cmd = Command::new("");
        ::std::mem::swap(&mut cmd, &mut self.cmd);
        cmd
    }

    // MSVC doesn't need group indicators
    fn group_start(&mut self) {}
    fn group_end(&mut self) {}

    fn cross_lang_lto(&mut self) {
        // Do nothing
    }
}
+
/// `Linker` implementation for Emscripten's `emcc` driver.
pub struct EmLinker<'a> {
    cmd: Command,
    sess: &'a Session,
    info: &'a LinkerInfo
}
+
impl<'a> Linker for EmLinker<'a> {
    fn include_path(&mut self, path: &Path) {
        self.cmd.arg("-L").arg(path);
    }

    fn link_staticlib(&mut self, lib: &str) {
        self.cmd.arg("-l").arg(lib);
    }

    fn output_filename(&mut self, path: &Path) {
        self.cmd.arg("-o").arg(path);
    }

    fn add_object(&mut self, path: &Path) {
        self.cmd.arg(path);
    }

    fn link_dylib(&mut self, lib: &str) {
        // Emscripten always links statically
        self.link_staticlib(lib);
    }

    fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
        // not supported?
        self.link_staticlib(lib);
    }

    fn link_whole_rlib(&mut self, lib: &Path) {
        // not supported?
        self.link_rlib(lib);
    }

    fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
        self.link_dylib(lib);
    }

    fn link_rlib(&mut self, lib: &Path) {
        self.add_object(lib);
    }

    fn position_independent_executable(&mut self) {
        // noop
    }

    fn no_position_independent_executable(&mut self) {
        // noop
    }

    fn full_relro(&mut self) {
        // noop
    }

    fn partial_relro(&mut self) {
        // noop
    }

    fn no_relro(&mut self) {
        // noop
    }

    fn args(&mut self, args: &[String]) {
        self.cmd.args(args);
    }

    fn framework_path(&mut self, _path: &Path) {
        bug!("frameworks are not supported on Emscripten")
    }

    fn link_framework(&mut self, _framework: &str) {
        bug!("frameworks are not supported on Emscripten")
    }

    fn gc_sections(&mut self, _keep_metadata: bool) {
        // noop
    }

    fn optimize(&mut self) {
        // Emscripten performs own optimizations
        self.cmd.arg(match self.sess.opts.optimize {
            OptLevel::No => "-O0",
            OptLevel::Less => "-O1",
            OptLevel::Default => "-O2",
            OptLevel::Aggressive => "-O3",
            OptLevel::Size => "-Os",
            OptLevel::SizeMin => "-Oz"
        });
        // Unusable until https://github.com/rust-lang/rust/issues/38454 is resolved
        self.cmd.args(&["--memory-init-file", "0"]);
    }

    fn pgo_gen(&mut self) {
        // noop, but maybe we need something like the gnu linker?
    }

    fn debuginfo(&mut self) {
        // Preserve names or generate source maps depending on debug info
        self.cmd.arg(match self.sess.opts.debuginfo {
            DebugInfo::None => "-g0",
            DebugInfo::Limited => "-g3",
            DebugInfo::Full => "-g4"
        });
    }

    fn no_default_libraries(&mut self) {
        self.cmd.args(&["-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[]"]);
    }

    fn build_dylib(&mut self, _out_filename: &Path) {
        bug!("building dynamic library is unsupported on Emscripten")
    }

    fn build_static_executable(&mut self) {
        // noop
    }

    fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
        let symbols = &self.info.exports[&crate_type];

        debug!("EXPORTED SYMBOLS:");

        self.cmd.arg("-s");

        // Emscripten takes its export list as a JSON array of `_`-prefixed
        // names, so serialize the symbol list accordingly.
        let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
        let mut encoded = String::new();

        {
            let mut encoder = json::Encoder::new(&mut encoded);
            let res = encoder.emit_seq(symbols.len(), |encoder| {
                for (i, sym) in symbols.iter().enumerate() {
                    encoder.emit_seq_elt(i, |encoder| {
                        encoder.emit_str(&("_".to_string() + sym))
                    })?;
                }
                Ok(())
            });
            if let Err(e) = res {
                self.sess.fatal(&format!("failed to encode exported symbols: {}", e));
            }
        }
        debug!("{}", encoded);
        arg.push(encoded);

        self.cmd.arg(arg);
    }

    fn subsystem(&mut self, _subsystem: &str) {
        // noop
    }

    fn finalize(&mut self) -> Command {
        // Hand back the composed command, leaving a fresh empty one behind.
        let mut cmd = Command::new("");
        ::std::mem::swap(&mut cmd, &mut self.cmd);
        cmd
    }

    // Appears not necessary on Emscripten
    fn group_start(&mut self) {}
    fn group_end(&mut self) {}

    fn cross_lang_lto(&mut self) {
        // Do nothing
    }
}
+
+fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec<String> {
+ let mut symbols = Vec::new();
+
+ let export_threshold =
+ ::symbol_export::crates_export_threshold(&[crate_type]);
+ for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() {
+ if level.is_below_threshold(export_threshold) {
+ symbols.push(symbol.symbol_name(tcx).to_string());
+ }
+ }
+
+ let formats = tcx.sess.dependency_formats.borrow();
+ let deps = formats[&crate_type].iter();
+
+ for (index, dep_format) in deps.enumerate() {
+ let cnum = CrateNum::new(index + 1);
+ // For each dependency that we are linking to statically ...
+ if *dep_format == Linkage::Static {
+ // ... we add its symbol list to our export list.
+ for &(symbol, level) in tcx.exported_symbols(cnum).iter() {
+ if level.is_below_threshold(export_threshold) {
+ symbols.push(symbol.symbol_name(tcx).to_string());
+ }
+ }
+ }
+ }
+
+ symbols
+}
+
/// `Linker` implementation for LLD's WebAssembly flavor (`wasm-ld`).
pub struct WasmLd<'a> {
    cmd: Command,
    sess: &'a Session,
    info: &'a LinkerInfo,
}
+
+impl<'a> Linker for WasmLd<'a> {
+ fn link_dylib(&mut self, lib: &str) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_staticlib(&mut self, lib: &str) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn position_independent_executable(&mut self) {
+ }
+
+ fn full_relro(&mut self) {
+ }
+
+ fn partial_relro(&mut self) {
+ }
+
+ fn no_relro(&mut self) {
+ }
+
+ fn build_static_executable(&mut self) {
+ }
+
+ fn args(&mut self, args: &[String]) {
+ self.cmd.args(args);
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_framework(&mut self, _framework: &str) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ self.cmd.arg("--gc-sections");
+ }
+
+ fn optimize(&mut self) {
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2`
+ // instead.
+ OptLevel::Size => "-O2",
+ OptLevel::SizeMin => "-O2"
+ });
+ }
+
+ fn pgo_gen(&mut self) {
+ }
+
+ fn debuginfo(&mut self) {
+ }
+
+ fn no_default_libraries(&mut self) {
+ }
+
+ fn build_dylib(&mut self, _out_filename: &Path) {
+ }
+
+ fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) {
+ for sym in self.info.exports[&crate_type].iter() {
+ self.cmd.arg("--export").arg(&sym);
+ }
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {
+ }
+
+ fn no_position_independent_executable(&mut self) {
+ }
+
+ fn finalize(&mut self) -> Command {
+ // There have been reports in the wild (rustwasm/wasm-bindgen#119) of
+ // using threads causing weird hangs and bugs. Disable it entirely as
+ // this isn't yet the bottleneck of compilation at all anyway.
+ self.cmd.arg("--no-threads");
+
+ // By default LLD only gives us one page of stack (64k) which is a
+ // little small. Default to a larger stack closer to other PC platforms
+ // (1MB) and users can always inject their own link-args to override this.
+ self.cmd.arg("-z").arg("stack-size=1048576");
+
+ // By default LLD's memory layout is:
+ //
+ // 1. First, a blank page
+ // 2. Next, all static data
+ // 3. Finally, the main stack (which grows down)
+ //
+ // This has the unfortunate consequence that on stack overflows you
+ // corrupt static data and can cause some exceedingly weird bugs. To
+ // help detect this a little sooner we instead request that the stack is
+ // placed before static data.
+ //
+ // This means that we'll generate slightly larger binaries as references
+ // to static data will take more bytes in the ULEB128 encoding, but
+ // stack overflow will be guaranteed to trap as it underflows instead of
+ // corrupting static data.
+ self.cmd.arg("--stack-first");
+
+ // FIXME we probably shouldn't pass this but instead pass an explicit
+ // whitelist of symbols we'll allow to be undefined. Unfortunately
+ // though we can't handle symbols like `log10` that LLVM injects at a
+ // super late date without actually parsing object files. For now let's
+ // stick to this and hopefully fix it before stabilization happens.
+ self.cmd.arg("--allow-undefined");
+
+ // For now we just never have an entry symbol
+ self.cmd.arg("--no-entry");
+
+ // Make the default table accessible
+ self.cmd.arg("--export-table");
+
+        // Rust code should never have warnings, and warnings are often
+        // indicative of bugs, so let's prevent them.
+ self.cmd.arg("--fatal-warnings");
+
+ let mut cmd = Command::new("");
+ ::std::mem::swap(&mut cmd, &mut self.cmd);
+ cmd
+ }
+
+ // Not needed for now with LLD
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn cross_lang_lto(&mut self) {
+ // Do nothing for now
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::sync::Lrc;
+use std::sync::Arc;
+
+use rustc::ty::Instance;
+use rustc::hir;
+use rustc::hir::Node;
+use rustc::hir::CodegenFnAttrFlags;
+use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadata_symbol_name};
+use rustc::session::config;
+use rustc::ty::{TyCtxt, SymbolName};
+use rustc::ty::query::Providers;
+use rustc::ty::subst::Substs;
+use rustc::util::nodemap::{FxHashMap, DefIdMap};
+use rustc_allocator::ALLOCATOR_METHODS;
+use rustc_data_structures::indexed_vec::IndexVec;
+use std::collections::hash_map::Entry::*;
+
+pub type ExportedSymbols = FxHashMap<
+ CrateNum,
+ Arc<Vec<(String, SymbolExportLevel)>>,
+>;
+
+pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel {
+ crates_export_threshold(&tcx.sess.crate_types.borrow())
+}
+
+fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel {
+ match crate_type {
+ config::CrateType::Executable |
+ config::CrateType::Staticlib |
+ config::CrateType::ProcMacro |
+ config::CrateType::Cdylib => SymbolExportLevel::C,
+ config::CrateType::Rlib |
+ config::CrateType::Dylib => SymbolExportLevel::Rust,
+ }
+}
+
+pub fn crates_export_threshold(crate_types: &[config::CrateType])
+ -> SymbolExportLevel {
+ if crate_types.iter().any(|&crate_type| {
+ crate_export_threshold(crate_type) == SymbolExportLevel::Rust
+ }) {
+ SymbolExportLevel::Rust
+ } else {
+ SymbolExportLevel::C
+ }
+}
+
+fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cnum: CrateNum)
+ -> Lrc<DefIdMap<SymbolExportLevel>>
+{
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return Lrc::new(DefIdMap())
+ }
+
+ // Check to see if this crate is a "special runtime crate". These
+ // crates, implementation details of the standard library, typically
+ // have a bunch of `pub extern` and `#[no_mangle]` functions as the
+ // ABI between them. We don't want their symbols to have a `C`
+ // export level, however, as they're just implementation details.
+ // Down below we'll hardwire all of the symbols to the `Rust` export
+ // level instead.
+ let special_runtime_crate = tcx.is_panic_runtime(LOCAL_CRATE) ||
+ tcx.is_compiler_builtins(LOCAL_CRATE);
+
+ let mut reachable_non_generics: DefIdMap<_> = tcx.reachable_set(LOCAL_CRATE).0
+ .iter()
+ .filter_map(|&node_id| {
+ // We want to ignore some FFI functions that are not exposed from
+ // this crate. Reachable FFI functions can be lumped into two
+ // categories:
+ //
+ // 1. Those that are included statically via a static library
+ // 2. Those included otherwise (e.g. dynamically or via a framework)
+ //
+ // Although our LLVM module is not literally emitting code for the
+ // statically included symbols, it's an export of our library which
+ // needs to be passed on to the linker and encoded in the metadata.
+ //
+ // As a result, if this id is an FFI item (foreign item) then we only
+ // let it through if it's included statically.
+ match tcx.hir.get(node_id) {
+ Node::ForeignItem(..) => {
+ let def_id = tcx.hir.local_def_id(node_id);
+ if tcx.is_statically_included_foreign_item(def_id) {
+ Some(def_id)
+ } else {
+ None
+ }
+ }
+
+ // Only consider nodes that actually have exported symbols.
+ Node::Item(&hir::Item {
+ node: hir::ItemKind::Static(..),
+ ..
+ }) |
+ Node::Item(&hir::Item {
+ node: hir::ItemKind::Fn(..), ..
+ }) |
+ Node::ImplItem(&hir::ImplItem {
+ node: hir::ImplItemKind::Method(..),
+ ..
+ }) => {
+ let def_id = tcx.hir.local_def_id(node_id);
+ let generics = tcx.generics_of(def_id);
+ if !generics.requires_monomorphization(tcx) &&
+ // Functions marked with #[inline] are only ever codegened
+ // with "internal" linkage and are never exported.
+ !Instance::mono(tcx, def_id).def.requires_local(tcx) {
+ Some(def_id)
+ } else {
+ None
+ }
+ }
+
+ _ => None
+ }
+ })
+ .map(|def_id| {
+ let export_level = if special_runtime_crate {
+ let name = tcx.symbol_name(Instance::mono(tcx, def_id)).as_str();
+ // We can probably do better here by just ensuring that
+ // it has hidden visibility rather than public
+ // visibility, as this is primarily here to ensure it's
+ // not stripped during LTO.
+ //
+ // In general though we won't link right if these
+ // symbols are stripped, and LTO currently strips them.
+ if &*name == "rust_eh_personality" ||
+ &*name == "rust_eh_register_frames" ||
+ &*name == "rust_eh_unregister_frames" {
+ SymbolExportLevel::C
+ } else {
+ SymbolExportLevel::Rust
+ }
+ } else {
+ symbol_export_level(tcx, def_id)
+ };
+ debug!("EXPORTED SYMBOL (local): {} ({:?})",
+ tcx.symbol_name(Instance::mono(tcx, def_id)),
+ export_level);
+ (def_id, export_level)
+ })
+ .collect();
+
+ if let Some(id) = *tcx.sess.derive_registrar_fn.get() {
+ let def_id = tcx.hir.local_def_id(id);
+ reachable_non_generics.insert(def_id, SymbolExportLevel::C);
+ }
+
+ if let Some(id) = *tcx.sess.plugin_registrar_fn.get() {
+ let def_id = tcx.hir.local_def_id(id);
+ reachable_non_generics.insert(def_id, SymbolExportLevel::C);
+ }
+
+ Lrc::new(reachable_non_generics)
+}
+
+fn is_reachable_non_generic_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> bool {
+ let export_threshold = threshold(tcx);
+
+ if let Some(&level) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+ level.is_below_threshold(export_threshold)
+ } else {
+ false
+ }
+}
+
+fn is_reachable_non_generic_provider_extern<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> bool {
+ tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
+}
+
+fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cnum: CrateNum)
+ -> Arc<Vec<(ExportedSymbol<'tcx>,
+ SymbolExportLevel)>>
+{
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return Arc::new(vec![])
+ }
+
+ let mut symbols: Vec<_> = tcx.reachable_non_generics(LOCAL_CRATE)
+ .iter()
+ .map(|(&def_id, &level)| {
+ (ExportedSymbol::NonGeneric(def_id), level)
+ })
+ .collect();
+
+ if tcx.sess.entry_fn.borrow().is_some() {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new("main"));
+
+ symbols.push((exported_symbol, SymbolExportLevel::C));
+ }
+
+ if tcx.sess.allocator_kind.get().is_some() {
+ for method in ALLOCATOR_METHODS {
+ let symbol_name = format!("__rust_{}", method.name);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));
+
+ symbols.push((exported_symbol, SymbolExportLevel::Rust));
+ }
+ }
+
+ if tcx.sess.opts.debugging_opts.pgo_gen.is_some() {
+ // These are weak symbols that point to the profile version and the
+ // profile name, which need to be treated as exported so LTO doesn't nix
+ // them.
+ const PROFILER_WEAK_SYMBOLS: [&'static str; 2] = [
+ "__llvm_profile_raw_version",
+ "__llvm_profile_filename",
+ ];
+ for sym in &PROFILER_WEAK_SYMBOLS {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym));
+ symbols.push((exported_symbol, SymbolExportLevel::C));
+ }
+ }
+
+ if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) {
+ let symbol_name = metadata_symbol_name(tcx);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name));
+
+ symbols.push((exported_symbol, SymbolExportLevel::Rust));
+ }
+
+ if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() {
+ use rustc::mir::mono::{Linkage, Visibility, MonoItem};
+ use rustc::ty::InstanceDef;
+
+ // Normally, we require that shared monomorphizations are not hidden,
+ // because if we want to re-use a monomorphization from a Rust dylib, it
+ // needs to be exported.
+ // However, on platforms that don't allow for Rust dylibs, having
+ // external linkage is enough for monomorphization to be linked to.
+ let need_visibility = tcx.sess.target.target.options.dynamic_linking &&
+ !tcx.sess.target.target.options.only_cdylib;
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+
+ for (mono_item, &(linkage, visibility)) in cgus.iter()
+ .flat_map(|cgu| cgu.items().iter()) {
+ if linkage != Linkage::External {
+ // We can only re-use things with external linkage, otherwise
+ // we'll get a linker error
+ continue
+ }
+
+ if need_visibility && visibility == Visibility::Hidden {
+ // If we potentially share things from Rust dylibs, they must
+ // not be hidden
+ continue
+ }
+
+ if let &MonoItem::Fn(Instance {
+ def: InstanceDef::Item(def_id),
+ substs,
+ }) = mono_item {
+ if substs.types().next().is_some() {
+ symbols.push((ExportedSymbol::Generic(def_id, substs),
+ SymbolExportLevel::Rust));
+ }
+ }
+ }
+ }
+
+ // Sort so we get a stable incr. comp. hash.
+ symbols.sort_unstable_by(|&(ref symbol1, ..), &(ref symbol2, ..)| {
+ symbol1.compare_stable(tcx, symbol2)
+ });
+
+ Arc::new(symbols)
+}
+
+fn upstream_monomorphizations_provider<'a, 'tcx>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cnum: CrateNum)
+ -> Lrc<DefIdMap<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>>
+{
+ debug_assert!(cnum == LOCAL_CRATE);
+
+ let cnums = tcx.all_crate_nums(LOCAL_CRATE);
+
+ let mut instances: DefIdMap<FxHashMap<_, _>> = DefIdMap();
+
+ let cnum_stable_ids: IndexVec<CrateNum, Fingerprint> = {
+ let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO,
+ cnums.len() + 1);
+
+ for &cnum in cnums.iter() {
+ cnum_stable_ids[cnum] = tcx.def_path_hash(DefId {
+ krate: cnum,
+ index: CRATE_DEF_INDEX,
+ }).0;
+ }
+
+ cnum_stable_ids
+ };
+
+ for &cnum in cnums.iter() {
+ for &(ref exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
+ if let &ExportedSymbol::Generic(def_id, substs) = exported_symbol {
+ let substs_map = instances.entry(def_id).or_default();
+
+ match substs_map.entry(substs) {
+ Occupied(mut e) => {
+ // If there are multiple monomorphizations available,
+ // we select one deterministically.
+ let other_cnum = *e.get();
+ if cnum_stable_ids[other_cnum] > cnum_stable_ids[cnum] {
+ e.insert(cnum);
+ }
+ }
+ Vacant(e) => {
+ e.insert(cnum);
+ }
+ }
+ }
+ }
+ }
+
+ Lrc::new(instances.into_iter()
+ .map(|(key, value)| (key, Lrc::new(value)))
+ .collect())
+}
+
+fn upstream_monomorphizations_for_provider<'a, 'tcx>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> Option<Lrc<FxHashMap<&'tcx Substs<'tcx>, CrateNum>>>
+{
+ debug_assert!(!def_id.is_local());
+ tcx.upstream_monomorphizations(LOCAL_CRATE)
+ .get(&def_id)
+ .cloned()
+}
+
+fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool {
+ if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
+ !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id)
+ } else {
+ bug!("is_unreachable_local_definition called with non-local DefId: {:?}",
+ def_id)
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.reachable_non_generics = reachable_non_generics_provider;
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
+ providers.exported_symbols = exported_symbols_provider_local;
+ providers.upstream_monomorphizations = upstream_monomorphizations_provider;
+ providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
+}
+
+pub fn provide_extern(providers: &mut Providers) {
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+ providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+}
+
+fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel {
+ // We export anything that's not mangled at the "C" layer as it probably has
+ // to do with ABI concerns. We do not, however, apply such treatment to
+ // special symbols in the standard library for various plumbing between
+    // core/std/allocators/etc. For example, symbols used to hook up allocation
+    // are not considered for export.
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
+ let is_extern = codegen_fn_attrs.contains_extern_indicator();
+ let std_internal =
+ codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+
+ if is_extern && !std_internal {
+ SymbolExportLevel::C
+ } else {
+ SymbolExportLevel::Rust
+ }
+}
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(in_band_lifetimes)]
-#![cfg_attr(stage0, feature(impl_header_lifetime_elision))]
#![feature(unboxed_closures)]
#![feature(fn_traits)]
#![feature(unsize)]
//! If you expect to store more than 1 element in the common case, steer clear
//! and use a `Vec<T>`, `Box<[T]>`, or a `SmallVec<T>`.
-use std::mem;
-
#[derive(Clone, Hash, Debug, PartialEq)]
pub struct TinyList<T: PartialEq> {
head: Option<Element<T>>
pub fn insert(&mut self, data: T) {
self.head = Some(Element {
data,
- next: mem::replace(&mut self.head, None).map(Box::new),
+ next: self.head.take().map(Box::new)
});
}
pub fn remove(&mut self, data: &T) -> bool {
self.head = match self.head {
Some(ref mut head) if head.data == *data => {
- mem::replace(&mut head.next, None).map(|x| *x)
+ head.next.take().map(|x| *x)
}
Some(ref mut head) => return head.remove_next(data),
None => return false,
if next.data != *data {
return next.remove_next(data)
} else {
- mem::replace(&mut next.next, None)
+ next.next.take()
}
} else {
return false
smallvec = { version = "0.6.5", features = ["union"] }
syntax_ext = { path = "../libsyntax_ext" }
syntax_pos = { path = "../libsyntax_pos" }
+
+[dependencies.jemalloc-sys]
+version = '0.1.8'
+optional = true
+features = ['unprefixed_malloc_on_supported_platforms']
extern crate syntax_ext;
extern crate syntax_pos;
+// Note that the linkage here should be all that we need: on Linux we're not
+// prefixing the symbols here so this should naturally override our default
+// allocator. On OSX it should override via the zone allocator. We shouldn't
+// enable this by default on other platforms, so other platforms aren't handled
+// here yet.
+#[cfg(feature = "jemalloc-sys")]
+extern crate jemalloc_sys;
+
use driver::CompileController;
use pretty::{PpMode, UserIdentifiedItem};
control.compilation_done.callback = box move |state| {
old_callback(state);
let sess = state.session;
- println!("Fuel used by {}: {}",
+ eprintln!("Fuel used by {}: {}",
sess.print_fuel_crate.as_ref().unwrap(),
sess.print_fuel.get());
}
// Theta = [A -> &'a foo]
env.create_simple_region_hierarchy();
- assert!(!env.t_nil().has_escaping_regions());
+ assert!(!env.t_nil().has_escaping_bound_vars());
let t_rptr_free1 = env.t_rptr_free(1);
- assert!(!t_rptr_free1.has_escaping_regions());
+ assert!(!t_rptr_free1.has_escaping_bound_vars());
let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, d1());
- assert!(t_rptr_bound1.has_escaping_regions());
+ assert!(t_rptr_bound1.has_escaping_bound_vars());
let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2());
- assert!(t_rptr_bound2.has_escaping_regions());
+ assert!(t_rptr_bound2.has_escaping_bound_vars());
// t_fn = fn(A)
let t_param = env.t_param(0);
- assert!(!t_param.has_escaping_regions());
+ assert!(!t_param.has_escaping_bound_vars());
let t_fn = env.t_fn(&[t_param], env.t_nil());
- assert!(!t_fn.has_escaping_regions());
+ assert!(!t_fn.has_escaping_bound_vars());
})
}
}
pub fn span_suggestions_with_applicability(&mut self, sp: Span, msg: &str,
- suggestions: Vec<String>,
- applicability: Applicability) -> &mut Self {
+ suggestions: impl Iterator<Item = String>, applicability: Applicability) -> &mut Self
+ {
self.suggestions.push(CodeSuggestion {
- substitutions: suggestions.into_iter().map(|snippet| Substitution {
+ substitutions: suggestions.map(|snippet| Substitution {
parts: vec![SubstitutionPart {
snippet,
span: sp,
pub fn span_suggestions_with_applicability(&mut self,
sp: Span,
msg: &str,
- suggestions: Vec<String>,
+ suggestions: impl Iterator<Item = String>,
applicability: Applicability)
-> &mut Self {
if !self.allow_suggestions {
pub struct EmitterWriter {
dst: Destination,
- cm: Option<Lrc<SourceMapperDyn>>,
+ sm: Option<Lrc<SourceMapperDyn>>,
short_message: bool,
teach: bool,
ui_testing: bool,
impl EmitterWriter {
pub fn stderr(color_config: ColorConfig,
- code_map: Option<Lrc<SourceMapperDyn>>,
+ source_map: Option<Lrc<SourceMapperDyn>>,
short_message: bool,
teach: bool)
-> EmitterWriter {
let dst = Destination::from_stderr(color_config);
EmitterWriter {
dst,
- cm: code_map,
+ sm: source_map,
short_message,
teach,
ui_testing: false,
}
pub fn new(dst: Box<dyn Write + Send>,
- code_map: Option<Lrc<SourceMapperDyn>>,
+ source_map: Option<Lrc<SourceMapperDyn>>,
short_message: bool,
teach: bool)
-> EmitterWriter {
EmitterWriter {
dst: Raw(dst),
- cm: code_map,
+ sm: source_map,
short_message,
teach,
ui_testing: false,
let mut output = vec![];
let mut multiline_annotations = vec![];
- if let Some(ref cm) = self.cm {
+ if let Some(ref sm) = self.sm {
for span_label in msp.span_labels() {
if span_label.span.is_dummy() {
continue;
}
- let lo = cm.lookup_char_pos(span_label.span.lo());
- let mut hi = cm.lookup_char_pos(span_label.span.hi());
+ let lo = sm.lookup_char_pos(span_label.span.lo());
+ let mut hi = sm.lookup_char_pos(span_label.span.hi());
// Watch out for "empty spans". If we get a span like 6..6, we
// want to just display a `^` at 6, so convert that to
fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize {
let mut max = 0;
- if let Some(ref cm) = self.cm {
+ if let Some(ref sm) = self.sm {
for primary_span in msp.primary_spans() {
if !primary_span.is_dummy() {
- let hi = cm.lookup_char_pos(primary_span.hi());
+ let hi = sm.lookup_char_pos(primary_span.hi());
if hi.line > max {
max = hi.line;
}
if !self.short_message {
for span_label in msp.span_labels() {
if !span_label.span.is_dummy() {
- let hi = cm.lookup_char_pos(span_label.span.hi());
+ let hi = sm.lookup_char_pos(span_label.span.hi());
if hi.line > max {
max = hi.line;
}
always_backtrace: bool) -> bool {
let mut spans_updated = false;
- if let Some(ref cm) = self.cm {
+ if let Some(ref sm) = self.sm {
let mut before_after: Vec<(Span, Span)> = vec![];
let mut new_labels: Vec<(Span, String)> = vec![];
if sp.is_dummy() {
continue;
}
- let call_sp = cm.call_span_if_macro(*sp);
+ let call_sp = sm.call_span_if_macro(*sp);
if call_sp != *sp && !always_backtrace {
before_after.push((*sp, call_sp));
}
})));
}
// Check to make sure we're not in any <*macros>
- if !cm.span_to_filename(def_site).is_macros() &&
+ if !sm.span_to_filename(def_site).is_macros() &&
!trace.macro_decl_name.starts_with("desugaring of ") &&
!trace.macro_decl_name.starts_with("#[") ||
always_backtrace {
if sp_label.span.is_dummy() {
continue;
}
- if cm.span_to_filename(sp_label.span.clone()).is_macros() &&
+ if sm.span_to_filename(sp_label.span.clone()).is_macros() &&
!always_backtrace
{
let v = sp_label.span.macro_backtrace();
let mut annotated_files = self.preprocess_annotations(msp);
// Make sure our primary file comes first
- let (primary_lo, cm) = if let (Some(cm), Some(ref primary_span)) =
- (self.cm.as_ref(), msp.primary_span().as_ref()) {
+ let (primary_lo, sm) = if let (Some(sm), Some(ref primary_span)) =
+ (self.sm.as_ref(), msp.primary_span().as_ref()) {
if !primary_span.is_dummy() {
- (cm.lookup_char_pos(primary_span.lo()), cm)
+ (sm.lookup_char_pos(primary_span.lo()), sm)
} else {
emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
return Ok(());
// Print out the annotate source lines that correspond with the error
for annotated_file in annotated_files {
// we can't annotate anything if the source is unavailable.
- if !cm.ensure_source_file_source_present(annotated_file.file.clone()) {
+ if !sm.ensure_source_file_source_present(annotated_file.file.clone()) {
continue;
}
buffer.append(buffer_msg_line_offset,
&format!("{}:{}:{}",
loc.file.name,
- cm.doctest_offset_line(loc.line),
+ sm.doctest_offset_line(loc.line),
loc.col.0 + 1),
Style::LineAndColumn);
for _ in 0..max_line_num_len {
buffer.prepend(0,
&format!("{}:{}:{}: ",
loc.file.name,
- cm.doctest_offset_line(loc.line),
+ sm.doctest_offset_line(loc.line),
loc.col.0 + 1),
Style::LineAndColumn);
}
};
format!("{}:{}{}",
annotated_file.file.name,
- cm.doctest_offset_line(first_line.line_index),
+ sm.doctest_offset_line(first_line.line_index),
col)
} else {
annotated_file.file.name.to_string()
level: &Level,
max_line_num_len: usize)
-> io::Result<()> {
- if let Some(ref cm) = self.cm {
+ if let Some(ref sm) = self.sm {
let mut buffer = StyledBuffer::new();
// Render the suggestion message
Some(Style::HeaderMsg));
// Render the replacements for each suggestion
- let suggestions = suggestion.splice_lines(&**cm);
+ let suggestions = suggestion.splice_lines(&**sm);
let mut row_num = 2;
for &(ref complete, ref parts) in suggestions.iter().take(MAX_SUGGESTIONS) {
&& parts[0].snippet.trim() == complete.trim())
&& complete.lines().count() == 1;
- let lines = cm.span_to_lines(parts[0].span).unwrap();
+ let lines = sm.span_to_lines(parts[0].span).unwrap();
assert!(!lines.lines.is_empty());
- let line_start = cm.lookup_char_pos(parts[0].span.lo()).line;
+ let line_start = sm.lookup_char_pos(parts[0].span.lo()).line;
draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1);
let mut line_pos = 0;
let mut lines = complete.lines();
if show_underline {
draw_col_separator(&mut buffer, row_num, max_line_num_len + 1);
for part in parts {
- let span_start_pos = cm.lookup_char_pos(part.span.lo()).col_display;
- let span_end_pos = cm.lookup_char_pos(part.span.hi()).col_display;
+ let span_start_pos = sm.lookup_char_pos(part.span.lo()).col_display;
+ let span_end_pos = sm.lookup_char_pos(part.span.hi()).col_display;
// Do not underline the leading...
let start = part.snippet.len()
fn span_to_filename(&self, sp: Span) -> FileName;
fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span>;
fn call_span_if_macro(&self, sp: Span) -> Span;
- fn ensure_source_file_source_present(&self, file_map: Lrc<SourceFile>) -> bool;
+ fn ensure_source_file_source_present(&self, source_file: Lrc<SourceFile>) -> bool;
fn doctest_offset_line(&self, line: usize) -> usize;
}
let (t, actually) = match ty {
ty::Int(t) => {
let ity = attr::IntType::SignedInt(t);
- let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
+ let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) as i128 >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
ty::Uint(t) => {
let ity = attr::IntType::UnsignedInt(t);
- let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
+ let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
ty::Param(..) |
ty::Infer(..) |
+ ty::Bound(..) |
ty::Error |
ty::Closure(..) |
ty::Generator(..) |
Ok(layout) => {
let variants = &layout.variants;
if let layout::Variants::Tagged { ref variants, ref tag, .. } = variants {
- let discr_size = tag.value.size(cx.tcx).bytes();
+ let discr_size = tag.value.size(&cx.tcx).bytes();
debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
t, layout.size.bytes(), layout);
declare_lint! {
pub UNUSED_MUST_USE,
Warn,
- "unused result of a type flagged as #[must_use]"
+ "unused result of a type flagged as #[must_use]",
+ report_in_external_macro: true
}
declare_lint! {
msg: &str,
followed_by_block: bool) {
if let ast::ExprKind::Paren(ref inner) = value.node {
- let necessary = followed_by_block && if let ast::ExprKind::Ret(_) = inner.node {
- true
- } else {
- parser::contains_exterior_struct_lit(&inner)
+ let necessary = followed_by_block && match inner.node {
+ ast::ExprKind::Ret(_) | ast::ExprKind::Break(..) => true,
+ _ => parser::contains_exterior_struct_lit(&inner),
};
if !necessary {
let expr_text = if let Ok(snippet) = cx.sess().source_map()
syntax = { path = "../libsyntax" }
syntax_ext = { path = "../libsyntax_ext" }
syntax_pos = { path = "../libsyntax_pos" }
-rustc_metadata_utils = { path = "../librustc_metadata_utils" }
use rustc::util::nodemap::FxHashSet;
use rustc::hir::map::Definitions;
-use rustc_metadata_utils::validate_crate_name;
-
use std::ops::Deref;
use std::path::PathBuf;
use std::{cmp, fs};
item.ident, orig_name);
let orig_name = match orig_name {
Some(orig_name) => {
- validate_crate_name(Some(self.sess), &orig_name.as_str(),
+ ::validate_crate_name(Some(self.sess), &orig_name.as_str(),
Some(item.span));
orig_name
}
for (i, dep) in root.crate_deps
.decode(self)
.enumerate() {
- write!(out, "{} {}-{}\n", i + 1, dep.name, dep.hash)?;
+ write!(out, "{} {}{}\n", i + 1, dep.name, dep.extra_filename)?;
}
write!(out, "\n")?;
Ok(())
extern crate rustc_errors as errors;
extern crate syntax_ext;
extern crate proc_macro;
-extern crate rustc_metadata_utils;
#[macro_use]
extern crate rustc;
pub mod dynamic_lib;
pub mod locator;
+pub fn validate_crate_name(
+ sess: Option<&rustc::session::Session>,
+ s: &str,
+ sp: Option<syntax_pos::Span>
+) {
+ let mut err_count = 0;
+ {
+ let mut say = |s: &str| {
+ match (sp, sess) {
+ (_, None) => bug!("{}", s),
+ (Some(sp), Some(sess)) => sess.span_err(sp, s),
+ (None, Some(sess)) => sess.err(s),
+ }
+ err_count += 1;
+ };
+ if s.is_empty() {
+ say("crate name must not be empty");
+ }
+ for c in s.chars() {
+ if c.is_alphanumeric() { continue }
+ if c == '_' { continue }
+ say(&format!("invalid character `{}` in crate name: `{}`", c, s));
+ }
+ }
+
+ if err_count > 0 {
+ sess.unwrap().abort_if_errors();
+ }
+}
+
__build_diagnostic_array! { librustc_metadata, DIAGNOSTICS }
+++ /dev/null
-[package]
-authors = ["The Rust Project Developers"]
-name = "rustc_metadata_utils"
-version = "0.0.0"
-
-[lib]
-name = "rustc_metadata_utils"
-path = "lib.rs"
-crate-type = ["dylib"]
-
-[dependencies]
-rustc = { path = "../librustc" }
-syntax = { path = "../libsyntax" }
-syntax_pos = { path = "../libsyntax_pos" }
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[macro_use]
-extern crate rustc;
-extern crate syntax_pos;
-
-use rustc::session::Session;
-use syntax_pos::Span;
-
-pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option<Span>) {
- let mut err_count = 0;
- {
- let mut say = |s: &str| {
- match (sp, sess) {
- (_, None) => bug!("{}", s),
- (Some(sp), Some(sess)) => sess.span_err(sp, s),
- (None, Some(sess)) => sess.err(s),
- }
- err_count += 1;
- };
- if s.is_empty() {
- say("crate name must not be empty");
- }
- for c in s.chars() {
- if c.is_alphanumeric() { continue }
- if c == '_' { continue }
- say(&format!("invalid character `{}` in crate name: `{}`", c, s));
- }
- }
-
- if err_count > 0 {
- sess.unwrap().abort_if_errors();
- }
-}
let what_was_dropped = match self.describe_place(place) {
Some(name) => format!("`{}`", name.as_str()),
- None => format!("temporary value"),
+ None => String::from("temporary value"),
};
let label = match self.describe_place(&borrow.borrowed_place) {
match category {
ConstraintCategory::Return => {
- err.span_note(constraint_span, &format!("closure is returned here"));
+ err.span_note(constraint_span, "closure is returned here");
}
ConstraintCategory::CallArgument => {
fr_name.highlight_region_name(&mut err);
mbcx.errors_buffer.sort_by_key(|diag| diag.span.primary_span());
if tcx.migrate_borrowck() {
- match tcx.borrowck(def_id).signalled_any_error {
+ // When borrowck=migrate, check if AST-borrowck would
+ // error on the given code.
+
+ // rust-lang/rust#55492: loop over parents to ensure that
+ // errors that AST-borrowck only detects in some parent of
+ // a closure still allows NLL to signal an error.
+ let mut curr_def_id = def_id;
+ let signalled_any_error = loop {
+ match tcx.borrowck(curr_def_id).signalled_any_error {
+ SignalledError::NoErrorsSeen => {
+ // keep traversing (and borrow-checking) parents
+ }
+ SignalledError::SawSomeError => {
+ // stop search here
+ break SignalledError::SawSomeError;
+ }
+ }
+
+ if tcx.is_closure(curr_def_id) {
+ curr_def_id = tcx.parent_def_id(curr_def_id)
+ .expect("a closure must have a parent_def_id");
+ } else {
+ break SignalledError::NoErrorsSeen;
+ }
+ };
+
+ match signalled_any_error {
SignalledError::NoErrorsSeen => {
// if AST-borrowck signalled no errors, then
// downgrade all the buffered MIR-borrowck errors
);
}
}
- for input in inputs.iter() {
+ for (_, input) in inputs.iter() {
self.consume_operand(context, (input, span), flow_state);
}
}
}
StatementKind::Nop
| StatementKind::AscribeUserType(..)
- | StatementKind::Validate(..)
+ | StatementKind::Retag { .. }
| StatementKind::StorageLive(..) => {
- // `Nop`, `AscribeUserType`, `Validate`, and `StorageLive` are irrelevant
+ // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
// to borrow check.
}
StatementKind::StorageDead(local) => {
| Write(wk @ WriteKind::StorageDeadOrDrop)
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow)) => {
- if let Err(_place_err) = self.is_mutable(place, is_local_mutation_allowed) {
+ if let (Err(_place_err), true) = (
+ self.is_mutable(place, is_local_mutation_allowed),
+ self.errors_buffer.is_empty()
+ ) {
if self.infcx.tcx.migrate_borrowck() {
// rust-lang/rust#46908: In pure NLL mode this
// code path should be unreachable (and thus
location,
);
} else {
- self.infcx.tcx.sess.delay_span_bug(
+ span_bug!(
span,
- &format!(
- "Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
- place, kind
- ),
+ "Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
+ place,
+ kind,
);
}
}
AccessKind::Move => {
err = self.infcx.tcx
.cannot_move_out_of(span, &(item_msg + &reason), Origin::Mir);
- act = "move";
- acted_on = "moved";
- span
+ err.span_label(span, "cannot move");
+ err.buffer(&mut self.errors_buffer);
+ return;
}
AccessKind::Mutate => {
err = self.infcx.tcx
);
}
}
- for input in inputs.iter() {
+ for (_, input) in inputs.iter() {
self.consume_operand(context, input);
}
}
StatementKind::EndRegion(..) |
StatementKind::Nop |
StatementKind::AscribeUserType(..) |
- StatementKind::Validate(..) |
+ StatementKind::Retag { .. } |
StatementKind::StorageLive(..) => {
- // `Nop`, `AscribeUserType`, `Validate`, and `StorageLive` are irrelevant
+ // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
// to borrow check.
}
StatementKind::StorageDead(local) => {
// Run the MIR type-checker.
let MirTypeckResults {
constraints,
- placeholder_indices,
universal_region_relations,
} = type_check::type_check(
infcx,
elements,
);
- let placeholder_indices = Rc::new(placeholder_indices);
-
if let Some(all_facts) = &mut all_facts {
all_facts
.universal_region
// base constraints generated by the type-check.
let var_origins = infcx.take_region_var_origins();
let MirTypeckRegionConstraints {
+ placeholder_indices,
+ placeholder_index_to_region: _,
mut liveness_constraints,
outlives_constraints,
closure_bounds_mapping,
type_tests,
} = constraints;
+ let placeholder_indices = Rc::new(placeholder_indices);
constraint_generation::generate_constraints(
infcx,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use borrow_check::nll::ConstraintDescription;
-use borrow_check::nll::constraints::{OutlivesConstraint};
+use borrow_check::nll::constraints::OutlivesConstraint;
use borrow_check::nll::region_infer::RegionInferenceContext;
use borrow_check::nll::type_check::Locations;
use borrow_check::nll::universal_regions::DefiningTy;
-use util::borrowck_errors::{BorrowckErrors, Origin};
+use borrow_check::nll::ConstraintDescription;
use rustc::hir::def_id::DefId;
use rustc::infer::error_reporting::nice_region_error::NiceRegionError;
use rustc::infer::InferCtxt;
+use rustc::infer::NLLRegionVariableOrigin;
use rustc::mir::{ConstraintCategory, Location, Mir};
use rustc::ty::{self, RegionVid};
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_errors::{Diagnostic, DiagnosticBuilder};
use std::collections::VecDeque;
+use syntax::errors::Applicability;
use syntax::symbol::keywords;
use syntax_pos::Span;
-use syntax::errors::Applicability;
+use util::borrowck_errors::{BorrowckErrors, Origin};
mod region_name;
mod var_name;
debug!("best_blame_constraint(from_region={:?})", from_region);
// Find all paths
- let (path, target_region) = self
- .find_constraint_paths_between_regions(from_region, target_test)
- .unwrap();
+ let (path, target_region) =
+ self.find_constraint_paths_between_regions(from_region, target_test)
+ .unwrap();
debug!(
"best_blame_constraint: path={:#?}",
path.iter()
);
// Classify each of the constraints along the path.
- let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path
- .iter()
+ let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path.iter()
.map(|constraint| {
if constraint.category == ConstraintCategory::ClosureBounds {
self.retrieve_closure_constraint_info(mir, &constraint)
| ConstraintCategory::Boring
| ConstraintCategory::BoringNoLocation
| ConstraintCategory::Internal => false,
- ConstraintCategory::TypeAnnotation
- | ConstraintCategory::Return => true,
+ ConstraintCategory::TypeAnnotation | ConstraintCategory::Return => true,
_ => constraint_sup_scc != target_scc,
}
});
if let Some(i) = best_choice {
- return categorized_path[i]
+ return categorized_path[i];
}
// If that search fails, that is.. unusual. Maybe everything
deque.push_back(from_region);
while let Some(r) = deque.pop_front() {
+ debug!(
+ "find_constraint_paths_between_regions: from_region={:?} r={:?} value={}",
+ from_region,
+ r,
+ self.region_value_str(r),
+ );
+
// Check if we reached the region we were looking for. If so,
// we can reconstruct the path that led to it and return it.
if target_test(r) {
// enqueue any regions we find, keeping track of how we
// reached them.
let fr_static = self.universal_regions.fr_static;
- for constraint in self.constraint_graph.outgoing_edges(r,
- &self.constraints,
- fr_static) {
+ for constraint in self.constraint_graph
+ .outgoing_edges(r, &self.constraints, fr_static)
+ {
assert_eq!(constraint.sup, r);
let sub_region = constraint.sub;
if let Trace::NotVisited = context[sub_region] {
) {
debug!("report_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr);
- let (category, _, span) = self.best_blame_constraint(
- mir,
- fr,
- |r| r == outlived_fr
- );
+ let (category, _, span) = self.best_blame_constraint(mir, fr, |r| {
+ self.provides_universal_region(r, fr, outlived_fr)
+ });
// Check if we can use one of the "nice region errors".
if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) {
self.universal_regions.is_local_free_region(outlived_fr),
);
- debug!("report_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}",
- fr_is_local, outlived_fr_is_local, category);
+ debug!(
+ "report_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}",
+ fr_is_local, outlived_fr_is_local, category
+ );
match (category, fr_is_local, outlived_fr_is_local) {
- (ConstraintCategory::Return, true, false) if self.is_closure_fn_mut(infcx, fr) =>
- self.report_fnmut_error(mir, infcx, mir_def_id, fr, outlived_fr, span,
- errors_buffer),
- (ConstraintCategory::Assignment, true, false) |
- (ConstraintCategory::CallArgument, true, false) =>
- self.report_escaping_data_error(mir, infcx, mir_def_id, fr, outlived_fr,
- category, span, errors_buffer),
- _ =>
- self.report_general_error(mir, infcx, mir_def_id, fr, fr_is_local,
- outlived_fr, outlived_fr_is_local,
- category, span, errors_buffer),
+ (ConstraintCategory::Return, true, false) if self.is_closure_fn_mut(infcx, fr) => {
+ self.report_fnmut_error(
+ mir,
+ infcx,
+ mir_def_id,
+ fr,
+ outlived_fr,
+ span,
+ errors_buffer,
+ )
+ }
+ (ConstraintCategory::Assignment, true, false)
+ | (ConstraintCategory::CallArgument, true, false) => self.report_escaping_data_error(
+ mir,
+ infcx,
+ mir_def_id,
+ fr,
+ outlived_fr,
+ category,
+ span,
+ errors_buffer,
+ ),
+ _ => self.report_general_error(
+ mir,
+ infcx,
+ mir_def_id,
+ fr,
+ fr_is_local,
+ outlived_fr,
+ outlived_fr_is_local,
+ category,
+ span,
+ errors_buffer,
+ ),
};
}
+ /// We have a constraint `fr1: fr2` that is not satisfied, where
+ /// `fr2` represents some universal region. Here, `r` is some
+ /// region where we know that `fr1: r` and this function has the
+ /// job of determining whether `r` is "to blame" for the fact that
+ /// `fr1: fr2` is required.
+ ///
+ /// This is true under two conditions:
+ ///
+ /// - `r == fr2`
+ /// - `fr2` is `'static` and `r` is some placeholder in a universe
+ /// that cannot be named by `fr1`; in that case, we will require
+ /// that `fr1: 'static` because it is the only way for `fr1: r` to
+ /// be satisfied. (See `add_incompatible_universe`.)
+ fn provides_universal_region(&self, r: RegionVid, fr1: RegionVid, fr2: RegionVid) -> bool {
+ debug!(
+ "provides_universal_region(r={:?}, fr1={:?}, fr2={:?})",
+ r, fr1, fr2
+ );
+ let result = {
+ r == fr2 || {
+ fr2 == self.universal_regions.fr_static && self.cannot_name_placeholder(fr1, r)
+ }
+ };
+ debug!("provides_universal_region: result = {:?}", result);
+ result
+ }
+
/// Report a specialized error when `FnMut` closures return a reference to a captured variable.
/// This function expects `fr` to be local and `outlived_fr` to not be local.
///
span: Span,
errors_buffer: &mut Vec<Diagnostic>,
) {
- let mut diag = infcx.tcx.sess.struct_span_err(
- span,
- "captured variable cannot escape `FnMut` closure body",
- );
+ let mut diag = infcx
+ .tcx
+ .sess
+ .struct_span_err(span, "captured variable cannot escape `FnMut` closure body");
// We should check if the return type of this closure is in fact a closure - in that
// case, we can special case the error further.
"returns a reference to a captured variable which escapes the closure body"
};
- diag.span_label(
- span,
- message,
- );
+ diag.span_label(span, message);
- match self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, &mut 1).source {
- RegionNameSource::NamedEarlyBoundRegion(fr_span) |
- RegionNameSource::NamedFreeRegion(fr_span) |
- RegionNameSource::SynthesizedFreeEnvRegion(fr_span, _) |
- RegionNameSource::CannotMatchHirTy(fr_span, _) |
- RegionNameSource::MatchedHirTy(fr_span) |
- RegionNameSource::MatchedAdtAndSegment(fr_span) |
- RegionNameSource::AnonRegionFromUpvar(fr_span, _) |
- RegionNameSource::AnonRegionFromOutput(fr_span, _, _) => {
+ match self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, &mut 1)
+ .source
+ {
+ RegionNameSource::NamedEarlyBoundRegion(fr_span)
+ | RegionNameSource::NamedFreeRegion(fr_span)
+ | RegionNameSource::SynthesizedFreeEnvRegion(fr_span, _)
+ | RegionNameSource::CannotMatchHirTy(fr_span, _)
+ | RegionNameSource::MatchedHirTy(fr_span)
+ | RegionNameSource::MatchedAdtAndSegment(fr_span)
+ | RegionNameSource::AnonRegionFromUpvar(fr_span, _)
+ | RegionNameSource::AnonRegionFromOutput(fr_span, _, _) => {
diag.span_label(fr_span, "inferred to be a `FnMut` closure");
- },
- _ => {},
+ }
+ _ => {}
}
- diag.note("`FnMut` closures only have access to their captured variables while they are \
- executing...");
+ diag.note(
+ "`FnMut` closures only have access to their captured variables while they are \
+ executing...",
+ );
diag.note("...therefore, they cannot allow references to captured variables to escape");
diag.buffer(errors_buffer);
DefiningTy::Closure(..) => "closure",
DefiningTy::Generator(..) => "generator",
DefiningTy::FnDef(..) => "function",
- DefiningTy::Const(..) => "const"
+ DefiningTy::Const(..) => "const",
};
// Revert to the normal error in these cases.
|| (category == ConstraintCategory::Assignment && escapes_from == "function")
|| escapes_from == "const"
{
- return self.report_general_error(mir, infcx, mir_def_id,
- fr, true, outlived_fr, false,
- category, span, errors_buffer);
+ return self.report_general_error(
+ mir,
+ infcx,
+ mir_def_id,
+ fr,
+ true,
+ outlived_fr,
+ false,
+ category,
+ span,
+ errors_buffer,
+ );
}
- let mut diag = infcx.tcx.borrowed_data_escapes_closure(span, escapes_from, Origin::Mir);
+ let mut diag = infcx
+ .tcx
+ .borrowed_data_escapes_closure(span, escapes_from, Origin::Mir);
if let Some((Some(outlived_fr_name), outlived_fr_span)) = outlived_fr_name_and_span {
diag.span_label(
),
);
- diag.span_label(span, format!("`{}` escapes the {} body here", fr_name, escapes_from));
+ diag.span_label(
+ span,
+ format!("`{}` escapes the {} body here", fr_name, escapes_from),
+ );
}
diag.buffer(errors_buffer);
let counter = &mut 1;
let fr_name = self.give_region_a_name(infcx, mir, mir_def_id, fr, counter);
fr_name.highlight_region_name(&mut diag);
- let outlived_fr_name = self.give_region_a_name(
- infcx, mir, mir_def_id, outlived_fr, counter);
+ let outlived_fr_name =
+ self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, counter);
outlived_fr_name.highlight_region_name(&mut diag);
- let mir_def_name = if infcx.tcx.is_closure(mir_def_id) { "closure" } else { "function" };
+ let mir_def_name = if infcx.tcx.is_closure(mir_def_id) {
+ "closure"
+ } else {
+ "function"
+ };
match (category, outlived_fr_is_local, fr_is_local) {
(ConstraintCategory::Return, true, _) => {
- diag.span_label(span, format!(
- "{} was supposed to return data with lifetime `{}` but it is returning \
- data with lifetime `{}`",
- mir_def_name, outlived_fr_name, fr_name
- ));
- },
+ diag.span_label(
+ span,
+ format!(
+ "{} was supposed to return data with lifetime `{}` but it is returning \
+ data with lifetime `{}`",
+ mir_def_name, outlived_fr_name, fr_name
+ ),
+ );
+ }
_ => {
- diag.span_label(span, format!(
- "{}requires that `{}` must outlive `{}`",
- category.description(), fr_name, outlived_fr_name,
- ));
- },
+ diag.span_label(
+ span,
+ format!(
+ "{}requires that `{}` must outlive `{}`",
+ category.description(),
+ fr_name,
+ outlived_fr_name,
+ ),
+ );
+ }
}
- self.add_static_impl_trait_suggestion(
- infcx, &mut diag, fr, fr_name, outlived_fr,
- );
+ self.add_static_impl_trait_suggestion(infcx, &mut diag, fr, fr_name, outlived_fr);
diag.buffer(errors_buffer);
}
fr_name: RegionName,
outlived_fr: RegionVid,
) {
- if let (
- Some(f),
- Some(ty::RegionKind::ReStatic)
- ) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) {
+ if let (Some(f), Some(ty::RegionKind::ReStatic)) =
+ (self.to_error_region(fr), self.to_error_region(outlived_fr))
+ {
if let Some(ty::TyS {
sty: ty::TyKind::Opaque(did, substs),
..
- }) = infcx.tcx.is_suitable_region(f)
- .map(|r| r.def_id)
- .map(|id| infcx.tcx.return_type_impl_trait(id))
- .unwrap_or(None)
+ }) = infcx
+ .tcx
+ .is_suitable_region(f)
+ .map(|r| r.def_id)
+ .map(|id| infcx.tcx.return_type_impl_trait(id))
+ .unwrap_or(None)
{
// Check whether or not the impl trait return type is intended to capture
// data with the static lifetime.
let mut found = false;
for predicate in bounds.predicates {
if let ty::Predicate::TypeOutlives(binder) = predicate {
- if let ty::OutlivesPredicate(
- _,
- ty::RegionKind::ReStatic
- ) = binder.skip_binder() {
+ if let ty::OutlivesPredicate(_, ty::RegionKind::ReStatic) =
+ binder.skip_binder()
+ {
found = true;
break;
}
found
};
- debug!("add_static_impl_trait_suggestion: has_static_predicate={:?}",
- has_static_predicate);
+ debug!(
+ "add_static_impl_trait_suggestion: has_static_predicate={:?}",
+ has_static_predicate
+ );
let static_str = keywords::StaticLifetime.name();
// If there is a static predicate, then the only sensible suggestion is to replace
// fr with `'static`.
if has_static_predicate {
- diag.help(
- &format!(
- "consider replacing `{}` with `{}`",
- fr_name, static_str,
- ),
- );
+ diag.help(&format!(
+ "consider replacing `{}` with `{}`",
+ fr_name, static_str,
+ ));
} else {
// Otherwise, we should suggest adding a constraint on the return type.
let span = infcx.tcx.def_span(*did);
borrow_region: RegionVid,
outlived_region: RegionVid,
) -> (ConstraintCategory, bool, Span, RegionName) {
- let (category, from_closure, span) = self.best_blame_constraint(
- mir,
- borrow_region,
- |r| r == outlived_region
- );
- let outlived_fr_name = self.give_region_a_name(
- infcx, mir, mir_def_id, outlived_region, &mut 1);
+ let (category, from_closure, span) =
+ self.best_blame_constraint(mir, borrow_region, |r| r == outlived_region);
+ let outlived_fr_name =
+ self.give_region_a_name(infcx, mir, mir_def_id, outlived_region, &mut 1);
(category, from_closure, span, outlived_fr_name)
}
// Finds some region R such that `fr1: R` and `R` is live at
// `elem`.
crate fn find_sub_region_live_at(&self, fr1: RegionVid, elem: Location) -> RegionVid {
- // Find all paths
- let (_path, r) =
- self.find_constraint_paths_between_regions(fr1, |r| {
- self.liveness_constraints.contains(r, elem)
- }).unwrap();
- r
+ debug!("find_sub_region_live_at(fr1={:?}, elem={:?})", fr1, elem);
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ // First look for some `r` such that `fr1: r` and `r` is live at `elem`
+ debug!(
+ "find_sub_region_live_at: liveness_constraints for {:?} are {:?}",
+ r,
+ self.liveness_constraints.region_value_str(r),
+ );
+ self.liveness_constraints.contains(r, elem)
+ }).or_else(|| {
+ // If we fail to find that, we may find some `r` such that
+ // `fr1: r` and `r` is a placeholder from some universe
+ // `fr1` cannot name. This would force `fr1` to be
+ // `'static`.
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ self.cannot_name_placeholder(fr1, r)
+ })
+ })
+ .or_else(|| {
+ // If we fail to find THAT, it may be that `fr1` is a
+ // placeholder that cannot "fit" into its SCC. In that
+ // case, there should be some `r` where `fr1: r`, both
+ // `fr1` and `r` are in the same SCC, and `fr1` is a
+ // placeholder that `r` cannot name. We can blame that
+ // edge.
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ self.constraint_sccs.scc(fr1) == self.constraint_sccs.scc(r)
+ && self.cannot_name_placeholder(r, fr1)
+ })
+ })
+ .map(|(_path, r)| r)
+ .unwrap()
}
// Finds a good span to blame for the fact that `fr1` outlives `fr2`.
fr1: RegionVid,
fr2: RegionVid,
) -> (ConstraintCategory, Span) {
- let (category, _, span) = self.best_blame_constraint(mir, fr1, |r| r == fr2);
+ let (category, _, span) =
+ self.best_blame_constraint(mir, fr1, |r| self.provides_universal_region(r, fr1, fr2));
(category, span)
}
fn retrieve_closure_constraint_info(
&self,
mir: &Mir<'tcx>,
- constraint: &OutlivesConstraint
+ constraint: &OutlivesConstraint,
) -> (ConstraintCategory, bool, Span) {
let loc = match constraint.locations {
Locations::All(span) => return (constraint.category, false, span),
Locations::Single(loc) => loc,
};
- let opt_span_category = self
- .closure_bounds_mapping[&loc]
- .get(&(constraint.sup, constraint.sub));
+ let opt_span_category =
+ self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub));
opt_span_category
.map(|&(category, span)| (category, true, span))
.unwrap_or((constraint.category, false, mir.source_info(loc).span))
}
/// Returns `true` if a closure is inferred to be an `FnMut` closure.
- crate fn is_closure_fn_mut(
- &self,
- infcx: &InferCtxt<'_, '_, 'tcx>,
- fr: RegionVid,
- ) -> bool {
+ crate fn is_closure_fn_mut(&self, infcx: &InferCtxt<'_, '_, 'tcx>, fr: RegionVid) -> bool {
if let Some(ty::ReFree(free_region)) = self.to_error_region(fr) {
if let ty::BoundRegion::BrEnv = free_region.bound_region {
if let DefiningTy::Closure(def_id, substs) = self.universal_regions.defining_ty {
false
}
+
+ /// If `r2` represents a placeholder region, then this returns
+ /// true if `r1` cannot name that placeholder in its
+ /// value. Otherwise, returns false.
+ fn cannot_name_placeholder(&self, r1: RegionVid, r2: RegionVid) -> bool {
+ debug!("cannot_name_value_of(r1={:?}, r2={:?})", r1, r2);
+
+ match self.definitions[r2].origin {
+ NLLRegionVariableOrigin::Placeholder(placeholder) => {
+ let universe1 = self.definitions[r1].universe;
+ debug!(
+ "cannot_name_value_of: universe1={:?} placeholder={:?}",
+ universe1, placeholder
+ );
+ universe1.cannot_name(placeholder.universe)
+ }
+
+ NLLRegionVariableOrigin::FreeRegion | NLLRegionVariableOrigin::Existential => false,
+ }
+ }
}
| ty::RePlaceholder(..)
| ty::ReEmpty
| ty::ReErased
- | ty::ReClosureBound(..)
- | ty::ReCanonical(..) => None,
+ | ty::ReClosureBound(..) => None,
}
}
if scc_universe.can_name(placeholder.universe) {
self.scc_values.add_element(scc, placeholder);
} else {
+ debug!(
+ "init_free_and_bound_regions: placeholder {:?} is \
+ not compatible with universe {:?} of its SCC {:?}",
+ placeholder,
+ scc_universe,
+ scc,
+ );
self.add_incompatible_universe(scc);
}
}
let mut constraints: Vec<_> = self.constraints.iter().collect();
constraints.sort();
constraints
+ .into_iter()
+ .map(|c| (c, self.constraint_sccs.scc(c.sup), self.constraint_sccs.scc(c.sub)))
+ .collect::<Vec<_>>()
});
// To propagate constraints, we walk the DAG induced by the
/// `'a` with `'b` and not `'static`. But it will have to do for
/// now.
fn add_incompatible_universe(&mut self, scc: ConstraintSccIndex) {
+ debug!("add_incompatible_universe(scc={:?})", scc);
+
let fr_static = self.universal_regions.fr_static;
self.scc_values.add_all_points(scc);
self.scc_values.add_element(scc, fr_static);
);
let longer_fr_scc = self.constraint_sccs.scc(longer_fr);
+ debug!(
+ "check_bound_universal_region: longer_fr_scc={:?}",
+ longer_fr_scc,
+ );
// If we have some bound universal region `'a`, then the only
// elements it can contain is itself -- we don't know anything
Some(v) => v,
None => return,
};
+ debug!("check_bound_universal_region: error_element = {:?}", error_element);
// Find the region that introduced this `error_element`.
let error_region = match error_element {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint};
+use borrow_check::nll::constraints::OutlivesConstraint;
use borrow_check::nll::region_infer::TypeTest;
-use borrow_check::nll::type_check::Locations;
+use borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints};
use borrow_check::nll::universal_regions::UniversalRegions;
+use borrow_check::nll::ToRegionVid;
use rustc::infer::canonical::QueryRegionConstraint;
use rustc::infer::outlives::env::RegionBoundPairs;
use rustc::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate};
use rustc::infer::region_constraints::{GenericKind, VerifyBound};
-use rustc::infer::{self, SubregionOrigin};
+use rustc::infer::{self, InferCtxt, SubregionOrigin};
use rustc::mir::ConstraintCategory;
use rustc::ty::subst::UnpackedKind;
use rustc::ty::{self, TyCtxt};
use syntax_pos::DUMMY_SP;
crate struct ConstraintConversion<'a, 'gcx: 'tcx, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
universal_regions: &'a UniversalRegions<'tcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
param_env: ty::ParamEnv<'tcx>,
locations: Locations,
category: ConstraintCategory,
- outlives_constraints: &'a mut ConstraintSet,
- type_tests: &'a mut Vec<TypeTest<'tcx>>,
+ constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
}
impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> {
crate fn new(
- tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
universal_regions: &'a UniversalRegions<'tcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
locations: Locations,
category: ConstraintCategory,
- outlives_constraints: &'a mut ConstraintSet,
- type_tests: &'a mut Vec<TypeTest<'tcx>>,
+ constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
) -> Self {
Self {
- tcx,
+ infcx,
+ tcx: infcx.tcx,
universal_regions,
region_bound_pairs,
implicit_region_bound,
param_env,
locations,
category,
- outlives_constraints,
- type_tests,
+ constraints,
}
}
// when we move to universes, we will, and this assertion
// will start to fail.
let ty::OutlivesPredicate(k1, r2) =
- query_constraint.no_late_bound_regions().unwrap_or_else(|| {
+ query_constraint.no_bound_vars().unwrap_or_else(|| {
bug!(
- "query_constraint {:?} contained bound regions",
+ "query_constraint {:?} contained bound vars",
query_constraint,
);
});
}
fn verify_to_type_test(
- &self,
+ &mut self,
generic_kind: GenericKind<'tcx>,
region: ty::Region<'tcx>,
verify_bound: VerifyBound<'tcx>,
}
}
- fn to_region_vid(&self, r: ty::Region<'tcx>) -> ty::RegionVid {
- self.universal_regions.to_region_vid(r)
+ fn to_region_vid(&mut self, r: ty::Region<'tcx>) -> ty::RegionVid {
+ if let ty::RePlaceholder(placeholder) = r {
+ self.constraints
+ .placeholder_region(self.infcx, *placeholder)
+ .to_region_vid()
+ } else {
+ self.universal_regions.to_region_vid(r)
+ }
}
fn add_outlives(&mut self, sup: ty::RegionVid, sub: ty::RegionVid) {
- self.outlives_constraints.push(OutlivesConstraint {
- locations: self.locations,
- category: self.category,
- sub,
- sup,
- });
+ self.constraints
+ .outlives_constraints
+ .push(OutlivesConstraint {
+ locations: self.locations,
+ category: self.category,
+ sub,
+ sup,
+ });
}
fn add_type_test(&mut self, type_test: TypeTest<'tcx>) {
debug!("add_type_test(type_test={:?})", type_test);
- self.type_tests.push(type_test);
+ self.constraints.type_tests.push(type_test);
}
}
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
) {
- let b = self.universal_regions.to_region_vid(b);
- let a = self.universal_regions.to_region_vid(a);
+ let b = self.to_region_vid(b);
+ let a = self.to_region_vid(a);
self.add_outlives(b, a);
}
for data in constraint_sets {
constraint_conversion::ConstraintConversion::new(
- self.infcx.tcx,
+ self.infcx,
&self.universal_regions,
&self.region_bound_pairs,
self.implicit_region_bound,
self.param_env,
Locations::All(DUMMY_SP),
ConstraintCategory::Internal,
- &mut self.constraints.outlives_constraints,
- &mut self.constraints.type_tests,
+ &mut self.constraints,
).convert_all(&data);
}
use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint};
use borrow_check::nll::facts::AllFacts;
use borrow_check::nll::region_infer::values::LivenessValues;
+use borrow_check::nll::region_infer::values::PlaceholderIndex;
use borrow_check::nll::region_infer::values::PlaceholderIndices;
use borrow_check::nll::region_infer::values::RegionValueElements;
use borrow_check::nll::region_infer::{ClosureRegionRequirementsExt, TypeTest};
use dataflow::move_paths::MoveData;
use dataflow::FlowAtLocation;
use dataflow::MaybeInitializedPlaces;
+use either::Either;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::infer::canonical::QueryRegionConstraint;
use rustc::infer::outlives::env::RegionBoundPairs;
-use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime};
+use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime, NLLRegionVariableOrigin};
use rustc::mir::interpret::EvalErrorKind::BoundsCheck;
use rustc::mir::tcx::PlaceTy;
use rustc::mir::visit::{PlaceContext, Visitor, MutatingUseContext, NonMutatingUseContext};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::subst::{Subst, Substs, UnpackedKind};
use rustc::ty::{self, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TyKind};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::indexed_vec::IndexVec;
use std::rc::Rc;
use std::{fmt, iter};
use syntax_pos::{Span, DUMMY_SP};
use transform::{MirPass, MirSource};
-use either::Either;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-
macro_rules! span_mirbug {
($context:expr, $elem:expr, $($message:tt)*) => ({
$crate::borrow_check::nll::type_check::mirbug(
) -> MirTypeckResults<'tcx> {
let implicit_region_bound = infcx.tcx.mk_region(ty::ReVar(universal_regions.fr_fn_body));
let mut constraints = MirTypeckRegionConstraints {
+ placeholder_indices: PlaceholderIndices::default(),
+ placeholder_index_to_region: IndexVec::default(),
liveness_constraints: LivenessValues::new(elements),
outlives_constraints: ConstraintSet::default(),
closure_bounds_mapping: Default::default(),
type_tests: Vec::default(),
};
- let mut placeholder_indices = PlaceholderIndices::default();
let CreateResult {
universal_region_relations,
borrow_set,
all_facts,
constraints: &mut constraints,
- placeholder_indices: &mut placeholder_indices,
};
type_check_internal(
MirTypeckResults {
constraints,
- placeholder_indices,
universal_region_relations,
}
}
}
fn sanitize_type(&mut self, parent: &dyn fmt::Debug, ty: Ty<'tcx>) -> Ty<'tcx> {
- if ty.has_escaping_regions() || ty.references_error() {
+ if ty.has_escaping_bound_vars() || ty.references_error() {
span_mirbug_and_err!(self, parent, "bad type {:?}", ty)
} else {
ty
all_facts: &'a mut Option<AllFacts>,
borrow_set: &'a BorrowSet<'tcx>,
constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
- placeholder_indices: &'a mut PlaceholderIndices,
}
crate struct MirTypeckResults<'tcx> {
crate constraints: MirTypeckRegionConstraints<'tcx>,
- crate placeholder_indices: PlaceholderIndices,
crate universal_region_relations: Rc<UniversalRegionRelations<'tcx>>,
}
/// A collection of region constraints that must be satisfied for the
/// program to be considered well-typed.
crate struct MirTypeckRegionConstraints<'tcx> {
+ /// Maps from a `ty::Placeholder` to the corresponding
+ /// `PlaceholderIndex` bit that we will use for it.
+ ///
+ /// To keep everything in sync, do not insert this set
+ /// directly. Instead, use the `placeholder_region` helper.
+ crate placeholder_indices: PlaceholderIndices,
+
+ /// Each time we add a placeholder to `placeholder_indices`, we
+ /// also create a corresponding "representative" region vid
+ /// that wraps it. This vector tracks those. This way, when we
+ /// convert the same `ty::RePlaceholder(p)` twice, we can map to
+ /// the same underlying `RegionVid`.
+ crate placeholder_index_to_region: IndexVec<PlaceholderIndex, ty::Region<'tcx>>,
+
/// In general, the type-checker is not responsible for enforcing
/// liveness constraints; this job falls to the region inferencer,
/// which performs a liveness analysis. However, in some limited
crate type_tests: Vec<TypeTest<'tcx>>,
}
+impl MirTypeckRegionConstraints<'tcx> {
+ fn placeholder_region(
+ &mut self,
+ infcx: &InferCtxt<'_, '_, 'tcx>,
+ placeholder: ty::Placeholder,
+ ) -> ty::Region<'tcx> {
+ let placeholder_index = self.placeholder_indices.insert(placeholder);
+ match self.placeholder_index_to_region.get(placeholder_index) {
+ Some(&v) => v,
+ None => {
+ let origin = NLLRegionVariableOrigin::Placeholder(placeholder);
+ let region = infcx.next_nll_region_var_in_universe(origin, placeholder.universe);
+ self.placeholder_index_to_region.push(region);
+ region
+ }
+ }
+ }
+}
+
/// The `Locations` type summarizes *where* region constraints are
/// required to hold. Normally, this is at a particular point which
/// created the obligation, but for constraints that the user gave, we
if let Some(ref mut borrowck_context) = self.borrowck_context {
constraint_conversion::ConstraintConversion::new(
- self.infcx.tcx,
+ self.infcx,
borrowck_context.universal_regions,
self.region_bound_pairs,
self.implicit_region_bound,
self.param_env,
locations,
category,
- &mut borrowck_context.constraints.outlives_constraints,
- &mut borrowck_context.constraints.type_tests,
+ &mut borrowck_context.constraints,
).convert_all(&data);
}
}
| StatementKind::StorageDead(_)
| StatementKind::InlineAsm { .. }
| StatementKind::EndRegion(_)
- | StatementKind::Validate(..)
+ | StatementKind::Retag { .. }
| StatementKind::Nop => {}
}
}
.enumerate()
.filter_map(|(idx, constraint)| {
let ty::OutlivesPredicate(k1, r2) =
- constraint.no_late_bound_regions().unwrap_or_else(|| {
- bug!("query_constraint {:?} contained bound regions", constraint,);
+ constraint.no_bound_vars().unwrap_or_else(|| {
+ bug!("query_constraint {:?} contained bound vars", constraint,);
});
match k1.unpack() {
}
fn next_existential_region_var(&mut self) -> ty::Region<'tcx> {
- let origin = NLLRegionVariableOrigin::Existential;
- self.infcx.next_nll_region_var(origin)
+ if let Some(_) = &mut self.borrowck_context {
+ let origin = NLLRegionVariableOrigin::Existential;
+ self.infcx.next_nll_region_var(origin)
+ } else {
+ self.infcx.tcx.types.re_erased
+ }
}
fn next_placeholder_region(&mut self, placeholder: ty::Placeholder) -> ty::Region<'tcx> {
- let origin = NLLRegionVariableOrigin::Placeholder(placeholder);
if let Some(borrowck_context) = &mut self.borrowck_context {
- borrowck_context.placeholder_indices.insert(placeholder);
+ borrowck_context.constraints.placeholder_region(self.infcx, placeholder)
+ } else {
+ self.infcx.tcx.types.re_erased
}
- self.infcx.next_nll_region_var(origin)
}
fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> {
.into_boxed_slice();
let inputs = inputs
.into_iter()
- .map(|input| unpack!(block = this.as_local_operand(block, input)))
- .collect::<Vec<_>>()
+ .map(|input| {
+ (
+ input.span(),
+ unpack!(block = this.as_local_operand(block, input)),
+ )
+ }).collect::<Vec<_>>()
.into_boxed_slice();
this.cfg.push(
block,
use syntax::source_map::{Span, DUMMY_SP};
use interpret::{self,
- PlaceTy, MemPlace, OpTy, Operand, Value, Scalar, ConstValue, Pointer,
+ PlaceTy, MemPlace, OpTy, Operand, Immediate, Scalar, ConstValue, Pointer,
EvalResult, EvalError, EvalErrorKind, GlobalId, EvalContext, StackPopCleanup,
Allocation, AllocId, MemoryKind,
snapshot, RefTracking,
_ => false,
};
let normalized_op = if normalize {
- ecx.try_read_value(op)?
+ ecx.try_read_immediate(op)?
} else {
match op.op {
Operand::Indirect(mplace) => Err(mplace),
let alloc = ecx.tcx.intern_const_alloc(alloc);
ConstValue::ByRef(ptr.alloc_id, alloc, ptr.offset)
},
- Ok(Value::Scalar(x)) =>
+ Ok(Immediate::Scalar(x)) =>
ConstValue::Scalar(x.not_undef()?),
- Ok(Value::ScalarPair(a, b)) =>
+ Ok(Immediate::ScalarPair(a, b)) =>
ConstValue::ScalarPair(a.not_undef()?, b.not_undef()?),
};
Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty))
key.param_env.reveal = Reveal::UserFacing;
match tcx.const_eval(key) {
// try again with reveal all as requested
- Err(ErrorHandled::TooGeneric) => {},
+ Err(ErrorHandled::TooGeneric) => {
+ // Promoteds should never be "too generic" when getting evaluated.
+ // They either don't get evaluated, or we are in a monomorphic context
+ assert!(key.value.promoted.is_none());
+ },
// dedupliate calls
other => return other,
}
mir::StatementKind::FakeRead(..) |
mir::StatementKind::SetDiscriminant { .. } |
mir::StatementKind::StorageLive(..) |
- mir::StatementKind::Validate(..) |
+ mir::StatementKind::Retag { .. } |
mir::StatementKind::AscribeUserType(..) |
mir::StatementKind::Nop => {}
use rustc::mir::*;
use rustc::mir::tcx::RvalueInitializationState;
use rustc_data_structures::indexed_vec::{IndexVec};
+use smallvec::{SmallVec, smallvec};
use std::collections::hash_map::Entry;
use std::mem;
use super::abs_domain::Lift;
-
use super::{LocationMap, MoveData, MovePath, MovePathLookup, MovePathIndex, MoveOut, MoveOutIndex};
use super::{MoveError, InitIndex, Init, InitLocation, LookupResult, InitKind};
use super::IllegalMoveOriginKind::*;
}
fn new_move_path(move_paths: &mut IndexVec<MovePathIndex, MovePath<'tcx>>,
- path_map: &mut IndexVec<MovePathIndex, Vec<MoveOutIndex>>,
- init_path_map: &mut IndexVec<MovePathIndex, Vec<InitIndex>>,
+ path_map: &mut IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
+ init_path_map: &mut IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
parent: Option<MovePathIndex>,
place: Place<'tcx>)
-> MovePathIndex
move_paths[move_path].next_sibling = next_sibling;
}
- let path_map_ent = path_map.push(vec![]);
+ let path_map_ent = path_map.push(smallvec![]);
assert_eq!(path_map_ent, move_path);
- let init_path_map_ent = init_path_map.push(vec![]);
+ let init_path_map_ent = init_path_map.push(smallvec![]);
assert_eq!(init_path_map_ent, move_path);
move_path
self.gather_init(output, InitKind::Deep);
}
}
- for input in inputs.iter() {
+ for (_, input) in inputs.iter() {
self.gather_operand(input);
}
}
"SetDiscriminant should not exist during borrowck");
}
StatementKind::EndRegion(_) |
- StatementKind::Validate(..) |
+ StatementKind::Retag { .. } |
StatementKind::AscribeUserType(..) |
StatementKind::Nop => {}
}
use rustc::mir::*;
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_vec::{IndexVec};
+use smallvec::SmallVec;
use syntax_pos::{Span};
use std::fmt;
/// of executing the code at `l`. (There can be multiple MoveOut's
/// for a given `l` because each MoveOut is associated with one
/// particular path being moved.)
- pub loc_map: LocationMap<Vec<MoveOutIndex>>,
- pub path_map: IndexVec<MovePathIndex, Vec<MoveOutIndex>>,
+ pub loc_map: LocationMap<SmallVec<[MoveOutIndex; 4]>>,
+ pub path_map: IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
pub rev_lookup: MovePathLookup<'tcx>,
pub inits: IndexVec<InitIndex, Init>,
/// Each Location `l` is mapped to the Inits that are effects
/// of executing the code at `l`.
- pub init_loc_map: LocationMap<Vec<InitIndex>>,
- pub init_path_map: IndexVec<MovePathIndex, Vec<InitIndex>>,
+ pub init_loc_map: LocationMap<SmallVec<[InitIndex; 4]>>,
+ pub init_path_map: IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
}
pub trait HasMoveData<'tcx> {
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
- ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx)
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &self.tcx)
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
}
ty::Int(ity) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
- let bits = Integer::from_attr(cx.tcx, SignedInt(ity)).size().bits() as u128;
+ let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128;
let min = 1u128 << (bits - 1);
let max = (1u128 << (bits - 1)) - 1;
vec![ConstantRange(min, max, pcx.ty, RangeEnd::Included)]
}
ty::Uint(uty) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
- let bits = Integer::from_attr(cx.tcx, UnsignedInt(uty)).size().bits() as u128;
+ let bits = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size().bits() as u128;
let max = !0u128 >> (128 - bits);
vec![ConstantRange(0, max, pcx.ty, RangeEnd::Included)]
}
fn signed_bias(tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>) -> u128 {
match ty.sty {
ty::Int(ity) => {
- let bits = Integer::from_attr(tcx, SignedInt(ity)).size().bits() as u128;
+ let bits = Integer::from_attr(&tcx, SignedInt(ity)).size().bits() as u128;
1u128 << (bits - 1)
}
_ => 0
is non-empty",
pat_ty));
span_help!(&mut err, scrut.span,
- "Please ensure that all possible cases are being handled; \
- possibly adding wildcards or more match arms.");
+ "ensure that all possible cases are being handled, \
+ possibly by adding wildcards or more match arms");
err.emit();
}
// If the type *is* uninhabited, it's vacuously exhaustive
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
- ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx)
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx)
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
use rustc::mir::CastKind;
use rustc_apfloat::Float;
-use super::{EvalContext, Machine, PlaceTy, OpTy, Value};
+use super::{EvalContext, Machine, PlaceTy, OpTy, Immediate};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
Misc => {
let src_layout = src.layout;
- let src = self.read_value(src)?;
+ let src = self.read_immediate(src)?;
let src = if M::ENABLE_PTR_TRACKING_HOOKS && src_layout.ty.is_region_ptr() {
// The only `Misc` casts on references are those creating raw pointers.
if self.type_is_fat_ptr(src_layout.ty) {
match (src, self.type_is_fat_ptr(dest.layout.ty)) {
// pointers to extern types
- (Value::Scalar(_),_) |
+ (Immediate::Scalar(_),_) |
// slices and trait objects to other slices/trait objects
- (Value::ScalarPair(..), true) => {
- // No change to value
- self.write_value(src, dest)?;
+ (Immediate::ScalarPair(..), true) => {
+ // No change to immediate
+ self.write_immediate(src, dest)?;
}
// slices and trait objects to thin pointers (dropping the metadata)
- (Value::ScalarPair(data, _), false) => {
+ (Immediate::ScalarPair(data, _), false) => {
self.write_scalar(data, dest)?;
}
}
}
UnsafeFnPointer => {
- let src = self.read_value(src)?;
+ let src = self.read_immediate(src)?;
match dest.layout.ty.sty {
ty::FnPtr(_) => {
// No change to value
- self.write_value(*src, dest)?;
+ self.write_immediate(*src, dest)?;
}
ref other => bug!("fn to unsafe fn cast on {:?}", other),
}
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
- let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into());
- self.write_value(val, dest)?;
+ let val = Immediate::Scalar(Scalar::Ptr(fn_ptr.into()).into());
+ self.write_immediate(val, dest)?;
}
ref other => bug!("closure fn pointer on {:?}", other),
}
match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
(&ty::Array(_, length), &ty::Slice(_)) => {
- let ptr = self.read_value(src)?.to_scalar_ptr()?;
+ let ptr = self.read_immediate(src)?.to_scalar_ptr()?;
// u64 cast is from usize to u64, which is always good
- let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx);
- self.write_value(val, dest)
+ let val = Immediate::new_slice(
+ ptr,
+ length.unwrap_usize(self.tcx.tcx),
+ self,
+ );
+ self.write_immediate(val, dest)
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
- let val = self.read_value(src)?;
- self.write_value(*val, dest)
+ let val = self.read_immediate(src)?;
+ self.write_immediate(*val, dest)
}
(_, &ty::Dynamic(ref data, _)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
- let ptr = self.read_value(src)?.to_scalar_ptr()?;
- let val = Value::new_dyn_trait(ptr, vtable);
- self.write_value(val, dest)
+ let ptr = self.read_immediate(src)?.to_scalar_ptr()?;
+ let val = Immediate::new_dyn_trait(ptr, vtable);
+ self.write_immediate(val, dest)
}
_ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty),
src_field.into()
}
Err(..) => {
- let src_field_layout = src.layout.field(&self, i)?;
+ let src_field_layout = src.layout.field(self, i)?;
// this must be a field covering the entire thing
assert_eq!(src.layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field_layout.size, src.layout.size);
use rustc_data_structures::fx::FxHashMap;
use super::{
- Value, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef,
+ Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef,
Memory, Machine
};
}
}
-impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
- for &'b EvalContext<'a, 'mir, 'tcx, M>
+impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
+ for EvalContext<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
}
}
-impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
- for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
-{
- #[inline]
- fn data_layout(&self) -> &layout::TargetDataLayout {
- &self.tcx.data_layout
- }
-}
-
-impl<'b, 'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'b EvalContext<'a, 'mir, 'tcx, M>
+impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for EvalContext<'a, 'mir, 'tcx, M>
where M: Machine<'a, 'mir, 'tcx>
{
#[inline]
}
}
-impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> layout::HasTyCtxt<'tcx>
- for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
-{
- #[inline]
- fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> {
- *self.tcx
- }
-}
-
-impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
- for &'b EvalContext<'a, 'mir, 'tcx, M>
+impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
+ for EvalContext<'a, 'mir, 'tcx, M>
{
type Ty = Ty<'tcx>;
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
#[inline]
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
.map_err(|layout| EvalErrorKind::Layout(layout).into())
}
}
-impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf
- for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
-{
- type Ty = Ty<'tcx>;
- type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
-
- #[inline]
- fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- (&**self).layout_of(ty)
- }
-}
-
impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
pub fn new(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
self.layout_of(local_ty)
}
- pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value<M::PointerTag>> {
+ pub fn str_to_immediate(&mut self, s: &str) -> EvalResult<'tcx, Immediate<M::PointerTag>> {
let ptr = self.memory.allocate_static_bytes(s.as_bytes()).with_default_tag();
- Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
+ Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self))
}
/// Return the actual dynamic size and alignment of the place at the given type.
// don't allocate at all for trivial constants
if mir.local_decls.len() > 1 {
- // We put some marker value into the locals that we later want to initialize.
+ // We put some marker immediate into the locals that we later want to initialize.
// This can be anything except for LocalValue::Dead -- because *that* is the
// value we use for things that we know are initially dead.
let dummy =
- LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)));
+ LocalValue::Live(Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef)));
let mut locals = IndexVec::from_elem(dummy, &mir.local_decls);
// Return place is handled specially by the `eval_place` functions, and the
// entry in `locals` should never be used. Make it dead, to be sure.
ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
}
}
- Ok(Operand::Immediate(Value::Scalar(val))) => {
+ Ok(Operand::Immediate(Immediate::Scalar(val))) => {
write!(msg, " {:?}", val).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
}
}
- Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => {
+ Ok(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
| "add_with_overflow"
| "sub_with_overflow"
| "mul_with_overflow" => {
- let lhs = self.read_value(args[0])?;
- let rhs = self.read_value(args[1])?;
+ let lhs = self.read_immediate(args[0])?;
+ let rhs = self.read_immediate(args[1])?;
let (bin_op, ignore_overflow) = match intrinsic_name {
"overflowing_add" => (BinOp::Add, true),
"overflowing_sub" => (BinOp::Sub, true),
}
}
"unchecked_shl" | "unchecked_shr" => {
- let l = self.read_value(args[0])?;
- let r = self.read_value(args[1])?;
+ let l = self.read_immediate(args[0])?;
+ let r = self.read_immediate(args[1])?;
let bin_op = match intrinsic_name {
"unchecked_shl" => BinOp::Shl,
"unchecked_shr" => BinOp::Shr,
_ => bug!("Already checked for int ops")
};
- let (val, overflowed) = self.binary_op_val(bin_op, l, r)?;
+ let (val, overflowed) = self.binary_op_imm(bin_op, l, r)?;
if overflowed {
let layout = self.layout_of(substs.type_at(0))?;
let r_val = r.to_scalar()?.to_bits(layout.size)?;
// Some fn calls are actually BinOp intrinsics
if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) {
let dest = dest.expect("128 lowerings can't diverge");
- let l = self.read_value(args[0])?;
- let r = self.read_value(args[1])?;
+ let l = self.read_immediate(args[0])?;
+ let r = self.read_immediate(args[1])?;
if oflo {
self.binop_with_overflow(op, l, r, dest)?;
} else {
} else if Some(def_id) == self.tcx.lang_items().panic_fn() {
assert!(args.len() == 1);
// &(&'static str, &'static str, u32, u32)
- let ptr = self.read_value(args[0])?;
+ let ptr = self.read_immediate(args[0])?;
let place = self.ref_to_mplace(ptr)?;
let (msg, file, line, col) = (
self.mplace_field(place, 0)?,
self.mplace_field(place, 3)?,
);
- let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?;
+ let msg_place = self.ref_to_mplace(self.read_immediate(msg.into())?)?;
let msg = Symbol::intern(self.read_str(msg_place)?);
- let file_place = self.ref_to_mplace(self.read_value(file.into())?)?;
+ let file_place = self.ref_to_mplace(self.read_immediate(file.into())?)?;
let file = Symbol::intern(self.read_str(file_place)?);
let line = self.read_scalar(line.into())?.to_u32()?;
let col = self.read_scalar(col.into())?.to_u32()?;
assert!(args.len() == 2);
// &'static str, &(&'static str, u32, u32)
let msg = args[0];
- let ptr = self.read_value(args[1])?;
+ let ptr = self.read_immediate(args[1])?;
let place = self.ref_to_mplace(ptr)?;
let (file, line, col) = (
self.mplace_field(place, 0)?,
self.mplace_field(place, 2)?,
);
- let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?;
+ let msg_place = self.ref_to_mplace(self.read_immediate(msg.into())?)?;
let msg = Symbol::intern(self.read_str(msg_place)?);
- let file_place = self.ref_to_mplace(self.read_value(file.into())?)?;
+ let file_place = self.ref_to_mplace(self.read_immediate(file.into())?)?;
let file = Symbol::intern(self.read_str(file_place)?);
let line = self.read_scalar(line.into())?.to_u32()?;
let col = self.read_scalar(col.into())?.to_u32()?;
EvalContext, PlaceTy, OpTy, Pointer, MemPlace, MemoryKind,
};
-/// Classifying memory accesses
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum MemoryAccess {
- Read,
- Write,
-}
-
/// Whether this kind of memory is allowed to leak
pub trait MayLeak: Copy {
fn may_leak(self) -> bool;
dest: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx>;
- /// Hook for performing extra checks on a memory access.
- ///
- /// Takes read-only access to the allocation so we can keep all the memory read
- /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you
- /// need to mutate.
+ /// Hook for performing extra checks on a memory read access.
#[inline]
- fn memory_accessed(
+ fn memory_read(
_alloc: &Allocation<Self::PointerTag, Self::AllocExtra>,
_ptr: Pointer<Self::PointerTag>,
_size: Size,
- _access: MemoryAccess,
+ ) -> EvalResult<'tcx> {
+ Ok(())
+ }
+
+ /// Hook for performing extra checks on a memory write access.
+ #[inline]
+ fn memory_written(
+ _alloc: &mut Allocation<Self::PointerTag, Self::AllocExtra>,
+ _ptr: Pointer<Self::PointerTag>,
+ _size: Size,
) -> EvalResult<'tcx> {
Ok(())
}
fn memory_deallocated(
_alloc: &mut Allocation<Self::PointerTag, Self::AllocExtra>,
_ptr: Pointer<Self::PointerTag>,
+ _size: Size,
) -> EvalResult<'tcx> {
Ok(())
}
/// Execute a validation operation
#[inline]
- fn validation_op(
+ fn retag(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
- _op: ::rustc::mir::ValidationOp,
- _operand: &::rustc::mir::ValidationOperand<'tcx, ::rustc::mir::Place<'tcx>>,
+ _fn_entry: bool,
+ _place: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx> {
Ok(())
}
use super::{
Pointer, AllocId, Allocation, ConstValue, GlobalId,
EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
- Machine, MemoryAccess, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled,
+ Machine, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
-impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
- for &'b Memory<'a, 'mir, 'tcx, M>
-{
- #[inline]
- fn data_layout(&self) -> &TargetDataLayout {
- &self.tcx.data_layout
- }
-}
-impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
- for &'b &'c mut Memory<'a, 'mir, 'tcx, M>
+impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout
+ for Memory<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
}
// Let the machine take some extra action
- M::memory_deallocated(&mut alloc, ptr)?;
+ let size = Size::from_bytes(alloc.bytes.len() as u64);
+ M::memory_deallocated(&mut alloc, ptr, size)?;
// Don't forget to remember size and align of this now-dead allocation
let old = self.dead_alloc_map.insert(
}
let alloc = self.get(ptr.alloc_id)?;
- M::memory_accessed(alloc, ptr, size, MemoryAccess::Read)?;
+ M::memory_read(alloc, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
self.clear_relocations(ptr, size)?;
let alloc = self.get_mut(ptr.alloc_id)?;
- M::memory_accessed(alloc, ptr, size, MemoryAccess::Write)?;
+ M::memory_written(alloc, ptr, size)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
assert_eq!(size.bytes() as usize as u64, size.bytes());
if self.alloc_map.contains_key(&alloc) {
// Not yet interned, so proceed recursively
self.intern_static(alloc, mutability)?;
+ } else if self.dead_alloc_map.contains_key(&alloc) {
+ // dangling pointer
+ return err!(ValidationFailure(
+ "encountered dangling pointer in final constant".into(),
+ ))
}
}
Ok(())
pub use self::memory::{Memory, MemoryKind};
-pub use self::machine::{Machine, AllocMap, MemoryAccess, MayLeak};
+pub use self::machine::{Machine, AllocMap, MayLeak};
-pub use self::operand::{ScalarMaybeUndef, Value, ValTy, Operand, OpTy};
+pub use self::operand::{ScalarMaybeUndef, Immediate, ImmTy, Operand, OpTy};
pub use self::validity::RefTracking;
}
#[inline(always)]
- pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
self.not_undef()?.to_usize(cx)
}
}
#[inline(always)]
- pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> {
self.not_undef()?.to_isize(cx)
}
}
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and fat pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
-/// defined on `Value`, and do not have to work with a `Place`.
+/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
-pub enum Value<Tag=(), Id=AllocId> {
+pub enum Immediate<Tag=(), Id=AllocId> {
Scalar(ScalarMaybeUndef<Tag, Id>),
ScalarPair(ScalarMaybeUndef<Tag, Id>, ScalarMaybeUndef<Tag, Id>),
}
-impl Value {
+impl Immediate {
#[inline]
- pub fn with_default_tag<Tag>(self) -> Value<Tag>
+ pub fn with_default_tag<Tag>(self) -> Immediate<Tag>
where Tag: Default
{
match self {
- Value::Scalar(x) => Value::Scalar(x.with_default_tag()),
- Value::ScalarPair(x, y) =>
- Value::ScalarPair(x.with_default_tag(), y.with_default_tag()),
+ Immediate::Scalar(x) => Immediate::Scalar(x.with_default_tag()),
+ Immediate::ScalarPair(x, y) =>
+ Immediate::ScalarPair(x.with_default_tag(), y.with_default_tag()),
}
}
}
-impl<'tcx, Tag> Value<Tag> {
+impl<'tcx, Tag> Immediate<Tag> {
#[inline]
- pub fn erase_tag(self) -> Value
+ pub fn erase_tag(self) -> Immediate
{
match self {
- Value::Scalar(x) => Value::Scalar(x.erase_tag()),
- Value::ScalarPair(x, y) =>
- Value::ScalarPair(x.erase_tag(), y.erase_tag()),
+ Immediate::Scalar(x) => Immediate::Scalar(x.erase_tag()),
+ Immediate::ScalarPair(x, y) =>
+ Immediate::ScalarPair(x.erase_tag(), y.erase_tag()),
}
}
pub fn new_slice(
val: Scalar<Tag>,
len: u64,
- cx: impl HasDataLayout
+ cx: &impl HasDataLayout
) -> Self {
- Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into())
+ Immediate::ScalarPair(
+ val.into(),
+ Scalar::from_uint(len, cx.data_layout().pointer_size).into(),
+ )
}
pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
- Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
+ Immediate::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
}
#[inline]
pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef<Tag> {
match self {
- Value::Scalar(val) => val,
- Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
+ Immediate::Scalar(val) => val,
+ Immediate::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
}
}
#[inline]
pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
match self {
- Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
- Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
+ Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
+ Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
}
}
- /// Convert the value into a pointer (or a pointer-sized integer).
+ /// Convert the immediate into a pointer (or a pointer-sized integer).
/// Throws away the second half of a ScalarPair!
#[inline]
pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar<Tag>> {
match self {
- Value::Scalar(ptr) |
- Value::ScalarPair(ptr, _) => ptr.not_undef(),
+ Immediate::Scalar(ptr) |
+ Immediate::ScalarPair(ptr, _) => ptr.not_undef(),
}
}
#[inline]
pub fn to_meta(self) -> EvalResult<'tcx, Option<Scalar<Tag>>> {
Ok(match self {
- Value::Scalar(_) => None,
- Value::ScalarPair(_, meta) => Some(meta.not_undef()?),
+ Immediate::Scalar(_) => None,
+ Immediate::ScalarPair(_, meta) => Some(meta.not_undef()?),
})
}
}
-// ScalarPair needs a type to interpret, so we often have a value and a type together
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
-pub struct ValTy<'tcx, Tag=()> {
- value: Value<Tag>,
+pub struct ImmTy<'tcx, Tag=()> {
+ immediate: Immediate<Tag>,
pub layout: TyLayout<'tcx>,
}
-impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> {
- type Target = Value<Tag>;
+impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
+ type Target = Immediate<Tag>;
#[inline(always)]
- fn deref(&self) -> &Value<Tag> {
- &self.value
+ fn deref(&self) -> &Immediate<Tag> {
+ &self.immediate
}
}
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Operand<Tag=(), Id=AllocId> {
- Immediate(Value<Tag, Id>),
+ Immediate(Immediate<Tag, Id>),
Indirect(MemPlace<Tag, Id>),
}
}
#[inline]
- pub fn to_immediate(self) -> Value<Tag>
+ pub fn to_immediate(self) -> Immediate<Tag>
where Tag: ::std::fmt::Debug
{
match self {
- Operand::Immediate(val) => val,
+ Operand::Immediate(imm) => imm,
_ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
}
}
}
-impl<'tcx, Tag> From<ValTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
- fn from(val: ValTy<'tcx, Tag>) -> Self {
+ fn from(val: ImmTy<'tcx, Tag>) -> Self {
OpTy {
- op: Operand::Immediate(val.value),
+ op: Operand::Immediate(val.immediate),
layout: val.layout
}
}
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- /// Try reading a value in memory; this is interesting particularly for ScalarPair.
+ /// Try reading an immediate in memory; this is interesting particularly for ScalarPair.
/// Return None if the layout does not permit loading this as a value.
- pub(super) fn try_read_value_from_mplace(
+ pub(super) fn try_read_immediate_from_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Option<Value<M::PointerTag>>> {
+ ) -> EvalResult<'tcx, Option<Immediate<M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
// Not all ZSTs have a layout we would handle below, so just short-circuit them
// all here.
self.memory.check_align(ptr, ptr_align)?;
- return Ok(Some(Value::Scalar(Scalar::zst().into())));
+ return Ok(Some(Immediate::Scalar(Scalar::zst().into())));
}
let ptr = ptr.to_ptr()?;
match mplace.layout.abi {
layout::Abi::Scalar(..) => {
let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?;
- Ok(Some(Value::Scalar(scalar)))
+ Ok(Some(Immediate::Scalar(scalar)))
}
layout::Abi::ScalarPair(ref a, ref b) => {
let (a, b) = (&a.value, &b.value);
let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
- Ok(Some(Value::ScalarPair(a_val, b_val)))
+ Ok(Some(Immediate::ScalarPair(a_val, b_val)))
}
_ => Ok(None),
}
}
- /// Try returning an immediate value for the operand.
- /// If the layout does not permit loading this as a value, return where in memory
+ /// Try returning an immediate for the operand.
+ /// If the layout does not permit loading this as an immediate, return where in memory
/// we can find the data.
/// Note that for a given layout, this operation will either always fail or always
/// succeed! Whether it succeeds depends on whether the layout can be represented
- /// in a `Value`, not on which data is stored there currently.
- pub(crate) fn try_read_value(
+ /// in a `Immediate`, not on which data is stored there currently.
+ pub(crate) fn try_read_immediate(
&self,
src: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Result<Value<M::PointerTag>, MemPlace<M::PointerTag>>> {
+ ) -> EvalResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
Ok(match src.try_as_mplace() {
Ok(mplace) => {
- if let Some(val) = self.try_read_value_from_mplace(mplace)? {
+ if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
Ok(val)
} else {
Err(*mplace)
})
}
- /// Read a value from a place, asserting that that is possible with the given layout.
+ /// Read an immediate from a place, asserting that that is possible with the given layout.
#[inline(always)]
- pub fn read_value(
+ pub fn read_immediate(
&self,
op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, ValTy<'tcx, M::PointerTag>> {
- if let Ok(value) = self.try_read_value(op)? {
- Ok(ValTy { value, layout: op.layout })
+ ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+ if let Ok(immediate) = self.try_read_immediate(op)? {
+ Ok(ImmTy { immediate, layout: op.layout })
} else {
bug!("primitive read failed for type: {:?}", op.layout.ty);
}
&self,
op: OpTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
- match *self.read_value(op)? {
- Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty),
- Value::Scalar(val) => Ok(val),
- }
+ Ok(self.read_immediate(op)?.to_scalar_or_undef())
}
// Turn the MPlace into a string (must already be dereferenced!)
layout: TyLayout<'tcx>
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
// This decides which types we will use the Immediate optimization for, and hence should
- // match what `try_read_value` and `eval_place_to_op` support.
+ // match what `try_read_immediate` and `eval_place_to_op` support.
if layout.is_zst() {
- return Ok(Operand::Immediate(Value::Scalar(Scalar::zst().into())));
+ return Ok(Operand::Immediate(Immediate::Scalar(Scalar::zst().into())));
}
Ok(match layout.abi {
layout::Abi::Scalar(..) =>
- Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)),
+ Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef)),
layout::Abi::ScalarPair(..) =>
- Operand::Immediate(Value::ScalarPair(
+ Operand::Immediate(Immediate::ScalarPair(
ScalarMaybeUndef::Undef,
ScalarMaybeUndef::Undef,
)),
let field = field.try_into().unwrap();
let field_layout = op.layout.field(self, field)?;
if field_layout.is_zst() {
- let val = Value::Scalar(Scalar::zst().into());
- return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout });
+ let immediate = Immediate::Scalar(Scalar::zst().into());
+ return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
}
let offset = op.layout.fields.offset(field);
- let value = match base {
+ let immediate = match base {
// the field covers the entire type
_ if offset.bytes() == 0 && field_layout.size == op.layout.size => base,
// extract fields from types with `ScalarPair` ABI
- Value::ScalarPair(a, b) => {
+ Immediate::ScalarPair(a, b) => {
let val = if offset.bytes() == 0 { a } else { b };
- Value::Scalar(val)
+ Immediate::Scalar(val)
},
- Value::Scalar(val) =>
+ Immediate::Scalar(val) =>
bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout),
};
- Ok(OpTy { op: Operand::Immediate(value), layout: field_layout })
+ Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
}
pub fn operand_downcast(
&self,
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
- let val = self.read_value(src)?;
+ let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
Ok(self.ref_to_mplace(val)?)
}
Deref => self.deref_operand(base)?.into(),
Subslice { .. } | ConstantIndex { .. } | Index(_) => if base.layout.is_zst() {
OpTy {
- op: Operand::Immediate(Value::Scalar(Scalar::zst().into())),
+ op: Operand::Immediate(Immediate::Scalar(Scalar::zst().into())),
// the actual index doesn't matter, so we just pick a convenient one like 0
layout: base.layout.field(self, 0)?,
}
).with_default_tag())
},
ConstValue::ScalarPair(a, b) =>
- Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into())).with_default_tag()),
+ Ok(Operand::Immediate(Immediate::ScalarPair(
+ a.into(),
+ b.into(),
+ )).with_default_tag()),
ConstValue::Scalar(x) =>
- Ok(Operand::Immediate(Value::Scalar(x.into())).with_default_tag()),
+ Ok(Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag()),
}
}
pub fn const_to_op(
}
// read raw discriminant value
let discr_op = self.operand_field(rval, 0)?;
- let discr_val = self.read_value(discr_op)?;
+ let discr_val = self.read_immediate(discr_op)?;
let raw_discr = discr_val.to_scalar()?;
trace!("discr value: {:?}", raw_discr);
// post-process
.ty_adt_def().expect("tagged layout corresponds to adt")
.repr
.discr_type();
- let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
+ let discr_ty = layout::Integer::from_attr(self, discr_ty);
let shift = 128 - discr_ty.size().bits();
let truncatee = sexted as u128;
(truncatee << shift) >> shift
use rustc_apfloat::Float;
use rustc::mir::interpret::{EvalResult, Scalar};
-use super::{EvalContext, PlaceTy, Value, Machine, ValTy};
+use super::{EvalContext, PlaceTy, Immediate, Machine, ImmTy};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
- left: ValTy<'tcx, M::PointerTag>,
- right: ValTy<'tcx, M::PointerTag>,
+ left: ImmTy<'tcx, M::PointerTag>,
+ right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
- let (val, overflowed) = self.binary_op_val(op, left, right)?;
- let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
- self.write_value(val, dest)
+ let (val, overflowed) = self.binary_op_imm(op, left, right)?;
+ let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+ self.write_immediate(val, dest)
}
/// Applies the binary operation `op` to the arguments and writes the result to the
pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
- left: ValTy<'tcx, M::PointerTag>,
- right: ValTy<'tcx, M::PointerTag>,
+ left: ImmTy<'tcx, M::PointerTag>,
+ right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
- let (val, _overflowed) = self.binary_op_val(op, left, right)?;
+ let (val, _overflowed) = self.binary_op_imm(op, left, right)?;
self.write_scalar(val, dest)
}
}
}
/// Convenience wrapper that's useful when keeping the layout together with the
- /// value.
+ /// immediate value.
#[inline]
- pub fn binary_op_val(
+ pub fn binary_op_imm(
&self,
bin_op: mir::BinOp,
- left: ValTy<'tcx, M::PointerTag>,
- right: ValTy<'tcx, M::PointerTag>,
+ left: ImmTy<'tcx, M::PointerTag>,
+ right: ImmTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
self.binary_op(
bin_op,
};
use super::{
EvalContext, Machine, AllocMap,
- Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind
+ Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind
};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
- pub fn null(cx: impl HasDataLayout) -> Self {
+ pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
}
impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else
#[inline]
- pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self {
+ pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
MPlaceTy {
mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi(), cx.pointer_size()),
}
#[inline]
- pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub(super) fn len(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.sty {
impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> {
#[inline(always)]
- pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, Value<Tag>> {
+ pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, Immediate<Tag>> {
match self.op {
Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
- Operand::Immediate(value) => Err(value),
+ Operand::Immediate(imm) => Err(imm),
}
}
impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
- pub fn null(cx: impl HasDataLayout) -> Self {
+ pub fn null(cx: &impl HasDataLayout) -> Self {
Place::Ptr(MemPlace::null(cx))
}
/// Alignment is just based on the type. This is the inverse of `create_ref`.
pub fn ref_to_mplace(
&self,
- val: ValTy<'tcx, M::PointerTag>,
+ val: ImmTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
let layout = self.layout_of(pointee_type)?;
/// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space.
/// This is the inverse of `ref_to_mplace`.
+ /// `mutbl` indicates whether we are create a shared or mutable ref, or a raw pointer (`None`).
pub fn create_ref(
&mut self,
place: MPlaceTy<'tcx, M::PointerTag>,
- borrow_kind: Option<mir::BorrowKind>,
- ) -> EvalResult<'tcx, Value<M::PointerTag>> {
+ mutbl: Option<hir::Mutability>,
+ ) -> EvalResult<'tcx, Immediate<M::PointerTag>> {
// Pointer tag tracking might want to adjust the tag
let place = if M::ENABLE_PTR_TRACKING_HOOKS {
let (size, _) = self.size_and_align_of_mplace(place)?
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
- let mutbl = match borrow_kind {
- Some(mir::BorrowKind::Mut { .. }) |
- Some(mir::BorrowKind::Unique) =>
- Some(hir::MutMutable),
- Some(_) => Some(hir::MutImmutable),
- None => None,
- };
M::tag_reference(self, *place, place.layout.ty, size, mutbl)?
} else {
*place
};
Ok(match place.meta {
- None => Value::Scalar(place.ptr.into()),
- Some(meta) => Value::ScalarPair(place.ptr.into(), meta.into()),
+ None => Immediate::Scalar(place.ptr.into()),
+ Some(meta) => Immediate::ScalarPair(place.ptr.into(), meta.into()),
})
}
Place::Ptr(mplace) =>
self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
Place::Local { .. } => {
- let layout = base.layout.for_variant(&self, variant);
+ let layout = base.layout.for_variant(self, variant);
PlaceTy { layout, ..base }
}
})
val: impl Into<ScalarMaybeUndef<M::PointerTag>>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
- self.write_value(Value::Scalar(val.into()), dest)
+ self.write_immediate(Immediate::Scalar(val.into()), dest)
}
- /// Write a value to a place
+ /// Write an immediate to a place
#[inline(always)]
- pub fn write_value(
+ pub fn write_immediate(
&mut self,
- src_val: Value<M::PointerTag>,
+ src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
- self.write_value_no_validate(src_val, dest)?;
+ self.write_immediate_no_validate(src, dest)?;
if M::enforce_validity(self) {
// Data got changed, better make sure it matches the type!
Ok(())
}
- /// Write a value to a place.
+ /// Write an immediate to a place.
/// If you use this you are responsible for validating that things got copied at the
/// right type.
- fn write_value_no_validate(
+ fn write_immediate_no_validate(
&mut self,
- src_val: Value<M::PointerTag>,
+ src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
if cfg!(debug_assertions) {
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
- match src_val {
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
+ match src {
+ Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
assert_eq!(self.pointer_size(), dest.layout.size,
"Size mismatch when writing pointer"),
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) =>
+ Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) =>
assert_eq!(Size::from_bytes(size.into()), dest.layout.size,
"Size mismatch when writing bits"),
- Value::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size
- Value::ScalarPair(_, _) => {
+ Immediate::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size
+ Immediate::ScalarPair(_, _) => {
// FIXME: Can we check anything here?
}
}
}
- trace!("write_value: {:?} <- {:?}: {}", *dest, src_val, dest.layout.ty);
+ trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
- // See if we can avoid an allocation. This is the counterpart to `try_read_value`,
+ // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`,
// but not factored as a separate function.
let mplace = match dest.place {
Place::Local { frame, local } => {
match *self.stack[frame].locals[local].access_mut()? {
Operand::Immediate(ref mut dest_val) => {
// Yay, we can just change the local directly.
- *dest_val = src_val;
+ *dest_val = src;
return Ok(());
},
Operand::Indirect(mplace) => mplace, // already in memory
let dest = MPlaceTy { mplace, layout: dest.layout };
// This is already in memory, write there.
- self.write_value_to_mplace_no_validate(src_val, dest)
+ self.write_immediate_to_mplace_no_validate(src, dest)
}
- /// Write a value to memory.
+ /// Write an immediate to memory.
/// If you use this you are responsible for validating that things git copied at the
/// right type.
- fn write_value_to_mplace_no_validate(
+ fn write_immediate_to_mplace_no_validate(
&mut self,
- value: Value<M::PointerTag>,
+ value: Immediate<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (ptr, ptr_align) = dest.to_scalar_ptr_align();
// memory. The code below is not sufficient, with enough padding it might not
// cover all the bytes!
match value {
- Value::Scalar(scalar) => {
+ Immediate::Scalar(scalar) => {
match dest.layout.abi {
layout::Abi::Scalar(_) => {}, // fine
- _ => bug!("write_value_to_mplace: invalid Scalar layout: {:#?}",
+ _ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}",
dest.layout)
}
ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size
)
}
- Value::ScalarPair(a_val, b_val) => {
+ Immediate::ScalarPair(a_val, b_val) => {
let (a, b) = match dest.layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
- _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}",
+ _ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout)
};
- let (a_size, b_size) = (a.size(&self), b.size(&self));
- let (a_align, b_align) = (a.align(&self), b.align(&self));
+ let (a_size, b_size) = (a.size(self), b.size(self));
+ let (a_align, b_align) = (a.align(self), b.align(self));
let b_offset = a_size.abi_align(b_align);
- let b_ptr = ptr.offset(b_offset, &self)?.into();
+ let b_ptr = ptr.offset(b_offset, self)?.into();
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
// but that does not work: We could be a newtype around a pair, then the
"Layout mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
// Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
- let src = match self.try_read_value(src)? {
+ let src = match self.try_read_immediate(src)? {
Ok(src_val) => {
// Yay, we got a value that we can write directly.
- return self.write_value_no_validate(src_val, dest);
+ return self.write_immediate_no_validate(src_val, dest);
}
Err(mplace) => mplace,
};
let ptr = self.allocate(local_layout, MemoryKind::Stack)?;
// We don't have to validate as we can assume the local
// was already valid for its type.
- self.write_value_to_mplace_no_validate(value, ptr)?;
+ self.write_immediate_to_mplace_no_validate(value, ptr)?;
let mplace = ptr.mplace;
// Update the local
*self.stack[frame].locals[local].access_mut()? =
if layout.is_unsized() {
assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type");
// FIXME: What should we do here? We should definitely also tag!
- Ok(MPlaceTy::dangling(layout, &self))
+ Ok(MPlaceTy::dangling(layout, self))
} else {
let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
let ptr = M::tag_new_allocation(self, ptr, kind)?;
// raw discriminants for enums are isize or bigger during
// their computation, but the in-memory tag is the smallest possible
// representation
- let size = tag.value.size(self.tcx.tcx);
+ let size = tag.value.size(self);
let shift = 128 - size.bits();
let discr_val = (discr_val << shift) >> shift;
use syntax::source_map::Span;
use super::eval_context::{LocalValue, StackPopCleanup};
-use super::{Frame, Memory, Operand, MemPlace, Place, Value, ScalarMaybeUndef};
+use super::{Frame, Memory, Operand, MemPlace, Place, Immediate, ScalarMaybeUndef};
use const_eval::CompileTimeInterpreter;
#[derive(Default)]
}
}
-impl_stable_hash_for!(enum ::interpret::Value {
+impl_stable_hash_for!(enum ::interpret::Immediate {
Scalar(x),
ScalarPair(x, y),
});
-impl_snapshot_for!(enum Value {
+impl_snapshot_for!(enum Immediate {
Scalar(s),
ScalarPair(s, t),
});
//!
//! The main entry point is the `step` method.
-use rustc::mir;
+use rustc::{hir, mir};
use rustc::ty::layout::LayoutOf;
use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
// interpreter is solely intended for borrowck'ed code.
FakeRead(..) => {}
- // Validity checks.
- Validate(op, ref places) => {
- for operand in places {
- M::validation_op(self, op, operand)?;
- }
+ // Retagging.
+ Retag { fn_entry, ref place } => {
+ let dest = self.eval_place(place)?;
+ M::retag(self, fn_entry, dest)?;
}
EndRegion(..) => {}
BinaryOp(bin_op, ref left, ref right) => {
let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None };
- let left = self.read_value(self.eval_operand(left, layout)?)?;
+ let left = self.read_immediate(self.eval_operand(left, layout)?)?;
let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
- let right = self.read_value(self.eval_operand(right, layout)?)?;
+ let right = self.read_immediate(self.eval_operand(right, layout)?)?;
self.binop_ignore_overflow(
bin_op,
left,
CheckedBinaryOp(bin_op, ref left, ref right) => {
// Due to the extra boolean in the result, we can never reuse the `dest.layout`.
- let left = self.read_value(self.eval_operand(left, None)?)?;
+ let left = self.read_immediate(self.eval_operand(left, None)?)?;
let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
- let right = self.read_value(self.eval_operand(right, layout)?)?;
+ let right = self.read_immediate(self.eval_operand(right, layout)?)?;
self.binop_with_overflow(
bin_op,
left,
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
- let val = self.read_value(self.eval_operand(operand, Some(dest.layout))?)?;
+ let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?;
self.write_scalar(val, dest)?;
}
Repeat(ref operand, _) => {
let op = self.eval_operand(operand, None)?;
let dest = self.force_allocation(dest)?;
- let length = dest.len(&self)?;
+ let length = dest.len(self)?;
if length > 0 {
// write the first
if length > 1 {
// copy the rest
let (dest, dest_align) = first.to_scalar_ptr_align();
- let rest = dest.ptr_offset(first.layout.size, &self)?;
+ let rest = dest.ptr_offset(first.layout.size, self)?;
self.memory.copy_repeatedly(
dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
)?;
// FIXME(CTFE): don't allow computing the length of arrays in const eval
let src = self.eval_place(place)?;
let mplace = self.force_allocation(src)?;
- let len = mplace.len(&self)?;
+ let len = mplace.len(self)?;
let size = self.pointer_size();
self.write_scalar(
Scalar::from_uint(len, size),
Ref(_, borrow_kind, ref place) => {
let src = self.eval_place(place)?;
let val = self.force_allocation(src)?;
- let val = self.create_ref(val, Some(borrow_kind))?;
- self.write_value(val, dest)?;
+ let mutbl = match borrow_kind {
+ mir::BorrowKind::Mut { .. } |
+ mir::BorrowKind::Unique =>
+ hir::MutMutable,
+ mir::BorrowKind::Shared |
+ mir::BorrowKind::Shallow =>
+ hir::MutImmutable,
+ };
+ let val = self.create_ref(val, Some(mutbl))?;
+ self.write_immediate(val, dest)?;
}
NullaryOp(mir::NullOp::Box, _) => {
use rustc::mir::interpret::{EvalResult, PointerArithmetic, EvalErrorKind, Scalar};
use super::{
- EvalContext, Machine, Value, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup
+ EvalContext, Machine, Immediate, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup
};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
ref targets,
..
} => {
- let discr = self.read_value(self.eval_operand(discr, None)?)?;
+ let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
target,
..
} => {
- let cond_val = self.read_value(self.eval_operand(cond, None)?)?
+ let cond_val = self.read_immediate(self.eval_operand(cond, None)?)?
.to_scalar()?.to_bool()?;
if expected == cond_val {
self.goto_block(Some(target))?;
use rustc::mir::interpret::EvalErrorKind::*;
return match *msg {
BoundsCheck { ref len, ref index } => {
- let len = self.read_value(self.eval_operand(len, None)?)
+ let len = self.read_immediate(self.eval_operand(len, None)?)
.expect("can't eval len").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
- let index = self.read_value(self.eval_operand(index, None)?)
+ let index = self.read_immediate(self.eval_operand(index, None)?)
.expect("can't eval index").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
err!(BoundsCheck { len, index })
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
- let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
+ let ptr = self.ref_to_mplace(self.read_immediate(args[0])?)?;
let vtable = ptr.vtable()?;
let fn_ptr = self.memory.read_ptr_sized(
- vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
+ vtable.offset(ptr_size * (idx as u64 + 3), self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
- args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?;
- args[0].op = Operand::Immediate(Value::Scalar(ptr.ptr.into())); // strip vtable
+ args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?;
+ args[0].op = Operand::Immediate(Immediate::Scalar(ptr.ptr.into())); // strip vtable
trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(instance, span, caller_abi, &args, dest, ret)
};
let ty = self.tcx.mk_unit(); // return type is ()
- let dest = MPlaceTy::dangling(self.layout_of(ty)?, &self);
+ let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);
self.eval_fn_call(
instance,
let drop = self.memory.create_fn_alloc(drop).with_default_tag();
self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
- let size_ptr = vtable.offset(ptr_size, &self)?;
+ let size_ptr = vtable.offset(ptr_size, self)?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
- let align_ptr = vtable.offset(ptr_size * 2, &self)?;
+ let align_ptr = vtable.offset(ptr_size * 2, self)?;
self.memory.write_ptr_sized(align_ptr, ptr_align,
Scalar::from_uint(align, ptr_size).into())?;
if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
- let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
+ let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
}
}
};
use super::{
- ValTy, OpTy, MPlaceTy, Machine, EvalContext, ScalarMaybeUndef
+ ImmTy, OpTy, MPlaceTy, Machine, EvalContext, ScalarMaybeUndef
};
macro_rules! validation_failure {
/// Make sure that `value` is valid for `ty`, *assuming* `ty` is a primitive type.
fn validate_primitive_type(
&self,
- value: ValTy<'tcx, M::PointerTag>,
+ value: ImmTy<'tcx, M::PointerTag>,
path: &Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
const_mode: bool,
let (lo, hi) = layout.valid_range.clone().into_inner();
let max_hi = u128::max_value() >> (128 - size.bits()); // as big as the size fits
assert!(hi <= max_hi);
- if lo == 0 && hi == max_hi {
+ // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
+ if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
// Nothing to check
return Ok(());
}
),
_ =>
return validation_failure!(
- format!("non-integer enum discriminant"), path
+ String::from("non-integer enum discriminant"), path
),
}
};
_ => dest.layout.ty.builtin_deref(true).is_some(),
};
if primitive {
- let value = try_validation!(self.read_value(dest),
+ let value = try_validation!(self.read_immediate(dest),
"uninitialized or unrepresentable data", path);
return self.validate_primitive_type(
value,
#![feature(nll)]
#![feature(in_band_lifetimes)]
-#![cfg_attr(stage0, feature(impl_header_lifetime_elision))]
#![feature(slice_patterns)]
#![feature(slice_sort_by_cached_key)]
#![feature(box_patterns)]
borrow_check::provide(providers);
shim::provide(providers);
transform::provide(providers);
+ monomorphize::partitioning::provide(providers);
providers.const_eval = const_eval::const_eval_provider;
providers.const_eval_raw = const_eval::const_eval_raw_provider;
providers.check_match = hair::pattern::check_match;
trait_ty: Ty<'tcx>,
impl_ty: Ty<'tcx>,
output: &mut Vec<MonoItem<'tcx>>) {
- assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_regions() &&
- !impl_ty.needs_subst() && !impl_ty.has_escaping_regions());
+ assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_bound_vars() &&
+ !impl_ty.needs_subst() && !impl_ty.has_escaping_bound_vars());
if let ty::Dynamic(ref trait_ty, ..) = trait_ty.sty {
let poly_trait_ref = trait_ty.principal().with_self_ty(tcx, impl_ty);
- assert!(!poly_trait_ref.has_escaping_regions());
+ assert!(!poly_trait_ref.has_escaping_bound_vars());
// Walk all methods of the trait, including those of its supertraits
let methods = tcx.vtable_methods(poly_trait_ref);
// regions must appear in the argument
// listing.
let main_ret_ty = self.tcx.erase_regions(
- &main_ret_ty.no_late_bound_regions().unwrap(),
+ &main_ret_ty.no_bound_vars().unwrap(),
);
let start_instance = Instance::resolve(
self.push_type_params(substs, iter::empty(), output);
}
ty::Error |
+ ty::Bound(..) |
ty::Infer(_) |
ty::UnnormalizedProjection(..) |
ty::Projection(..) |
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked #[inline].
-use monomorphize::collector::InliningMap;
+use std::collections::hash_map::Entry;
+use std::cmp;
+use std::sync::Arc;
+
+use syntax::ast::NodeId;
+use syntax::symbol::InternedString;
use rustc::dep_graph::{WorkProductId, WorkProduct, DepNode, DepConstructor};
use rustc::hir::CodegenFnAttrFlags;
-use rustc::hir::def_id::{DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
+use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
use rustc::hir::map::DefPathData;
use rustc::mir::mono::{Linkage, Visibility, CodegenUnitNameBuilder};
use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
-use rustc::util::nodemap::{FxHashMap, FxHashSet};
-use std::collections::hash_map::Entry;
-use std::cmp;
-use syntax::ast::NodeId;
-use syntax::symbol::InternedString;
+use rustc::ty::query::Providers;
+use rustc::util::common::time;
+use rustc::util::nodemap::{DefIdSet, FxHashMap, FxHashSet};
use rustc::mir::mono::MonoItem;
+
+use monomorphize::collector::InliningMap;
+use monomorphize::collector::{self, MonoItemCollectionMode};
use monomorphize::item::{MonoItemExt, InstantiationMode};
pub use rustc::mir::mono::CodegenUnit;
//
// * First is weak lang items. These are basically mechanisms for
// libcore to forward-reference symbols defined later in crates like
- // the standard library or `#[panic_implementation]` definitions. The
+ // the standard library or `#[panic_handler]` definitions. The
// definition of these weak lang items needs to be referenceable by
// libcore, so we're no longer a candidate for internalization.
// Removal of these functions can't be done by LLVM but rather must be
}
}
}
+
+fn collect_and_partition_mono_items<'a, 'tcx>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ cnum: CrateNum,
+) -> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>)
+{
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
+ Some(ref s) => {
+ let mode_string = s.to_lowercase();
+ let mode_string = mode_string.trim();
+ if mode_string == "eager" {
+ MonoItemCollectionMode::Eager
+ } else {
+ if mode_string != "lazy" {
+ let message = format!("Unknown codegen-item collection mode '{}'. \
+ Falling back to 'lazy' mode.",
+ mode_string);
+ tcx.sess.warn(&message);
+ }
+
+ MonoItemCollectionMode::Lazy
+ }
+ }
+ None => {
+ if tcx.sess.opts.cg.link_dead_code {
+ MonoItemCollectionMode::Eager
+ } else {
+ MonoItemCollectionMode::Lazy
+ }
+ }
+ };
+
+ let (items, inlining_map) =
+ time(tcx.sess, "monomorphization collection", || {
+ collector::collect_crate_mono_items(tcx, collection_mode)
+ });
+
+ tcx.sess.abort_if_errors();
+
+ ::monomorphize::assert_symbols_are_distinct(tcx, items.iter());
+
+ let strategy = if tcx.sess.opts.incremental.is_some() {
+ PartitioningStrategy::PerModule
+ } else {
+ PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units())
+ };
+
+ let codegen_units = time(tcx.sess, "codegen unit partitioning", || {
+ partition(
+ tcx,
+ items.iter().cloned(),
+ strategy,
+ &inlining_map
+ )
+ .into_iter()
+ .map(Arc::new)
+ .collect::<Vec<_>>()
+ });
+
+ let mono_items: DefIdSet = items.iter().filter_map(|mono_item| {
+ match *mono_item {
+ MonoItem::Fn(ref instance) => Some(instance.def_id()),
+ MonoItem::Static(def_id) => Some(def_id),
+ _ => None,
+ }
+ }).collect();
+
+ if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
+ let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
+
+ for cgu in &codegen_units {
+ for (&mono_item, &linkage) in cgu.items() {
+ item_to_cgus.entry(mono_item)
+ .or_default()
+ .push((cgu.name().clone(), linkage));
+ }
+ }
+
+ let mut item_keys: Vec<_> = items
+ .iter()
+ .map(|i| {
+ let mut output = i.to_string(tcx);
+ output.push_str(" @@");
+ let mut empty = Vec::new();
+ let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
+ cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone());
+ cgus.dedup();
+ for &(ref cgu_name, (linkage, _)) in cgus.iter() {
+ output.push_str(" ");
+ output.push_str(&cgu_name.as_str());
+
+ let linkage_abbrev = match linkage {
+ Linkage::External => "External",
+ Linkage::AvailableExternally => "Available",
+ Linkage::LinkOnceAny => "OnceAny",
+ Linkage::LinkOnceODR => "OnceODR",
+ Linkage::WeakAny => "WeakAny",
+ Linkage::WeakODR => "WeakODR",
+ Linkage::Appending => "Appending",
+ Linkage::Internal => "Internal",
+ Linkage::Private => "Private",
+ Linkage::ExternalWeak => "ExternalWeak",
+ Linkage::Common => "Common",
+ };
+
+ output.push_str("[");
+ output.push_str(linkage_abbrev);
+ output.push_str("]");
+ }
+ output
+ })
+ .collect();
+
+ item_keys.sort();
+
+ for item in item_keys {
+ println!("MONO_ITEM {}", item);
+ }
+ }
+
+ (Arc::new(mono_items), Arc::new(codegen_units))
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.collect_and_partition_mono_items =
+ collect_and_partition_mono_items;
+
+ providers.is_codegened_item = |tcx, def_id| {
+ let (all_mono_items, _) =
+ tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+ all_mono_items.contains(&def_id)
+ };
+
+ providers.codegen_unit = |tcx, name| {
+ let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+ all.iter()
+ .find(|cgu| *cgu.name() == name)
+ .cloned()
+ .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
+ };
+}
let param_env = gcx.param_env(def_id);
// Normalize the sig.
- let sig = gcx.fn_sig(def_id).no_late_bound_regions().expect("LBR in ADT constructor signature");
+ let sig = gcx.fn_sig(def_id)
+ .no_bound_vars()
+ .expect("LBR in ADT constructor signature");
let sig = gcx.normalize_erasing_regions(param_env, sig);
let (adt_def, substs) = match sig.output().sty {
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This pass adds retag statements (Retag) where appropriate.
+//! It has to be run really early, before transformations like inlining, because
+//! introducing these statements *adds* UB -- so, conceptually, this pass is actually part
+//! of MIR building, and only after this pass we think of the program as having the
+//! normal MIR semantics.
+
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::mir::*;
+use transform::{MirPass, MirSource};
+
+pub struct AddRetag;
+
+/// Determines whether this place is local: If it is part of a local variable.
+/// We do not consider writes to pointers local, only writes that immediately assign
+/// to a local variable.
+/// One important property here is that evaluating the place immediately after
+/// the assignment must produce the same place as what was used during the assignment.
+fn is_local<'tcx>(
+ place: &Place<'tcx>,
+) -> bool {
+ use rustc::mir::Place::*;
+
+ match *place {
+ Local { .. } => true,
+ Promoted(_) |
+ Static(_) => false,
+ Projection(ref proj) => {
+ match proj.elem {
+ ProjectionElem::Deref |
+ ProjectionElem::Index(_) =>
+ // Which place these point to depends on external circumstances
+ // (a local storing the array index, the current value of
+ // the projection base), so we stop tracking here.
+ false,
+ ProjectionElem::Field { .. } |
+ ProjectionElem::ConstantIndex { .. } |
+ ProjectionElem::Subslice { .. } |
+ ProjectionElem::Downcast { .. } =>
+ // These just offset by a constant, entirely independent of everything else.
+ is_local(&proj.base),
+ }
+ }
+ }
+}
+
+/// Determine whether this type has a reference in it, recursing below compound types but
+/// not below references.
+fn has_reference<'a, 'gcx, 'tcx>(ty: Ty<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool {
+ match ty.sty {
+ // Primitive types that are not references
+ ty::Bool | ty::Char |
+ ty::Float(_) | ty::Int(_) | ty::Uint(_) |
+ ty::RawPtr(..) | ty::FnPtr(..) |
+ ty::Str | ty::FnDef(..) | ty::Never =>
+ false,
+ // References
+ ty::Ref(..) => true,
+ ty::Adt(..) if ty.is_box() => true,
+ // Compound types
+ ty::Array(ty, ..) | ty::Slice(ty) =>
+ has_reference(ty, tcx),
+ ty::Tuple(tys) =>
+ tys.iter().any(|ty| has_reference(ty, tcx)),
+ ty::Adt(adt, substs) =>
+ adt.variants.iter().any(|v| v.fields.iter().any(|f|
+ has_reference(f.ty(tcx, substs), tcx)
+ )),
+ // Conservative fallback
+ _ => true,
+ }
+}
+
+impl MirPass for AddRetag {
+ fn run_pass<'a, 'tcx>(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ _src: MirSource,
+ mir: &mut Mir<'tcx>)
+ {
+ if !tcx.sess.opts.debugging_opts.mir_emit_retag {
+ return;
+ }
+ let (span, arg_count) = (mir.span, mir.arg_count);
+ let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut();
+ let needs_retag = |place: &Place<'tcx>| {
+ is_local(place) && has_reference(place.ty(&*local_decls, tcx).to_ty(tcx), tcx)
+ };
+
+ // PART 1
+ // Retag arguments at the beginning of the start block.
+ {
+ let source_info = SourceInfo {
+ scope: OUTERMOST_SOURCE_SCOPE,
+ span: span, // FIXME: Consider using just the span covering the function
+ // argument declaration.
+ };
+ // Gather all arguments, skip return value.
+ let places = local_decls.iter_enumerated().skip(1).take(arg_count)
+ .map(|(local, _)| Place::Local(local))
+ .filter(needs_retag)
+ .collect::<Vec<_>>();
+ // Emit their retags.
+ basic_blocks[START_BLOCK].statements.splice(0..0,
+ places.into_iter().map(|place| Statement {
+ source_info,
+ kind: StatementKind::Retag { fn_entry: true, place },
+ })
+ );
+ }
+
+ // PART 2
+ // Retag return values of functions.
+ // We collect the return destinations because we cannot mutate while iterating.
+ let mut returns: Vec<(SourceInfo, Place<'tcx>, BasicBlock)> = Vec::new();
+ for block_data in basic_blocks.iter_mut() {
+ match block_data.terminator {
+ Some(Terminator { kind: TerminatorKind::Call { ref destination, .. },
+ source_info }) => {
+ // Remember the return destination for later
+ if let Some(ref destination) = destination {
+ if needs_retag(&destination.0) {
+ returns.push((source_info, destination.0.clone(), destination.1));
+ }
+ }
+ }
+ _ => {
+ // Not a block ending in a Call -> ignore.
+ // `Drop` is also a call, but it doesn't return anything so we are good.
+ }
+ }
+ }
+ // Now we go over the returns we collected to retag the return values.
+ for (source_info, dest_place, dest_block) in returns {
+ basic_blocks[dest_block].statements.insert(0, Statement {
+ source_info,
+ kind: StatementKind::Retag { fn_entry: false, place: dest_place },
+ });
+ }
+
+ // PART 3
+ // Add retag after assignment.
+ for block_data in basic_blocks {
+ // We want to insert statements as we iterate. To this end, we
+ // iterate backwards using indices.
+ for i in (0..block_data.statements.len()).rev() {
+ match block_data.statements[i].kind {
+ // Assignments can make values obtained elsewhere "local".
+ // We could try to be smart here and e.g. only retag if the assignment
+ // loaded from memory, but that seems risky: We might miss a subtle corner
+ // case.
+ StatementKind::Assign(ref place, box Rvalue::Use(..))
+ if needs_retag(place) => {
+ // Insert a retag after the assignment.
+ let source_info = block_data.statements[i].source_info;
+ block_data.statements.insert(i+1,Statement {
+ source_info,
+ kind: StatementKind::Retag { fn_entry: false, place: place.clone() },
+ });
+ }
+ _ => {},
+ }
+ }
+ }
+ }
+}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate.
-//! It has to be run really early, before transformations like inlining, because
-//! introducing these calls *adds* UB -- so, conceptually, this pass is actually part
-//! of MIR building, and only after this pass we think of the program has having the
-//! normal MIR semantics.
-
-use rustc::ty::{self, TyCtxt, RegionKind};
-use rustc::hir;
-use rustc::mir::*;
-use rustc::middle::region;
-use transform::{MirPass, MirSource};
-
-pub struct AddValidation;
-
-/// Determine the "context" of the place: Mutability and region.
-fn place_context<'a, 'tcx, D>(
- place: &Place<'tcx>,
- local_decls: &D,
- tcx: TyCtxt<'a, 'tcx, 'tcx>
-) -> (Option<region::Scope>, hir::Mutability)
- where D: HasLocalDecls<'tcx>
-{
- use rustc::mir::Place::*;
-
- match *place {
- Local { .. } => (None, hir::MutMutable),
- Promoted(_) |
- Static(_) => (None, hir::MutImmutable),
- Projection(ref proj) => {
- match proj.elem {
- ProjectionElem::Deref => {
- // Computing the inside the recursion makes this quadratic.
- // We don't expect deep paths though.
- let ty = proj.base.ty(local_decls, tcx).to_ty(tcx);
- // A Deref projection may restrict the context, this depends on the type
- // being deref'd.
- let context = match ty.sty {
- ty::Ref(re, _, mutbl) => {
- let re = match re {
- &RegionKind::ReScope(ce) => Some(ce),
- &RegionKind::ReErased =>
- bug!("AddValidation pass must be run before erasing lifetimes"),
- _ => None
- };
- (re, mutbl)
- }
- ty::RawPtr(_) =>
- // There is no guarantee behind even a mutable raw pointer,
- // no write locks are acquired there, so we also don't want to
- // release any.
- (None, hir::MutImmutable),
- ty::Adt(adt, _) if adt.is_box() => (None, hir::MutMutable),
- _ => bug!("Deref on a non-pointer type {:?}", ty),
- };
- // "Intersect" this restriction with proj.base.
- if let (Some(_), hir::MutImmutable) = context {
- // This is already as restricted as it gets, no need to even recurse
- context
- } else {
- let base_context = place_context(&proj.base, local_decls, tcx);
- // The region of the outermost Deref is always most restrictive.
- let re = context.0.or(base_context.0);
- let mutbl = context.1.and(base_context.1);
- (re, mutbl)
- }
-
- }
- _ => place_context(&proj.base, local_decls, tcx),
- }
- }
- }
-}
-
-/// Check if this function contains an unsafe block or is an unsafe function.
-fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool {
- use rustc::hir::intravisit::{self, Visitor, FnKind};
- use rustc::hir::map::blocks::FnLikeNode;
- use rustc::hir::Node;
-
- /// Decide if this is an unsafe block
- fn block_is_unsafe(block: &hir::Block) -> bool {
- use rustc::hir::BlockCheckMode::*;
-
- match block.rules {
- UnsafeBlock(_) | PushUnsafeBlock(_) => true,
- // For PopUnsafeBlock, we don't actually know -- but we will always also check all
- // parent blocks, so we can safely declare the PopUnsafeBlock to not be unsafe.
- DefaultBlock | PopUnsafeBlock(_) => false,
- }
- }
-
- /// Decide if this FnLike is a closure
- fn fn_is_closure<'a>(fn_like: FnLikeNode<'a>) -> bool {
- match fn_like.kind() {
- FnKind::Closure(_) => true,
- FnKind::Method(..) | FnKind::ItemFn(..) => false,
- }
- }
-
- let node_id = tcx.hir.as_local_node_id(src.def_id).unwrap();
- let fn_like = match tcx.hir.body_owner_kind(node_id) {
- hir::BodyOwnerKind::Fn => {
- match FnLikeNode::from_node(tcx.hir.get(node_id)) {
- Some(fn_like) => fn_like,
- None => return false, // e.g. struct ctor shims -- such auto-generated code cannot
- // contain unsafe.
- }
- },
- _ => return false, // only functions can have unsafe
- };
-
- // Test if the function is marked unsafe.
- if fn_like.unsafety() == hir::Unsafety::Unsafe {
- return true;
- }
-
- // For closures, we need to walk up the parents and see if we are inside an unsafe fn or
- // unsafe block.
- if fn_is_closure(fn_like) {
- let mut cur = fn_like.id();
- loop {
- // Go further upwards.
- cur = tcx.hir.get_parent_node(cur);
- let node = tcx.hir.get(cur);
- // Check if this is an unsafe function
- if let Some(fn_like) = FnLikeNode::from_node(node) {
- if !fn_is_closure(fn_like) {
- if fn_like.unsafety() == hir::Unsafety::Unsafe {
- return true;
- }
- }
- }
- // Check if this is an unsafe block, or an item
- match node {
- Node::Expr(&hir::Expr { node: hir::ExprKind::Block(ref block, _), ..}) => {
- if block_is_unsafe(&*block) {
- // Found an unsafe block, we can bail out here.
- return true;
- }
- }
- Node::Item(..) => {
- // No walking up beyond items. This makes sure the loop always terminates.
- break;
- }
- _ => {},
- }
- }
- }
-
- // Visit the entire body of the function and check for unsafe blocks in there
- struct FindUnsafe {
- found_unsafe: bool,
- }
- let mut finder = FindUnsafe { found_unsafe: false };
- // Run the visitor on the NodeId we got. Seems like there is no uniform way to do that.
- finder.visit_body(tcx.hir.body(fn_like.body()));
-
- impl<'tcx> Visitor<'tcx> for FindUnsafe {
- fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> {
- intravisit::NestedVisitorMap::None
- }
-
- fn visit_block(&mut self, b: &'tcx hir::Block) {
- if self.found_unsafe { return; } // short-circuit
-
- if block_is_unsafe(b) {
- // We found an unsafe block. We can stop searching.
- self.found_unsafe = true;
- } else {
- // No unsafe block here, go on searching.
- intravisit::walk_block(self, b);
- }
- }
- }
-
- finder.found_unsafe
-}
-
-impl MirPass for AddValidation {
- fn run_pass<'a, 'tcx>(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- src: MirSource,
- mir: &mut Mir<'tcx>)
- {
- let emit_validate = tcx.sess.opts.debugging_opts.mir_emit_validate;
- if emit_validate == 0 {
- return;
- }
- let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src);
- let (span, arg_count) = (mir.span, mir.arg_count);
- let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut();
-
- // Convert a place to a validation operand.
- let place_to_operand = |place: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> {
- let (re, mutbl) = place_context(&place, local_decls, tcx);
- let ty = place.ty(local_decls, tcx).to_ty(tcx);
- ValidationOperand { place, ty, re, mutbl }
- };
-
- // Emit an Acquire at the beginning of the given block. If we are in restricted emission
- // mode (mir_emit_validate=1), also emit a Release immediately after the Acquire.
- let emit_acquire = |block: &mut BasicBlockData<'tcx>, source_info, operands: Vec<_>| {
- if operands.len() == 0 {
- return; // Nothing to do
- }
- // Emit the release first, to avoid cloning if we do not emit it
- if restricted_validation {
- let release_stmt = Statement {
- source_info,
- kind: StatementKind::Validate(ValidationOp::Release, operands.clone()),
- };
- block.statements.insert(0, release_stmt);
- }
- // Now, the acquire
- let acquire_stmt = Statement {
- source_info,
- kind: StatementKind::Validate(ValidationOp::Acquire, operands),
- };
- block.statements.insert(0, acquire_stmt);
- };
-
- // PART 1
- // Add an AcquireValid at the beginning of the start block.
- {
- let source_info = SourceInfo {
- scope: OUTERMOST_SOURCE_SCOPE,
- span: span, // FIXME: Consider using just the span covering the function
- // argument declaration.
- };
- // Gather all arguments, skip return value.
- let operands = local_decls.iter_enumerated().skip(1).take(arg_count)
- .map(|(local, _)| place_to_operand(Place::Local(local))).collect();
- emit_acquire(&mut basic_blocks[START_BLOCK], source_info, operands);
- }
-
- // PART 2
- // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor
- // because we need to access the block that a Call jumps to.
- let mut returns : Vec<(SourceInfo, Place<'tcx>, BasicBlock)> = Vec::new();
- for block_data in basic_blocks.iter_mut() {
- match block_data.terminator {
- Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. },
- source_info }) => {
- // Before the call: Release all arguments *and* the return value.
- // The callee may write into the return value! Note that this relies
- // on "release of uninitialized" to be a NOP.
- if !restricted_validation {
- let release_stmt = Statement {
- source_info,
- kind: StatementKind::Validate(ValidationOp::Release,
- destination.iter().map(|dest| place_to_operand(dest.0.clone()))
- .chain(
- args.iter().filter_map(|op| {
- match op {
- &Operand::Copy(ref place) |
- &Operand::Move(ref place) =>
- Some(place_to_operand(place.clone())),
- &Operand::Constant(..) => { None },
- }
- })
- ).collect())
- };
- block_data.statements.push(release_stmt);
- }
- // Remember the return destination for later
- if let &Some(ref destination) = destination {
- returns.push((source_info, destination.0.clone(), destination.1));
- }
- }
- Some(Terminator { kind: TerminatorKind::Drop { location: ref place, .. },
- source_info }) |
- Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref place, .. },
- source_info }) => {
- // Before the call: Release all arguments
- if !restricted_validation {
- let release_stmt = Statement {
- source_info,
- kind: StatementKind::Validate(ValidationOp::Release,
- vec![place_to_operand(place.clone())]),
- };
- block_data.statements.push(release_stmt);
- }
- // drop doesn't return anything, so we need no acquire.
- }
- _ => {
- // Not a block ending in a Call -> ignore.
- }
- }
- }
- // Now we go over the returns we collected to acquire the return values.
- for (source_info, dest_place, dest_block) in returns {
- emit_acquire(
- &mut basic_blocks[dest_block],
- source_info,
- vec![place_to_operand(dest_place)]
- );
- }
-
- if restricted_validation {
- // No part 3 for us.
- return;
- }
-
- // PART 3
- // Add ReleaseValid/AcquireValid around Ref and Cast. Again an iterator does not seem very
- // suited as we need to add new statements before and after each Ref.
- for block_data in basic_blocks {
- // We want to insert statements around Ref commands as we iterate. To this end, we
- // iterate backwards using indices.
- for i in (0..block_data.statements.len()).rev() {
- match block_data.statements[i].kind {
- // When the borrow of this ref expires, we need to recover validation.
- StatementKind::Assign(_, box Rvalue::Ref(_, _, _)) => {
- // Due to a lack of NLL; we can't capture anything directly here.
- // Instead, we have to re-match and clone there.
- let (dest_place, re, src_place) = match block_data.statements[i].kind {
- StatementKind::Assign(ref dest_place,
- box Rvalue::Ref(re, _, ref src_place)) => {
- (dest_place.clone(), re, src_place.clone())
- },
- _ => bug!("We already matched this."),
- };
- // So this is a ref, and we got all the data we wanted.
- // Do an acquire of the result -- but only what it points to, so add a Deref
- // projection.
- let acquire_stmt = Statement {
- source_info: block_data.statements[i].source_info,
- kind: StatementKind::Validate(ValidationOp::Acquire,
- vec![place_to_operand(dest_place.deref())]),
- };
- block_data.statements.insert(i+1, acquire_stmt);
-
- // The source is released until the region of the borrow ends.
- let op = match re {
- &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce),
- &RegionKind::ReErased =>
- bug!("AddValidation pass must be run before erasing lifetimes"),
- _ => ValidationOp::Release,
- };
- let release_stmt = Statement {
- source_info: block_data.statements[i].source_info,
- kind: StatementKind::Validate(op, vec![place_to_operand(src_place)]),
- };
- block_data.statements.insert(i, release_stmt);
- }
- // Casts can change what validation does (e.g. unsizing)
- StatementKind::Assign(_, box Rvalue::Cast(kind, Operand::Copy(_), _)) |
- StatementKind::Assign(_, box Rvalue::Cast(kind, Operand::Move(_), _))
- if kind != CastKind::Misc =>
- {
- // Due to a lack of NLL; we can't capture anything directly here.
- // Instead, we have to re-match and clone there.
- let (dest_place, src_place) = match block_data.statements[i].kind {
- StatementKind::Assign(ref dest_place,
- box Rvalue::Cast(_, Operand::Copy(ref src_place), _)) |
- StatementKind::Assign(ref dest_place,
- box Rvalue::Cast(_, Operand::Move(ref src_place), _)) =>
- {
- (dest_place.clone(), src_place.clone())
- },
- _ => bug!("We already matched this."),
- };
-
- // Acquire of the result
- let acquire_stmt = Statement {
- source_info: block_data.statements[i].source_info,
- kind: StatementKind::Validate(ValidationOp::Acquire,
- vec![place_to_operand(dest_place)]),
- };
- block_data.statements.insert(i+1, acquire_stmt);
-
- // Release of the input
- let release_stmt = Statement {
- source_info: block_data.statements[i].source_info,
- kind: StatementKind::Validate(ValidationOp::Release,
- vec![place_to_operand(src_place)]),
- };
- block_data.statements.insert(i, release_stmt);
- }
- _ => {},
- }
- }
- }
- }
-}
StatementKind::StorageLive(..) |
StatementKind::StorageDead(..) |
StatementKind::EndRegion(..) |
- StatementKind::Validate(..) |
+ StatementKind::Retag { .. } |
StatementKind::AscribeUserType(..) |
StatementKind::Nop => {
// safe (at least as emitted during MIR construction)
HasTyCtxt, TargetDataLayout, HasDataLayout,
};
-use interpret::{self, EvalContext, ScalarMaybeUndef, Value, OpTy, MemoryKind};
+use interpret::{self, EvalContext, ScalarMaybeUndef, Immediate, OpTy, MemoryKind};
use const_eval::{CompileTimeInterpreter, error_to_const_error, eval_promoted, mk_borrowck_eval_cx};
use transform::{MirPass, MirSource};
param_env: ParamEnv<'tcx>,
}
-impl<'a, 'b, 'tcx> LayoutOf for &'a ConstPropagator<'a, 'b, 'tcx> {
+impl<'a, 'b, 'tcx> LayoutOf for ConstPropagator<'a, 'b, 'tcx> {
type Ty = ty::Ty<'tcx>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
- fn layout_of(self, ty: ty::Ty<'tcx>) -> Self::TyLayout {
+ fn layout_of(&self, ty: ty::Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
}
}
-impl<'a, 'b, 'tcx> HasDataLayout for &'a ConstPropagator<'a, 'b, 'tcx> {
+impl<'a, 'b, 'tcx> HasDataLayout for ConstPropagator<'a, 'b, 'tcx> {
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
-impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for &'a ConstPropagator<'a, 'b, 'tcx> {
+impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for ConstPropagator<'a, 'b, 'tcx> {
#[inline]
fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
self.tcx
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some((
OpTy {
- op: interpret::Operand::Immediate(Value::Scalar(
+ op: interpret::Operand::Immediate(Immediate::Scalar(
Scalar::Bits {
bits: n as u128,
size: self.tcx.data_layout.pointer_size.bytes() as u8,
this.ecx.unary_op(op, prim, arg.layout)
})?;
let res = OpTy {
- op: interpret::Operand::Immediate(Value::Scalar(val.into())),
+ op: interpret::Operand::Immediate(Immediate::Scalar(val.into())),
layout: place_layout,
};
Some((res, span))
}
let r = self.use_ecx(source_info, |this| {
- this.ecx.read_value(right.0)
+ this.ecx.read_immediate(right.0)
})?;
if op == BinOp::Shr || op == BinOp::Shl {
let left_ty = left.ty(self.mir, self.tcx);
}
let left = self.eval_operand(left, source_info)?;
let l = self.use_ecx(source_info, |this| {
- this.ecx.read_value(left.0)
+ this.ecx.read_immediate(left.0)
})?;
trace!("const evaluating {:?} for {:?} and {:?}", op, left, right);
let (val, overflow) = self.use_ecx(source_info, |this| {
- this.ecx.binary_op_val(op, l, r)
+ this.ecx.binary_op_imm(op, l, r)
})?;
let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
- Value::ScalarPair(
+ Immediate::ScalarPair(
val.into(),
Scalar::from_bool(overflow).into(),
)
let _: Option<()> = self.use_ecx(source_info, |_| Err(err));
return None;
}
- Value::Scalar(val.into())
+ Immediate::Scalar(val.into())
};
let res = OpTy {
op: interpret::Operand::Immediate(val),
if let TerminatorKind::Assert { expected, msg, cond, .. } = kind {
if let Some(value) = self.eval_operand(cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
- let expected = Value::Scalar(Scalar::from_bool(*expected).into());
+ let expected = Immediate::Scalar(Scalar::from_bool(*expected).into());
if expected != value.0.to_immediate() {
// poison all places this operand references so that further code
// doesn't use the invalid value
.eval_operand(len, source_info)
.expect("len must be const");
let len = match len.0.to_immediate() {
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
+ Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
_ => bug!("const len not primitive: {:?}", len),
.eval_operand(index, source_info)
.expect("index must be const");
let index = match index.0.to_immediate() {
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
+ Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
_ => bug!("const index not primitive: {:?}", index),
struct EraseRegionsVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- in_validation_statement: bool,
}
impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
EraseRegionsVisitor {
tcx,
- in_validation_statement: false,
}
}
}
impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: TyContext) {
- if !self.in_validation_statement {
- *ty = self.tcx.erase_regions(ty);
- }
+ *ty = self.tcx.erase_regions(ty);
self.super_ty(ty);
}
block: BasicBlock,
statement: &mut Statement<'tcx>,
location: Location) {
- // Do NOT delete EndRegion if validation statements are emitted.
- // Validation needs EndRegion.
- if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 {
- if let StatementKind::EndRegion(_) = statement.kind {
- statement.kind = StatementKind::Nop;
- }
+ if let StatementKind::EndRegion(_) = statement.kind {
+ statement.kind = StatementKind::Nop;
}
- self.in_validation_statement = match statement.kind {
- StatementKind::Validate(..) => true,
- _ => false,
- };
self.super_statement(block, statement, location);
- self.in_validation_statement = false;
}
}
self.in_cleanup_block = false;
}
+ fn visit_retag(&mut self, fn_entry: &mut bool, place: &mut Place<'tcx>, loc: Location) {
+ self.super_retag(fn_entry, place, loc);
+
+ // We have to patch all inlined retags to be aware that they are no longer
+ // happening on function entry.
+ *fn_entry = false;
+ }
+
fn visit_terminator_kind(&mut self, block: BasicBlock,
kind: &mut TerminatorKind<'tcx>, loc: Location) {
self.super_terminator_kind(block, kind, loc);
{
let did = tcx.require_lang_item(lang_item);
let poly_sig = tcx.fn_sig(did);
- let sig = poly_sig.no_late_bound_regions().unwrap();
+ let sig = poly_sig.no_bound_vars().unwrap();
let lhs_ty = lhs.ty(local_decls, tcx);
let rhs_ty = rhs.ty(local_decls, tcx);
let place_ty = place.ty(local_decls, tcx).to_ty(tcx);
use syntax::ast;
use syntax_pos::Span;
-pub mod add_validation;
+pub mod add_retag;
pub mod add_moves_for_packed_drops;
pub mod cleanup_post_borrowck;
pub mod check_unsafety;
// Remove all `FakeRead` statements and the borrows that are only
// used for checking matches
&cleanup_post_borrowck::CleanFakeReadsAndBorrows,
+
&simplify::SimplifyCfg::new("early-opt"),
// These next passes must be executed together
&add_call_guards::CriticalCallEdges,
&elaborate_drops::ElaborateDrops,
&no_landing_pads::NoLandingPads,
- // AddValidation needs to run after ElaborateDrops and before EraseRegions, and it needs
- // an AllCallEdges pass right before it.
- &add_call_guards::AllCallEdges,
- &add_validation::AddValidation,
// AddMovesForPackedDrops needs to run after drop
// elaboration.
&add_moves_for_packed_drops::AddMovesForPackedDrops,
+ // AddRetag needs to run after ElaborateDrops, and it needs
+ // an AllCallEdges pass right before it. Otherwise it should
+ // run fairly late, but before optimizations begin.
+ &add_call_guards::AllCallEdges,
+ &add_retag::AddRetag,
&simplify::SimplifyCfg::new("elaborate-drops"),
StatementKind::StorageDead(_) |
StatementKind::InlineAsm {..} |
StatementKind::EndRegion(_) |
- StatementKind::Validate(..) |
+ StatementKind::Retag { .. } |
StatementKind::AscribeUserType(..) |
StatementKind::Nop => {}
}
// These are all NOPs
| StatementKind::StorageLive(_)
| StatementKind::StorageDead(_)
- | StatementKind::Validate(..)
+ | StatementKind::Retag { .. }
| StatementKind::EndRegion(_)
| StatementKind::AscribeUserType(..)
| StatementKind::Nop => Ok(()),
StatementKind::Assign(_, _) |
StatementKind::SetDiscriminant { .. } |
StatementKind::InlineAsm { .. } |
- StatementKind::Validate { .. } => {
+ StatementKind::Retag { .. } => {
return false;
}
}
mir::StatementKind::StorageDead(_) |
mir::StatementKind::InlineAsm { .. } |
mir::StatementKind::EndRegion(_) |
- mir::StatementKind::Validate(..) |
+ mir::StatementKind::Retag { .. } |
mir::StatementKind::AscribeUserType(..) |
mir::StatementKind::Nop => continue,
mir::StatementKind::SetDiscriminant{ .. } =>
OGN = o
);
err.span_label(mutate_span, format!("cannot {}", action));
- err.span_label(match_span, format!("value is immutable in match guard"));
+ err.span_label(match_span, String::from("value is immutable in match guard"));
self.cancel_if_wrong_origin(err, o)
}
PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) |
PlaceContext::NonUse(NonUseContext::AscribeUserTy) |
- PlaceContext::NonUse(NonUseContext::Validate) =>
+ PlaceContext::MutatingUse(MutatingUseContext::Retag) =>
Some(DefUse::Use),
///////////////////////////////////////////////////////////////////////////
}
}
}
- ItemKind::TraitAlias(Generics { ref params, .. }, ..) => {
- for param in params {
- match param.kind {
- GenericParamKind::Lifetime { .. } => {}
- GenericParamKind::Type { ref default, .. } => {
- if !param.bounds.is_empty() {
- self.err_handler()
- .span_err(param.ident.span, "type parameters on the left \
- side of a trait alias cannot be bounded");
- }
- if !default.is_none() {
- self.err_handler()
- .span_err(param.ident.span, "type parameters on the left \
- side of a trait alias cannot have defaults");
- }
- }
- }
- }
- }
ItemKind::Mod(_) => {
// Ensure that `path` attributes on modules are recorded as used (c.f. #35584).
attr::first_attr_value_str_by_name(&item.attrs, "path");
StatementKind::Assign(..) => "StatementKind::Assign",
StatementKind::FakeRead(..) => "StatementKind::FakeRead",
StatementKind::EndRegion(..) => "StatementKind::EndRegion",
- StatementKind::Validate(..) => "StatementKind::Validate",
+ StatementKind::Retag { .. } => "StatementKind::Retag",
StatementKind::SetDiscriminant { .. } => "StatementKind::SetDiscriminant",
StatementKind::StorageLive(..) => "StatementKind::StorageLive",
StatementKind::StorageDead(..) => "StatementKind::StorageDead",
match self {
PathSource::Type => match def {
Def::Struct(..) | Def::Union(..) | Def::Enum(..) |
- Def::Trait(..) | Def::TyAlias(..) | Def::AssociatedTy(..) |
- Def::PrimTy(..) | Def::TyParam(..) | Def::SelfTy(..) |
- Def::Existential(..) |
+ Def::Trait(..) | Def::TraitAlias(..) | Def::TyAlias(..) |
+ Def::AssociatedTy(..) | Def::PrimTy(..) | Def::TyParam(..) |
+ Def::SelfTy(..) | Def::Existential(..) |
Def::ForeignTy(..) => true,
_ => false,
},
return (err, candidates);
}
(Def::TyAlias(..), PathSource::Trait(_)) => {
- err.span_label(span, "type aliases cannot be used for traits");
+ err.span_label(span, "type aliases cannot be used as traits");
+ if nightly_options::is_nightly_build() {
+ err.note("did you mean to use a trait alias?");
+ }
return (err, candidates);
}
(Def::Mod(..), PathSource::Expr(Some(parent))) => match parent.node {
);
for (i, &Segment { ident, id }) in path.iter().enumerate() {
- debug!("resolve_path ident {} {:?}", i, ident);
+ debug!("resolve_path ident {} {:?} {:?}", i, ident, id);
+ let record_segment_def = |this: &mut Self, def| {
+ if record_used {
+ if let Some(id) = id {
+ if !this.def_map.contains_key(&id) {
+ assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
+ this.record_def(id, PathResolution::new(def));
+ }
+ }
+ }
+ };
let is_last = i == path.len() - 1;
let ns = if is_last { opt_ns.unwrap_or(TypeNS) } else { TypeNS };
// we found a local variable or type param
Some(LexicalScopeBinding::Def(def))
if opt_ns == Some(TypeNS) || opt_ns == Some(ValueNS) => {
+ record_segment_def(self, def);
return PathResult::NonModule(PathResolution::with_unresolved_segments(
def, path.len() - 1
));
let maybe_assoc = opt_ns != Some(MacroNS) && PathSource::Type.is_expected(def);
if let Some(next_module) = binding.module() {
module = Some(ModuleOrUniformRoot::Module(next_module));
- if record_used {
- if let Some(id) = id {
- if !self.def_map.contains_key(&id) {
- assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
- self.record_def(id, PathResolution::new(def));
- }
- }
- }
+ record_segment_def(self, def);
} else if def == Def::ToolMod && i + 1 != path.len() {
let def = Def::NonMacroAttr(NonMacroAttrKind::Tool);
return PathResult::NonModule(PathResolution::new(def));
// report an error.
if record_used {
resolve_error(self, span,
- ResolutionError::CannotCaptureDynamicEnvironmentInFnItem);
+ ResolutionError::CannotCaptureDynamicEnvironmentInFnItem);
}
return Def::Err;
}
// Still doesn't deal with upvars
if record_used {
resolve_error(self, span,
- ResolutionError::AttemptToUseNonConstantValueInConstant);
+ ResolutionError::AttemptToUseNonConstantValueInConstant);
}
return Def::Err;
}
err.span_suggestions_with_applicability(
span,
&msg,
- path_strings,
+ path_strings.into_iter(),
Applicability::Unspecified,
);
} else {
return Err(Determinacy::Determined);
}
}
+ Def::Err => {
+ return Err(Determinacy::Determined);
+ }
_ => panic!("expected `Def::Macro` or `Def::NonMacroAttr`"),
}
use std::cell::{Cell, RefCell};
use std::collections::BTreeMap;
-use std::fmt::Write;
use std::{mem, ptr};
/// Contains data for specific types of import directives.
let msg = format!("`{}` import is ambiguous", name);
let mut err = self.session.struct_span_err(span, &msg);
- let mut suggestion_choices = String::new();
+ let mut suggestion_choices = vec![];
if external_crate.is_some() {
- write!(suggestion_choices, "`::{}`", name);
+ suggestion_choices.push(format!("`::{}`", name));
err.span_label(span,
format!("can refer to external crate `::{}`", name));
}
if let Some(result) = results.module_scope {
- if !suggestion_choices.is_empty() {
- suggestion_choices.push_str(" or ");
- }
- write!(suggestion_choices, "`self::{}`", name);
+ suggestion_choices.push(format!("`self::{}`", name));
if uniform_paths_feature {
err.span_label(result.span,
format!("can refer to `self::{}`", name));
err.span_label(result.span,
format!("shadowed by block-scoped `{}`", name));
}
- err.help(&format!("write {} explicitly instead", suggestion_choices));
+ err.help(&format!("write {} explicitly instead", suggestion_choices.join(" or ")));
if uniform_paths_feature {
err.note("relative `use` paths enabled by `#![feature(uniform_paths)]`");
} else {
// we only write one macro def per unique macro definition, and
// one macro use per unique callsite span.
// mac_defs: FxHashSet<Span>,
- macro_calls: FxHashSet<Span>,
+ // macro_calls: FxHashSet<Span>,
}
impl<'l, 'tcx: 'l, 'll, O: DumpOutput + 'll> DumpVisitor<'l, 'tcx, 'll, O> {
span: span_utils,
cur_scope: CRATE_NODE_ID,
// mac_defs: FxHashSet::default(),
- macro_calls: FxHashSet::default(),
+ // macro_calls: FxHashSet::default(),
}
}
}
fn process_path(&mut self, id: NodeId, path: &'l ast::Path) {
- debug!("process_path {:?}", path);
- if generated_code(path.span) {
+ if self.span.filter_generated(path.span) {
return;
}
self.dump_path_ref(id, path);
/// If the span is not macro-generated, do nothing, else use callee and
/// callsite spans to record macro definition and use data, using the
/// mac_uses and mac_defs sets to prevent multiples.
- fn process_macro_use(&mut self, span: Span) {
- let source_span = span.source_callsite();
- if !self.macro_calls.insert(source_span) {
- return;
- }
+ fn process_macro_use(&mut self, _span: Span) {
+ // FIXME if we're not dumping the defs (see below), there is no point
+ // dumping refs either.
+ // let source_span = span.source_callsite();
+ // if !self.macro_calls.insert(source_span) {
+ // return;
+ // }
- let data = match self.save_ctxt.get_macro_use_data(span) {
- None => return,
- Some(data) => data,
- };
+ // let data = match self.save_ctxt.get_macro_use_data(span) {
+ // None => return,
+ // Some(data) => data,
+ // };
- self.dumper.macro_use(data);
+ // self.dumper.macro_use(data);
// FIXME write the macro def
// let mut hasher = DefaultHasher::new();
self.result.compilation = Some(data);
}
- pub fn macro_use(&mut self, data: MacroRef) {
+ pub fn _macro_use(&mut self, data: MacroRef) {
if self.config.pub_only || self.config.reachable_only {
return;
}
log = "0.4"
rustc_cratesio_shim = { path = "../librustc_cratesio_shim" }
serialize = { path = "../libserialize" }
-
-[features]
-jemalloc = []
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
})
}
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.make_indirect();
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.make_indirect();
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
use abi::call::{ArgType, FnType, };
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn classify_ret_ty<'a, Ty, C>(_tuncx: C, ret: &mut ArgType<'a, Ty>)
+fn classify_ret_ty<'a, Ty, C>(_cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.extend_integer_width_to(32);
}
-fn classify_arg_ty<'a, Ty, C>(_cx: C, arg: &mut ArgType<'a, Ty>)
+fn classify_arg_ty<'a, Ty, C>(_cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.extend_integer_width_to(32);
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
-fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
})
}
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.make_indirect();
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
});
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
}
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
arg.extend_integer_width_to(bits);
}
-fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
+fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
});
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
impl Reg {
- pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
let dl = cx.data_layout();
match self.kind {
RegKind::Integer => {
}
impl Uniform {
- pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.unit.align(cx)
}
}
}
}
- pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
+ pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
(self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
.abi_align(self.rest.align(cx)) + self.rest.total
}
- pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.prefix.iter()
.filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),
}
}
- fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
- where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
+ fn homogeneous_aggregate<C>(&self, cx: &C) -> Option<Reg>
+ where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self>
{
match self.abi {
Abi::Uninhabited => None,
}
impl<'a, Ty> FnType<'a, Ty> {
- pub fn adjust_for_cabi<C>(&mut self, cx: C, abi: ::spec::abi::Abi) -> Result<(), String>
+ pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: ::spec::abi::Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
}
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
}
use self::ABI::*;
-fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
})
}
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.make_indirect();
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
});
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
+fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C>,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
use abi::call::{ArgType, FnType, Reg, Uniform};
use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
}
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
})
}
-fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.make_indirect();
}
-fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
});
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
Fastcall
}
-fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
+fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
-fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>)
-> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
- fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
+ fn classify<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>,
cls: &mut [Option<Class>], off: Size) -> Result<(), Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
target
}
-pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
}
}
-pub trait HasDataLayout: Copy {
+pub trait HasDataLayout {
fn data_layout(&self) -> &TargetDataLayout;
}
-impl<'a> HasDataLayout for &'a TargetDataLayout {
+impl HasDataLayout for TargetDataLayout {
fn data_layout(&self) -> &TargetDataLayout {
self
}
}
#[inline]
- pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
+ pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
let dl = cx.data_layout();
let bytes = self.bytes().checked_add(offset.bytes())?;
}
#[inline]
- pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
+ pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
let dl = cx.data_layout();
let bytes = self.bytes().checked_mul(count)?;
}
}
- pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
let dl = cx.data_layout();
match self {
}
/// Find the smallest integer with the given alignment.
- pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
+ pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Option<Integer> {
let dl = cx.data_layout();
let wanted = align.abi();
}
/// Find the largest integer with the given alignment or less.
- pub fn approximate_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Integer {
+ pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Integer {
let dl = cx.data_layout();
let wanted = align.abi();
}
impl<'a, 'tcx> Primitive {
- pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
+ pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
let dl = cx.data_layout();
match self {
}
}
- pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
let dl = cx.data_layout();
match self {
/// Returns the valid range as a `x..y` range.
///
/// If `x` and `y` are equal, the range is full, not empty.
- pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: C) -> Range<u128> {
+ pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
// For a (max) value of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
// i.e., if the range is everything.
}
impl LayoutDetails {
- pub fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self {
+ pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let size = scalar.value.size(cx);
let align = scalar.value.align(cx);
LayoutDetails {
type Ty;
type TyLayout;
- fn layout_of(self, ty: Self::Ty) -> Self::TyLayout;
+ fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout;
}
pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
- fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>;
- fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout;
+ fn for_variant(this: TyLayout<'a, Self>, cx: &C, variant_index: usize) -> TyLayout<'a, Self>;
+ fn field(this: TyLayout<'a, Self>, cx: &C, i: usize) -> C::TyLayout;
}
impl<'a, Ty> TyLayout<'a, Ty> {
- pub fn for_variant<C>(self, cx: C, variant_index: usize) -> Self
+ pub fn for_variant<C>(self, cx: &C, variant_index: usize) -> Self
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::for_variant(self, cx, variant_index)
}
- pub fn field<C>(self, cx: C, i: usize) -> C::TyLayout
+ pub fn field<C>(self, cx: &C, i: usize) -> C::TyLayout
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::field(self, cx, i)
}
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(box_syntax)]
-#![cfg_attr(stage0, feature(min_const_fn))]
#![feature(nll)]
#![feature(slice_patterns)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use spec::{LinkerFlavor, Target, TargetOptions, TargetResult};
+use spec::{LldFlavor, LinkerFlavor, Target, TargetOptions, TargetResult};
pub fn target() -> TargetResult {
let mut base = super::fuchsia_base::opts();
target_os: "fuchsia".to_string(),
target_env: String::new(),
target_vendor: String::new(),
- linker_flavor: LinkerFlavor::Gcc,
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
abi_blacklist: super::arm_base::abi_blacklist(),
.. base
dll_suffix: ".dylib".to_string(),
archive_format: "bsd".to_string(),
pre_link_args: LinkArgs::new(),
- exe_allocation_crate: super::maybe_jemalloc(),
has_elf_tls: version >= (10, 7),
abi_return_struct_as_int: true,
emit_debug_gdb_scripts: false,
pre_link_args,
has_elf_tls: false,
eliminate_frame_pointer: false,
- // The following line is a workaround for jemalloc 4.5 being broken on
- // ios. jemalloc 5.0 is supposed to fix this.
- // see https://github.com/rust-lang/rust/issues/45262
- exe_allocation_crate: None,
.. super::apple_base::opts()
})
}
let mut base = super::android_base::opts();
// https://developer.android.com/ndk/guides/abis.html#armeabi
base.features = "+strict-align,+v5te".to_string();
- base.max_atomic_width = Some(64);
+ base.max_atomic_width = Some(32);
Ok(Target {
llvm_target: "arm-linux-androideabi".to_string(),
// dynamic linking.
tls_model: "local-exec".to_string(),
relro_level: RelroLevel::Full,
- exe_allocation_crate: super::maybe_jemalloc(),
.. Default::default()
}
}
pre_link_args: args,
position_independent_executables: true,
relro_level: RelroLevel::Full,
- exe_allocation_crate: super::maybe_jemalloc(),
.. Default::default()
}
}
position_independent_executables: true,
eliminate_frame_pointer: false, // FIXME 43575
relro_level: RelroLevel::Full,
- exe_allocation_crate: super::maybe_jemalloc(),
abi_return_struct_as_int: true,
.. Default::default()
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use spec::{LinkArgs, LinkerFlavor, TargetOptions};
+use spec::{LldFlavor, LinkArgs, LinkerFlavor, TargetOptions};
use std::default::Default;
pub fn opts() -> TargetOptions {
let mut args = LinkArgs::new();
- args.insert(LinkerFlavor::Gcc, vec![
- // We want to be able to strip as much executable code as possible
- // from the linker command line, and this flag indicates to the
- // linker that it can avoid linking in dynamic libraries that don't
- // actually satisfy any symbols up to that point (as with many other
- // resolutions the linker does). This option only applies to all
- // following libraries so we're sure to pass it as one of the first
- // arguments.
- // FIXME: figure out whether these linker args are desirable
- //"-Wl,--as-needed".to_string(),
-
- // Always enable NX protection when it is available
- //"-Wl,-z,noexecstack".to_string(),
+ args.insert(LinkerFlavor::Lld(LldFlavor::Ld), vec![
+ "--build-id".to_string(), "--hash-style=gnu".to_string(),
+ "-z".to_string(), "rodynamic".to_string(),
]);
TargetOptions {
+ linker: Some("rust-lld".to_owned()),
+ lld_flavor: LldFlavor::Ld,
dynamic_linking: true,
executables: true,
target_family: Some("unix".to_string()),
pre_link_args: args,
position_independent_executables: true,
relro_level: RelroLevel::Full,
- exe_allocation_crate: super::maybe_jemalloc(),
has_elf_tls: true,
.. Default::default()
}
pub options: TargetOptions,
}
-pub trait HasTargetSpec: Copy {
+pub trait HasTargetSpec {
fn target_spec(&self) -> &Target;
}
-impl<'a> HasTargetSpec for &'a Target {
+impl HasTargetSpec for Target {
fn target_spec(&self) -> &Target {
self
}
}
}
-fn maybe_jemalloc() -> Option<String> {
- if cfg!(feature = "jemalloc") {
- Some("alloc_jemalloc".to_string())
- } else {
- None
- }
-}
-
/// Either a target triple string or a path to a JSON file.
#[derive(PartialEq, Clone, Debug, Hash, RustcEncodable, RustcDecodable)]
pub enum TargetTriple {
no_integrated_as: true,
// There are no atomic CAS instructions available in the MSP430
- // instruction set
- max_atomic_width: Some(16),
+ // instruction set, and the LLVM backend doesn't currently support
+ // compiler fences so the Atomic* API is missing on this target.
+ // When the LLVM backend gains support for compile fences uncomment
+ // the `singlethread: true` line and set `max_atomic_width` to
+ // `Some(16)`.
+ max_atomic_width: Some(0),
atomic_cas: false,
+ // singlethread: true,
// Because these devices have very little resources having an
// unwinder is too onerous so we default to "abort" because the
has_rpath: true,
target_family: Some("unix".to_string()),
is_like_solaris: true,
- exe_allocation_crate: super::maybe_jemalloc(),
.. Default::default()
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use spec::{LinkerFlavor, Target, TargetResult};
+use spec::{LldFlavor, LinkerFlavor, Target, TargetResult};
pub fn target() -> TargetResult {
let mut base = super::fuchsia_base::opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
- base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.stack_probes = true;
Ok(Target {
target_os: "fuchsia".to_string(),
target_env: String::new(),
target_vendor: String::new(),
- linker_flavor: LinkerFlavor::Gcc,
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: base,
})
}
ty::GeneratorWitness(..) |
ty::UnnormalizedProjection(..) |
ty::Infer(..) |
+ ty::Bound(..) |
ty::Error => {
bug!("unexpected type {:?}", ty)
}
ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"),
- ty::Infer(..) | ty::Error => {
+ ty::Bound(..) | ty::Infer(..) | ty::Error => {
// By the time this code runs, all type variables ought to
// be fully resolved.
Err(NoSolution)
// From the full set of obligations, just filter down to the
// region relationships.
implied_bounds.extend(obligations.into_iter().flat_map(|obligation| {
- assert!(!obligation.has_escaping_regions());
+ assert!(!obligation.has_escaping_bound_vars());
match obligation.predicate {
ty::Predicate::Trait(..) |
ty::Predicate::Subtype(..) |
vec![]
}
- ty::Predicate::RegionOutlives(ref data) => match data.no_late_bound_regions() {
+ ty::Predicate::RegionOutlives(ref data) => match data.no_bound_vars() {
None => vec![],
Some(ty::OutlivesPredicate(r_a, r_b)) => {
vec![OutlivesBound::RegionSubRegion(r_b, r_a)]
}
},
- ty::Predicate::TypeOutlives(ref data) => match data.no_late_bound_regions() {
+ ty::Predicate::TypeOutlives(ref data) => match data.no_bound_vars() {
None => vec![],
Some(ty::OutlivesPredicate(ty_a, r_b)) => {
let ty_a = infcx.resolve_type_vars_if_possible(&ty_a);
) -> Vec<OutlivesBound<'tcx>> {
sup_components
.into_iter()
- .flat_map(|component| {
+ .filter_map(|component| {
match component {
Component::Region(r) =>
- vec![OutlivesBound::RegionSubRegion(sub_region, r)],
+ Some(OutlivesBound::RegionSubRegion(sub_region, r)),
Component::Param(p) =>
- vec![OutlivesBound::RegionSubParam(sub_region, p)],
+ Some(OutlivesBound::RegionSubParam(sub_region, p)),
Component::Projection(p) =>
- vec![OutlivesBound::RegionSubProjection(sub_region, p)],
+ Some(OutlivesBound::RegionSubProjection(sub_region, p)),
Component::EscapingProjection(_) =>
// If the projection has escaping regions, don't
// try to infer any implied bounds even for its
// idea is that the WAY that the caller proves
// that may change in the future and we want to
// give ourselves room to get smarter here.
- vec![],
+ None,
Component::UnresolvedInferenceVariable(..) =>
- vec![],
+ None,
}
})
.collect()
ty::GeneratorWitness(..) |
ty::UnnormalizedProjection(..) |
ty::Infer(..) |
+ ty::Bound(..) |
ty::Error => {
bug!("unexpected type {:?}", ty);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Conversion from AST representation of types to the ty.rs
+//! Conversion from AST representation of types to the `ty.rs`
//! representation. The main routine here is `ast_ty_to_ty()`: each use
//! is parameterized by an instance of `AstConv`.
item_segment: &hir::PathSegment)
-> &'tcx Substs<'tcx>
{
-
let (substs, assoc_bindings) = item_segment.with_generic_args(|generic_args| {
self.create_substs_for_ast_path(
span,
}
/// Given the type/region arguments provided to some path (along with
- /// an implicit Self, if this is a trait reference) returns the complete
+ /// an implicit `Self`, if this is a trait reference) returns the complete
/// set of substitutions. This may involve applying defaulted type parameters.
///
/// Note that the type listing given here is *exactly* what the user provided.
{
let trait_def_id = self.trait_def_id(trait_ref);
- debug!("ast_path_to_poly_trait_ref({:?}, def_id={:?})", trait_ref, trait_def_id);
+ debug!("instantiate_poly_trait_ref({:?}, def_id={:?})", trait_ref, trait_def_id);
self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1);
let predicate: Result<_, ErrorReported> =
self.ast_type_binding_to_poly_projection_predicate(
trait_ref.ref_id, poly_trait_ref, binding, speculative, &mut dup_bindings);
- // ok to ignore Err() because ErrorReported (see above)
+ // ok to ignore Err because ErrorReported (see above)
Some((predicate.ok()?, binding.span))
}));
- debug!("ast_path_to_poly_trait_ref({:?}, projections={:?}) -> {:?}",
+ debug!("instantiate_poly_trait_ref({:?}, projections={:?}) -> {:?}",
trait_ref, poly_projections, poly_trait_ref);
poly_trait_ref
}
)
}
- /// Transform a PolyTraitRef into a PolyExistentialTraitRef by
- /// removing the dummy Self type (TRAIT_OBJECT_DUMMY_SELF).
+ /// Transform a `PolyTraitRef` into a `PolyExistentialTraitRef` by
+ /// removing the dummy `Self` type (`TRAIT_OBJECT_DUMMY_SELF`).
fn trait_ref_to_existential(&self, trait_ref: ty::TraitRef<'tcx>)
-> ty::ExistentialTraitRef<'tcx> {
assert_eq!(trait_ref.self_ty().sty, TRAIT_OBJECT_DUMMY_SELF);
let principal = self.instantiate_poly_trait_ref(&trait_bounds[0],
dummy_self,
&mut projection_bounds);
+ debug!("principal: {:?}", principal);
for trait_bound in trait_bounds[1..].iter() {
- // Sanity check for non-principal trait bounds
+ // sanity check for non-principal trait bounds
self.instantiate_poly_trait_ref(trait_bound,
dummy_self,
&mut vec![]);
})
});
- // check that there are no gross object safety violations,
+ // Check that there are no gross object safety violations;
// most importantly, that the supertraits don't contain Self,
- // to avoid ICE-s.
+ // to avoid ICEs.
let object_safety_violations =
- tcx.astconv_object_safety_violations(principal.def_id());
+ tcx.global_tcx().astconv_object_safety_violations(principal.def_id());
if !object_safety_violations.is_empty() {
tcx.report_object_safety_error(
span, principal.def_id(), object_safety_violations)
return tcx.types.err;
}
- // use a btreeset to keep output in a more consistent order
+ // Use a BTreeSet to keep output in a more consistent order.
let mut associated_types = BTreeSet::default();
for tr in traits::supertraits(tcx, principal) {
v.sort_by(|a, b| a.stable_cmp(tcx, b));
let existential_predicates = ty::Binder::bind(tcx.mk_existential_predicates(v.into_iter()));
- // Explicitly specified region bound. Use that.
+ // Use explicitly-specified region bound.
let region_bound = if !lifetime.is_elided() {
self.ast_region_to_region(lifetime, None)
} else {
{
let tcx = self.tcx();
- let bounds: Vec<_> = self.get_type_parameter_bounds(span, ty_param_def_id)
- .predicates.into_iter().filter_map(|(p, _)| p.to_opt_poly_trait_ref()).collect();
+ let bounds = self.get_type_parameter_bounds(span, ty_param_def_id)
+ .predicates.into_iter().filter_map(|(p, _)| p.to_opt_poly_trait_ref());
// Check that there is exactly one way to find an associated type with the
// correct name.
- let suitable_bounds = traits::transitive_bounds(tcx, &bounds)
+ let suitable_bounds = traits::transitive_bounds(tcx, bounds)
.filter(|b| self.trait_defines_associated_type_named(b.def_id(), assoc_name));
let param_node_id = tcx.hir.as_local_node_id(ty_param_def_id).unwrap();
err.span_label(span, "associated type not allowed here").emit();
}
- // Check a type Path and convert it to a Ty.
+ // Check a type `Path` and convert it to a `Ty`.
pub fn def_to_ty(&self,
opt_self_ty: Option<Ty<'tcx>>,
path: &hir::Path,
/// Parses the programmer's textual representation of a type into our
/// internal notion of a type.
pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
- debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})",
- ast_ty.id, ast_ty);
+ debug!("ast_ty_to_ty(id={:?}, ast_ty={:?} ty_ty={:?})",
+ ast_ty.id, ast_ty, ast_ty.node);
let tcx = self.tcx();
self.region_bounds.iter().map(|&(region_bound, span)| {
// account for the binder being introduced below; no need to shift `param_ty`
// because, at present at least, it can only refer to early-bound regions
- let region_bound = tcx.mk_region(ty::fold::shift_region(*region_bound, 1));
+ let region_bound = ty::fold::shift_region(tcx, region_bound, 1);
let outlives = ty::OutlivesPredicate(param_ty, region_bound);
(ty::Binder::dummy(outlives).to_predicate(), span)
}).chain(
}
// Replace constructor type with constructed type for tuple struct patterns.
let pat_ty = pat_ty.fn_sig(tcx).output();
- let pat_ty = pat_ty.no_late_bound_regions().expect("expected fn type");
+ let pat_ty = pat_ty.no_bound_vars().expect("expected fn type");
self.demand_eqtype(pat.span, expected, pat_ty);
ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)),
ty::Param(ref p) => Some(PointerKind::OfParam(p)),
// Insufficient type information.
- ty::Infer(_) => None,
+ ty::Bound(..) | ty::Infer(_) => None,
ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
ty::Float(_) | ty::Array(..) | ty::GeneratorWitness(..) |
use super::{check_fn, Expectation, FnCtxt, GeneratorTypes};
use astconv::AstConv;
+use middle::region;
use rustc::hir::def_id::DefId;
use rustc::infer::{InferOk, InferResult};
use rustc::infer::LateBoundRegionConversionTime;
use rustc::infer::type_variable::TypeVariableOrigin;
+use rustc::traits::Obligation;
use rustc::traits::error_reporting::ArgKind;
use rustc::ty::{self, ToPolyTraitRef, Ty, GenericParamDefKind};
use rustc::ty::fold::TypeFoldable;
// Create a `PolyFnSig`. Note the oddity that late bound
// regions appearing free in `expected_sig` are now bound up
// in this binder we are creating.
- assert!(!expected_sig.sig.has_regions_bound_above(ty::INNERMOST));
+ assert!(!expected_sig.sig.has_vars_bound_above(ty::INNERMOST));
let bound_sig = ty::Binder::bind(self.tcx.mk_fn_sig(
expected_sig.sig.inputs().iter().cloned(),
expected_sig.sig.output(),
// Along the way, it also writes out entries for types that the user
// wrote into our tables, which are then later used by the privacy
// check.
- match self.check_supplied_sig_against_expectation(expr_def_id, decl, &closure_sigs) {
+ match self.check_supplied_sig_against_expectation(expr_def_id, decl, body, &closure_sigs) {
Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok),
Err(_) => return self.sig_of_closure_no_expectation(expr_def_id, decl, body),
}
&self,
expr_def_id: DefId,
decl: &hir::FnDecl,
+ body: &hir::Body,
expected_sigs: &ClosureSignatures<'tcx>,
) -> InferResult<'tcx, ()> {
// Get the signature S that the user gave.
} = self.at(cause, self.param_env)
.eq(*expected_ty, supplied_ty)?;
all_obligations.extend(obligations);
+
+ // Also, require that the supplied type must outlive
+ // the closure body.
+ let closure_body_region = self.tcx.mk_region(
+ ty::ReScope(
+ region::Scope {
+ id: body.value.hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
+ ),
+ );
+ all_obligations.push(
+ Obligation::new(
+ cause.clone(),
+ self.param_env,
+ ty::Predicate::TypeOutlives(
+ ty::Binder::dummy(
+ ty::OutlivesPredicate(
+ supplied_ty,
+ closure_body_region,
+ ),
+ ),
+ ),
+ ),
+ );
}
let (supplied_output_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var(
if compatible_variants.peek().is_some() {
let expr_text = print::to_string(print::NO_ANN, |s| s.print_expr(expr));
let suggestions = compatible_variants
- .map(|v| format!("{}({})", v, expr_text)).collect::<Vec<_>>();
+ .map(|v| format!("{}({})", v, expr_text));
err.span_suggestions_with_applicability(
expr.span,
"try using a variant of the expected type",
let mut structural_to_nomimal = FxHashMap::default();
let sig = tcx.fn_sig(def_id);
- let sig = sig.no_late_bound_regions().unwrap();
+ let sig = sig.no_bound_vars().unwrap();
if intr.inputs.len() != sig.inputs().len() {
span_err!(tcx.sess, it.span, E0444,
"platform-specific intrinsic has invalid number of \
// Trait must have a method named `m_name` and it should not have
// type parameters or early-bound regions.
let tcx = self.tcx;
- let method_item =
- self.associated_item(trait_def_id, m_name, Namespace::Value).unwrap();
+ let method_item = match self.associated_item(trait_def_id, m_name, Namespace::Value) {
+ Some(method_item) => method_item,
+ None => {
+ tcx.sess.delay_span_bug(span,
+ "operator trait does not have corresponding operator method");
+ return None;
+ }
+ };
let def_id = method_item.def_id;
let generics = tcx.generics_of(def_id);
assert_eq!(generics.params.len(), 0);
value
}
};
- assert!(!bounds.has_escaping_regions());
+ assert!(!bounds.has_escaping_bound_vars());
let cause = traits::ObligationCause::misc(span, self.body_id);
obligations.extend(traits::predicates_for_generics(cause.clone(),
use syntax::ast;
use syntax::util::lev_distance::{lev_distance, find_best_match_for_name};
use syntax_pos::{Span, symbol::Symbol};
+use std::iter;
use std::mem;
use std::ops::Deref;
use std::rc::Rc;
// itself. Hence, a `&self` method will wind up with an
// argument type like `&Trait`.
let trait_ref = principal.with_self_ty(self.tcx, self_ty);
- self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| {
+ self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| {
let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref);
let (xform_self_ty, xform_ret_ty) =
param_ty: ty::ParamTy) {
// FIXME -- Do we want to commit to this behavior for param bounds?
- let bounds: Vec<_> = self.param_env
+ let bounds = self.param_env
.caller_bounds
.iter()
.filter_map(|predicate| {
ty::Predicate::TypeOutlives(..) |
ty::Predicate::ConstEvaluatable(..) => None,
}
- })
- .collect();
+ });
- self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| {
+ self.elaborate_bounds(bounds, |this, poly_trait_ref, item| {
let trait_ref = this.erase_late_bound_regions(&poly_trait_ref);
let (xform_self_ty, xform_ret_ty) =
// Do a search through a list of bounds, using a callback to actually
// create the candidates.
- fn elaborate_bounds<F>(&mut self, bounds: &[ty::PolyTraitRef<'tcx>], mut mk_cand: F)
+ fn elaborate_bounds<F>(&mut self,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ mut mk_cand: F)
where F: for<'b> FnMut(&mut ProbeContext<'b, 'gcx, 'tcx>,
ty::PolyTraitRef<'tcx>,
ty::AssociatedItem)
{
- debug!("elaborate_bounds(bounds={:?})", bounds);
-
let tcx = self.tcx;
for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
+ debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref);
for item in self.impl_or_trait_item(bound_trait_ref.def_id()) {
if !self.has_applicable_self(&item) {
self.record_static_candidate(TraitSource(bound_trait_ref.def_id()));
}
let static_candidates = mem::replace(&mut self.static_candidates, vec![]);
- let private_candidate = mem::replace(&mut self.private_candidate, None);
+ let private_candidate = self.private_candidate.take();
let unsatisfied_predicates = mem::replace(&mut self.unsatisfied_predicates, vec![]);
// things failed, so lets look at all traits, for diagnostic purposes now:
fn_sig,
substs);
- assert!(!substs.has_escaping_regions());
+ assert!(!substs.has_escaping_bound_vars());
// It is possible for type parameters or early-bound lifetimes
// to appear in the signature of `self`. The substitutions we
with_crate_prefix(|| self.tcx.item_path_str(*did)),
additional_newline
)
- }).collect();
+ });
err.span_suggestions_with_applicability(
span,
fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
debug!("register_predicate({:?})", obligation);
- if obligation.has_escaping_regions() {
- span_bug!(obligation.cause.span, "escaping regions in predicate {:?}",
+ if obligation.has_escaping_bound_vars() {
+ span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}",
obligation);
}
self.fulfillment_cx
o_ty
};
- let c_ty = self.fcx.inh.infcx.canonicalize_response(&revealed_ty);
+ let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(&revealed_ty);
debug!("visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}",
ty.hir_id, o_ty, revealed_ty, c_ty);
self.fcx.tables.borrow_mut().user_provided_tys_mut().insert(ty.hir_id, c_ty);
}
}
- // Check that a function marked as `#[panic_implementation]` has signature `fn(&PanicInfo) -> !`
+ // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() {
if panic_impl_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() {
}
fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
- if ty.has_escaping_regions() {
+ if ty.has_escaping_bound_vars() {
ty // FIXME: normalization and escaping regions
} else {
self.normalize_associated_types_in(span, &ty)
method.substs[i]
}
});
- self.infcx.canonicalize_response(&UserSubsts {
+ self.infcx.canonicalize_user_type_annotation(&UserSubsts {
substs: just_method_substs,
user_self_ty: None, // not relevant here
})
);
if !substs.is_noop() {
- let user_substs = self.infcx.canonicalize_response(&UserSubsts {
+ let user_substs = self.infcx.canonicalize_user_type_annotation(&UserSubsts {
substs,
user_self_ty,
});
cause: traits::ObligationCause<'tcx>,
predicates: &ty::InstantiatedPredicates<'tcx>)
{
- assert!(!predicates.has_escaping_regions());
+ assert!(!predicates.has_escaping_bound_vars());
debug!("add_obligations_for_parameters(predicates={:?})",
predicates);
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
};
- let mut remaining_fields = FxHashMap::default();
- for (i, field) in variant.fields.iter().enumerate() {
- remaining_fields.insert(field.ident.modern(), (i, field));
- }
+ let mut remaining_fields = variant.fields.iter().enumerate().map(|(i, field)|
+ (field.ident.modern(), (i, field))
+ ).collect::<FxHashMap<_, _>>();
let mut seen_fields = FxHashMap::default();
} else if !self.check_for_cast(err, expr, found, expected) {
let methods = self.get_conversion_methods(expr.span, expected, found);
if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) {
- let suggestions = iter::repeat(&expr_text).zip(methods.iter())
+ let mut suggestions = iter::repeat(&expr_text).zip(methods.iter())
.filter_map(|(receiver, method)| {
let method_call = format!(".{}()", method.ident);
if receiver.ends_with(&method_call) {
Some(format!("{}{}", receiver, method_call))
}
}
- }).collect::<Vec<_>>();
- if !suggestions.is_empty() {
+ }).peekable();
+ if suggestions.peek().is_some() {
err.span_suggestions_with_applicability(
expr.span,
"try using a conversion method",
// provided (if any) into their appropriate spaces. We'll also report
// errors if type parameters are provided in an inappropriate place.
- let mut generic_segs = FxHashSet::default();
- for PathSeg(_, index) in &path_segs {
- generic_segs.insert(index);
- }
+ let generic_segs = path_segs.iter().map(|PathSeg(_, index)| index)
+ .collect::<FxHashSet<_>>();
AstConv::prohibit_generics(self, segments.iter().enumerate().filter_map(|(index, seg)| {
if !generic_segs.contains(&index) {
Some(seg)
}
},
);
- assert!(!substs.has_escaping_regions());
- assert!(!ty.has_escaping_regions());
+ assert!(!substs.has_escaping_bound_vars());
+ assert!(!ty.has_escaping_bound_vars());
// Write the "user substs" down first thing for later.
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
hir::ItemKind::Trait(..) => {
check_trait(tcx, item);
}
+ hir::ItemKind::TraitAlias(..) => {
+ check_trait(tcx, item);
+ }
_ => {}
}
}
Checker { tcx, trait_def_id }
.check(tcx.lang_items().drop_trait(), visit_implementation_of_drop)
.check(tcx.lang_items().copy_trait(), visit_implementation_of_copy)
- .check(tcx.lang_items().coerce_unsized_trait(),
- visit_implementation_of_coerce_unsized);
+ .check(tcx.lang_items().coerce_unsized_trait(), visit_implementation_of_coerce_unsized)
+ .check(tcx.lang_items().dispatch_from_dyn_trait(),
+ visit_implementation_of_dispatch_from_dyn);
}
struct Checker<'a, 'tcx: 'a> {
let span = tcx.hir.span(impl_node_id);
let param_env = tcx.param_env(impl_did);
- assert!(!self_type.has_escaping_regions());
+ assert!(!self_type.has_escaping_bound_vars());
debug!("visit_implementation_of_copy: self_type={:?} (free)",
self_type);
}
}
+fn visit_implementation_of_dispatch_from_dyn<'a, 'tcx>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ impl_did: DefId,
+) {
+ debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}",
+ impl_did);
+ if impl_did.is_local() {
+ let dispatch_from_dyn_trait = tcx.lang_items().dispatch_from_dyn_trait().unwrap();
+
+ let impl_node_id = tcx.hir.as_local_node_id(impl_did).unwrap();
+ let span = tcx.hir.span(impl_node_id);
+
+ let source = tcx.type_of(impl_did);
+ assert!(!source.has_escaping_bound_vars());
+ let target = {
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
+ assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait);
+
+ trait_ref.substs.type_at(1)
+ };
+
+ debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}",
+ source,
+ target);
+
+ let param_env = tcx.param_env(impl_did);
+
+ let create_err = |msg: &str| {
+ struct_span_err!(tcx.sess, span, E0378, "{}", msg)
+ };
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = ObligationCause::misc(span, impl_node_id);
+
+ use ty::TyKind::*;
+ match (&source.sty, &target.sty) {
+ (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
+ if infcx.at(&cause, param_env).eq(r_a, r_b).is_ok()
+ && mutbl_a == *mutbl_b => (),
+ (&RawPtr(tm_a), &RawPtr(tm_b))
+ if tm_a.mutbl == tm_b.mutbl => (),
+ (&Adt(def_a, substs_a), &Adt(def_b, substs_b))
+ if def_a.is_struct() && def_b.is_struct() =>
+ {
+ if def_a != def_b {
+ let source_path = tcx.item_path_str(def_a.did);
+ let target_path = tcx.item_path_str(def_b.did);
+
+ create_err(
+ &format!(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected `{}`, found `{}`",
+ source_path, target_path,
+ )
+ ).emit();
+
+ return
+ }
+
+ if def_a.repr.c() || def_a.repr.packed() {
+ create_err(
+ "structs implementing `DispatchFromDyn` may not have \
+ `#[repr(packed)]` or `#[repr(C)]`"
+ ).emit();
+ }
+
+ let fields = &def_a.non_enum_variant().fields;
+
+ let coerced_fields = fields.iter().filter_map(|field| {
+ if tcx.type_of(field.did).is_phantom_data() {
+ // ignore PhantomData fields
+ return None
+ }
+
+ let ty_a = field.ty(tcx, substs_a);
+ let ty_b = field.ty(tcx, substs_b);
+ if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) {
+ if ok.obligations.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for structs containing the field being coerced, \
+ `PhantomData` fields, and nothing else"
+ ).note(
+ &format!(
+ "extra field `{}` of type `{}` is not allowed",
+ field.ident, ty_a,
+ )
+ ).emit();
+
+ return None;
+ }
+ }
+
+ Some(field)
+ }).collect::<Vec<_>>();
+
+ if coerced_fields.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced, none found"
+ ).emit();
+ } else if coerced_fields.len() > 1 {
+ create_err(
+ "implementing the `DispatchFromDyn` trait requires multiple coercions",
+ ).note(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced"
+ ).note(
+ &format!(
+ "currently, {} fields need coercions: {}",
+ coerced_fields.len(),
+ coerced_fields.iter().map(|field| {
+ format!("`{}` (`{}` to `{}`)",
+ field.ident,
+ field.ty(tcx, substs_a),
+ field.ty(tcx, substs_b),
+ )
+ }).collect::<Vec<_>>()
+ .join(", ")
+ )
+ ).emit();
+ } else {
+ let mut fulfill_cx = TraitEngine::new(infcx.tcx);
+
+ for field in coerced_fields {
+
+ let predicate = tcx.predicate_for_trait_def(
+ param_env,
+ cause.clone(),
+ dispatch_from_dyn_trait,
+ 0,
+ field.ty(tcx, substs_a),
+ &[field.ty(tcx, substs_b).into()]
+ );
+
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ }
+
+ // Check that all transitive obligations are satisfied.
+ if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ }
+
+ // Finally, resolve all regions.
+ let region_scope_tree = region::ScopeTree::default();
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.resolve_regions_and_report_errors(
+ impl_did,
+ ®ion_scope_tree,
+ &outlives_env,
+ SuppressRegionErrors::default(),
+ );
+ }
+ }
+ _ => {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures"
+ ).emit();
+ }
+ }
+ })
+ }
+}
+
pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>,
impl_did: DefId)
-> CoerceUnsizedInfo {
let span = gcx.hir.span(impl_node_id);
let param_env = gcx.param_env(impl_did);
- assert!(!source.has_escaping_regions());
+ assert!(!source.has_escaping_bound_vars());
let err_info = CoerceUnsizedInfo { custom_kind: None };
E0377,
"the trait `CoerceUnsized` may only be implemented \
for a coercion between structures with the same \
- definition; expected {}, found {}",
+ definition; expected `{}`, found `{}`",
source_path,
target_path);
return err_info;
diff_fields.len(),
diff_fields.iter()
.map(|&(i, a, b)| {
- format!("{} ({} to {})", fields[i].ident, a, b)
+ format!("`{}` (`{}` to `{}`)", fields[i].ident, a, b)
})
.collect::<Vec<_>>()
.join(", ")));
use std::iter;
+struct OnlySelfBounds(bool);
+
///////////////////////////////////////////////////////////////////////////
// Main entry point
item_def_id: DefId,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Ty<'tcx> {
- if let Some(trait_ref) = poly_trait_ref.no_late_bound_regions() {
+ if let Some(trait_ref) = poly_trait_ref.no_bound_vars() {
self.tcx().mk_projection(item_def_id, trait_ref.substs)
} else {
// no late-bound regions, we can just ignore the binder
use rustc::hir::*;
// In the AST, bounds can derive from two places. Either
- // written inline like `<T:Foo>` or in a where clause like
- // `where T:Foo`.
+ // written inline like `<T : Foo>` or in a where clause like
+ // `where T : Foo`.
let param_id = tcx.hir.as_local_node_id(def_id).unwrap();
let param_owner = tcx.hir.ty_param_owner(param_id);
let icx = ItemCtxt::new(tcx, item_def_id);
result
.predicates
- .extend(icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty));
+ .extend(icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty,
+ OnlySelfBounds(true)));
result
}
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
- /// Find bounds from hir::Generics. This requires scanning through the
+ /// Find bounds from `hir::Generics`. This requires scanning through the
/// AST. We do this to avoid having to convert *all* the bounds, which
/// would create artificial cycles. Instead we can only convert the
/// bounds for a type parameter `X` if `X::Foo` is used.
ast_generics: &hir::Generics,
param_id: ast::NodeId,
ty: Ty<'tcx>,
+ only_self_bounds: OnlySelfBounds,
) -> Vec<(ty::Predicate<'tcx>, Span)> {
let from_ty_params = ast_generics
.params
hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
_ => None,
})
- .filter(|bp| is_param(self.tcx, &bp.bounded_ty, param_id))
- .flat_map(|bp| bp.bounds.iter())
- .flat_map(|b| predicates_from_bound(self, ty, b));
+ .flat_map(|bp| {
+ let bt = if is_param(self.tcx, &bp.bounded_ty, param_id) {
+ Some(ty)
+ } else if !only_self_bounds.0 {
+ Some(self.to_ty(&bp.bounded_ty))
+ } else {
+ None
+ };
+ bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b)))
+ })
+ .flat_map(|(bt, b)| predicates_from_bound(self, bt, b));
from_ty_params.chain(from_where_clauses).collect()
}
tcx.predicates_of(def_id);
}
hir::ItemKind::TraitAlias(..) => {
- span_err!(
- tcx.sess,
- it.span,
- E0645,
- "trait aliases are not yet implemented (see issue #41517)"
- );
+ tcx.generics_of(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.predicates_of(def_id);
}
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
tcx.generics_of(def_id);
let icx = ItemCtxt::new(tcx, trait_def_id);
- // Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`.
+ // Convert the bounds that follow the colon, e.g. `Bar + Zed` in `trait Foo : Bar + Zed`.
let self_param_ty = tcx.mk_self_type();
let superbounds1 = compute_bounds(&icx, self_param_ty, bounds, SizedByDefault::No, item.span);
let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
// Convert any explicit superbounds in the where clause,
- // e.g. `trait Foo where Self : Bar`:
- let superbounds2 = icx.type_parameter_bounds_in_generics(generics, item.id, self_param_ty);
+ // e.g. `trait Foo where Self : Bar`.
+ // In the case of trait aliases, however, we include all bounds in the where clause,
+ // so e.g. `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
+ // as one of its "superpredicates".
+ let is_trait_alias = ty::is_trait_alias(tcx, trait_def_id);
+ let superbounds2 = icx.type_parameter_bounds_in_generics(
+ generics, item.id, self_param_ty, OnlySelfBounds(!is_trait_alias));
// Combine the two lists to form the complete set of superbounds:
let superbounds: Vec<_> = superbounds1.into_iter().chain(superbounds2).collect();
// Now require that immediate supertraits are converted,
// which will, in turn, reach indirect supertraits.
for &(pred, span) in &superbounds {
+ debug!("superbound: {:?}", pred);
if let ty::Predicate::Trait(bound) = pred {
tcx.at(span).super_predicates_of(bound.def_id());
}
let icx = ItemCtxt::new(tcx, def_id);
let no_generics = hir::Generics::empty();
+ let empty_trait_items = HirVec::new();
let mut predicates = UniquePredicates::new();
is_trait = Some((ty::TraitRef::identity(tcx, def_id), items));
generics
}
+ ItemKind::TraitAlias(ref generics, _) => {
+ is_trait = Some((ty::TraitRef::identity(tcx, def_id), &empty_trait_items));
+ generics
+ }
ItemKind::Existential(ExistTy {
ref bounds,
impl_trait_fn,
}
}
-/// Converts a specific GenericBound from the AST into a set of
+/// Converts a specific `GenericBound` from the AST into a set of
/// predicates that apply to the self-type. A vector is returned
-/// because this can be anywhere from 0 predicates (`T:?Sized` adds no
-/// predicates) to 1 (`T:Foo`) to many (`T:Bar<X=i32>` adds `T:Bar`
+/// because this can be anywhere from zero predicates (`T : ?Sized` adds no
+/// predicates) to one (`T : Foo`) to many (`T : Bar<X=i32>` adds `T : Bar`
/// and `<T as Bar>::X == i32`).
fn predicates_from_bound<'tcx>(
astconv: &dyn AstConv<'tcx, 'tcx>,
struct.
"##,
+E0378: r##"
+The `DispatchFromDyn` trait currently can only be implemented for
+builtin pointer types and structs that are newtype wrappers around them
+— that is, the struct must have only one field (except for`PhantomData`),
+and that field must itself implement `DispatchFromDyn`.
+
+Examples:
+
+```
+#![feature(dispatch_from_dyn, unsize)]
+use std::{
+ marker::Unsize,
+ ops::DispatchFromDyn,
+};
+
+struct Ptr<T: ?Sized>(*const T);
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T>
+where
+ T: Unsize<U>,
+{}
+```
+
+```
+#![feature(dispatch_from_dyn)]
+use std::{
+ ops::DispatchFromDyn,
+ marker::PhantomData,
+};
+
+struct Wrapper<T> {
+ ptr: T,
+ _phantom: PhantomData<()>,
+}
+
+impl<T, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T>
+where
+ T: DispatchFromDyn<U>,
+{}
+```
+
+Example of illegal `DispatchFromDyn` implementation
+(illegal because of extra field)
+
+```compile-fail,E0378
+#![feature(dispatch_from_dyn)]
+use std::ops::DispatchFromDyn;
+
+struct WrapperExtraField<T> {
+ ptr: T,
+ extra_stuff: i32,
+}
+
+impl<T, U> DispatchFromDyn<WrapperExtraField<U>> for WrapperExtraField<T>
+where
+ T: DispatchFromDyn<U>,
+{}
+```
+"##,
+
E0390: r##"
You tried to implement methods for a primitive type. Erroneous code example:
RegionKind::ReEmpty
| RegionKind::ReErased
| RegionKind::ReClosureBound(..)
- | RegionKind::ReCanonical(..)
| RegionKind::ReScope(..)
| RegionKind::ReVar(..)
| RegionKind::RePlaceholder(..)
ty::UnnormalizedProjection(..) |
ty::GeneratorWitness(..) |
+ ty::Bound(..) |
ty::Infer(..) => {
bug!("unexpected type encountered in \
variance inference: {}",
// way early-bound regions do, so we skip them here.
}
- ty::ReCanonical(_) |
ty::ReFree(..) |
ty::ReClosureBound(..) |
ty::ReScope(..) |
_ => return None,
})
}
+
+ pub fn is_associated(&self) -> bool {
+ match *self {
+ ItemEnum::TypedefItem(_, _) |
+ ItemEnum::AssociatedTypeItem(_, _) => true,
+ _ => false,
+ }
+ }
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
ty::RePlaceholder(..) |
ty::ReEmpty |
ty::ReClosureBound(_) |
- ty::ReCanonical(_) |
ty::ReErased => None
}
}
ty::Closure(..) | ty::Generator(..) => Tuple(vec![]), // FIXME(pcwalton)
+ ty::Bound(..) => panic!("Bound"),
ty::UnnormalizedProjection(..) => panic!("UnnormalizedProjection"),
ty::GeneratorWitness(..) => panic!("GeneratorWitness"),
ty::Infer(..) => panic!("Infer"),
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::{BTreeMap, BTreeSet};
+use std::fmt;
+use std::path::PathBuf;
+
+use errors;
+use errors::emitter::ColorConfig;
+use getopts;
+use rustc::lint::Level;
+use rustc::session::early_error;
+use rustc::session::config::{CodegenOptions, DebuggingOptions, ErrorOutputType, Externs};
+use rustc::session::config::{nightly_options, build_codegen_options, build_debugging_options,
+ get_cmd_lint_options};
+use rustc::session::search_paths::SearchPaths;
+use rustc_driver;
+use rustc_target::spec::TargetTriple;
+use syntax::edition::Edition;
+
+use core::new_handler;
+use externalfiles::ExternalHtml;
+use html;
+use html::markdown::IdMap;
+use opts;
+use passes::{self, DefaultPassOption};
+use theme;
+
+/// Configuration options for rustdoc.
+#[derive(Clone)]
+pub struct Options {
+ // Basic options / Options passed directly to rustc
+
+ /// The crate root or Markdown file to load.
+ pub input: PathBuf,
+ /// The name of the crate being documented.
+ pub crate_name: Option<String>,
+ /// How to format errors and warnings.
+ pub error_format: ErrorOutputType,
+ /// Library search paths to hand to the compiler.
+ pub libs: SearchPaths,
+ /// The list of external crates to link against.
+ pub externs: Externs,
+ /// List of `cfg` flags to hand to the compiler. Always includes `rustdoc`.
+ pub cfgs: Vec<String>,
+ /// Codegen options to hand to the compiler.
+ pub codegen_options: CodegenOptions,
+ /// Debugging (`-Z`) options to pass to the compiler.
+ pub debugging_options: DebuggingOptions,
+ /// The target used to compile the crate against.
+ pub target: Option<TargetTriple>,
+ /// Edition used when reading the crate. Defaults to "2015". Also used by default when
+ /// compiling doctests from the crate.
+ pub edition: Edition,
+ /// The path to the sysroot. Used during the compilation process.
+ pub maybe_sysroot: Option<PathBuf>,
+ /// Linker to use when building doctests.
+ pub linker: Option<PathBuf>,
+ /// Lint information passed over the command-line.
+ pub lint_opts: Vec<(String, Level)>,
+ /// Whether to ask rustc to describe the lints it knows. Practically speaking, this will not be
+ /// used, since we abort if we have no input file, but it's included for completeness.
+ pub describe_lints: bool,
+ /// What level to cap lints at.
+ pub lint_cap: Option<Level>,
+
+ // Options specific to running doctests
+
+ /// Whether we should run doctests instead of generating docs.
+ pub should_test: bool,
+ /// List of arguments to pass to the test harness, if running tests.
+ pub test_args: Vec<String>,
+
+ // Options that affect the documentation process
+
+ /// The selected default set of passes to use.
+ ///
+ /// Be aware: This option can come both from the CLI and from crate attributes!
+ pub default_passes: DefaultPassOption,
+ /// Any passes manually selected by the user.
+ ///
+ /// Be aware: This option can come both from the CLI and from crate attributes!
+ pub manual_passes: Vec<String>,
+ /// Whether to display warnings during doc generation or while gathering doctests. By default,
+ /// all non-rustdoc-specific lints are allowed when generating docs.
+ pub display_warnings: bool,
+
+ // Options that alter generated documentation pages
+
+ /// Crate version to note on the sidebar of generated docs.
+ pub crate_version: Option<String>,
+ /// Collected options specific to outputting final pages.
+ pub render_options: RenderOptions,
+}
+
+impl fmt::Debug for Options {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ struct FmtExterns<'a>(&'a Externs);
+
+ impl<'a> fmt::Debug for FmtExterns<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_map()
+ .entries(self.0.iter())
+ .finish()
+ }
+ }
+
+ f.debug_struct("Options")
+ .field("input", &self.input)
+ .field("crate_name", &self.crate_name)
+ .field("error_format", &self.error_format)
+ .field("libs", &self.libs)
+ .field("externs", &FmtExterns(&self.externs))
+ .field("cfgs", &self.cfgs)
+ .field("codegen_options", &"...")
+ .field("debugging_options", &"...")
+ .field("target", &self.target)
+ .field("edition", &self.edition)
+ .field("maybe_sysroot", &self.maybe_sysroot)
+ .field("linker", &self.linker)
+ .field("lint_opts", &self.lint_opts)
+ .field("describe_lints", &self.describe_lints)
+ .field("lint_cap", &self.lint_cap)
+ .field("should_test", &self.should_test)
+ .field("test_args", &self.test_args)
+ .field("default_passes", &self.default_passes)
+ .field("manual_passes", &self.manual_passes)
+ .field("display_warnings", &self.display_warnings)
+ .field("crate_version", &self.crate_version)
+ .field("render_options", &self.render_options)
+ .finish()
+ }
+}
+
+/// Configuration options for the HTML page-creation process.
+#[derive(Clone, Debug)]
+pub struct RenderOptions {
+ /// Output directory to generate docs into. Defaults to `doc`.
+ pub output: PathBuf,
+ /// External files to insert into generated pages.
+ pub external_html: ExternalHtml,
+ /// A pre-populated `IdMap` with the default headings and any headings added by Markdown files
+ /// processed by `external_html`.
+ pub id_map: IdMap,
+ /// If present, playground URL to use in the "Run" button added to code samples.
+ ///
+ /// Be aware: This option can come both from the CLI and from crate attributes!
+ pub playground_url: Option<String>,
+ /// Whether to sort modules alphabetically on a module page instead of using declaration order.
+ /// `true` by default.
+ ///
+ /// FIXME(misdreavus): the flag name is `--sort-modules-by-appearance` but the meaning is
+ /// inverted once read
+ pub sort_modules_alphabetically: bool,
+ /// List of themes to extend the docs with. Original argument name is included to assist in
+ /// displaying errors if it fails a theme check.
+ pub themes: Vec<PathBuf>,
+ /// If present, CSS file that contains rules to add to the default CSS.
+ pub extension_css: Option<PathBuf>,
+ /// A map of crate names to the URL to use instead of querying the crate's `html_root_url`.
+ pub extern_html_root_urls: BTreeMap<String, String>,
+ /// If present, suffix added to CSS/JavaScript files when referencing them in generated pages.
+ pub resource_suffix: String,
+ /// Whether to run the static CSS/JavaScript through a minifier when outputting them. `true` by
+ /// default.
+ ///
+ /// FIXME(misdreavus): the flag name is `--disable-minification` but the meaning is inverted
+ /// once read
+ pub enable_minification: bool,
+ /// Whether to create an index page in the root of the output directory. If this is true but
+ /// `enable_index_page` is None, generate a static listing of crates instead.
+ pub enable_index_page: bool,
+ /// A file to use as the index page at the root of the output directory. Overrides
+ /// `enable_index_page` to be true if set.
+ pub index_page: Option<PathBuf>,
+
+ // Options specific to reading standalone Markdown files
+
+ /// Whether to generate a table of contents on the output file when reading a standalone
+ /// Markdown file.
+ pub markdown_no_toc: bool,
+    /// Additional CSS files to link in pages generated from standalone Markdown files.
+ pub markdown_css: Vec<String>,
+ /// If present, playground URL to use in the "Run" button added to code samples generated from
+ /// standalone Markdown files. If not present, `playground_url` is used.
+ pub markdown_playground_url: Option<String>,
+}
+
+impl Options {
+ /// Parses the given command-line for options. If an error message or other early-return has
+ /// been printed, returns `Err` with the exit code.
+ pub fn from_matches(matches: &getopts::Matches) -> Result<Options, isize> {
+ // Check for unstable options.
+ nightly_options::check_nightly_options(&matches, &opts());
+
+ if matches.opt_present("h") || matches.opt_present("help") {
+ ::usage("rustdoc");
+ return Err(0);
+ } else if matches.opt_present("version") {
+ rustc_driver::version("rustdoc", &matches);
+ return Err(0);
+ }
+
+ if matches.opt_strs("passes") == ["list"] {
+ println!("Available passes for running rustdoc:");
+ for pass in passes::PASSES {
+ println!("{:>20} - {}", pass.name(), pass.description());
+ }
+ println!("\nDefault passes for rustdoc:");
+ for &name in passes::DEFAULT_PASSES {
+ println!("{:>20}", name);
+ }
+ println!("\nPasses run with `--document-private-items`:");
+ for &name in passes::DEFAULT_PRIVATE_PASSES {
+ println!("{:>20}", name);
+ }
+ return Err(0);
+ }
+
+ let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) {
+ Some("auto") => ColorConfig::Auto,
+ Some("always") => ColorConfig::Always,
+ Some("never") => ColorConfig::Never,
+ None => ColorConfig::Auto,
+ Some(arg) => {
+ early_error(ErrorOutputType::default(),
+ &format!("argument for --color must be `auto`, `always` or `never` \
+ (instead was `{}`)", arg));
+ }
+ };
+ let error_format = match matches.opt_str("error-format").as_ref().map(|s| &s[..]) {
+ Some("human") => ErrorOutputType::HumanReadable(color),
+ Some("json") => ErrorOutputType::Json(false),
+ Some("pretty-json") => ErrorOutputType::Json(true),
+ Some("short") => ErrorOutputType::Short(color),
+ None => ErrorOutputType::HumanReadable(color),
+ Some(arg) => {
+ early_error(ErrorOutputType::default(),
+ &format!("argument for --error-format must be `human`, `json` or \
+ `short` (instead was `{}`)", arg));
+ }
+ };
+
+ let codegen_options = build_codegen_options(matches, error_format);
+ let debugging_options = build_debugging_options(matches, error_format);
+
+ let diag = new_handler(error_format,
+ None,
+ debugging_options.treat_err_as_bug,
+ debugging_options.ui_testing);
+
+ // check for deprecated options
+ check_deprecated_options(&matches, &diag);
+
+ let to_check = matches.opt_strs("theme-checker");
+ if !to_check.is_empty() {
+ let paths = theme::load_css_paths(include_bytes!("html/static/themes/light.css"));
+ let mut errors = 0;
+
+ println!("rustdoc: [theme-checker] Starting tests!");
+ for theme_file in to_check.iter() {
+ print!(" - Checking \"{}\"...", theme_file);
+ let (success, differences) = theme::test_theme_against(theme_file, &paths, &diag);
+ if !differences.is_empty() || !success {
+ println!(" FAILED");
+ errors += 1;
+ if !differences.is_empty() {
+ println!("{}", differences.join("\n"));
+ }
+ } else {
+ println!(" OK");
+ }
+ }
+ if errors != 0 {
+ return Err(1);
+ }
+ return Err(0);
+ }
+
+ if matches.free.is_empty() {
+ diag.struct_err("missing file operand").emit();
+ return Err(1);
+ }
+ if matches.free.len() > 1 {
+ diag.struct_err("too many file operands").emit();
+ return Err(1);
+ }
+ let input = PathBuf::from(&matches.free[0]);
+
+ let mut libs = SearchPaths::new();
+ for s in &matches.opt_strs("L") {
+ libs.add_path(s, error_format);
+ }
+ let externs = match parse_externs(&matches) {
+ Ok(ex) => ex,
+ Err(err) => {
+ diag.struct_err(&err).emit();
+ return Err(1);
+ }
+ };
+ let extern_html_root_urls = match parse_extern_html_roots(&matches) {
+ Ok(ex) => ex,
+ Err(err) => {
+ diag.struct_err(err).emit();
+ return Err(1);
+ }
+ };
+
+ let test_args = matches.opt_strs("test-args");
+ let test_args: Vec<String> = test_args.iter()
+ .flat_map(|s| s.split_whitespace())
+ .map(|s| s.to_string())
+ .collect();
+
+ let should_test = matches.opt_present("test");
+
+ let output = matches.opt_str("o")
+ .map(|s| PathBuf::from(&s))
+ .unwrap_or_else(|| PathBuf::from("doc"));
+ let mut cfgs = matches.opt_strs("cfg");
+ cfgs.push("rustdoc".to_string());
+
+ let extension_css = matches.opt_str("e").map(|s| PathBuf::from(&s));
+
+ if let Some(ref p) = extension_css {
+ if !p.is_file() {
+ diag.struct_err("option --extend-css argument must be a file").emit();
+ return Err(1);
+ }
+ }
+
+ let mut themes = Vec::new();
+ if matches.opt_present("themes") {
+ let paths = theme::load_css_paths(include_bytes!("html/static/themes/light.css"));
+
+ for (theme_file, theme_s) in matches.opt_strs("themes")
+ .iter()
+ .map(|s| (PathBuf::from(&s), s.to_owned())) {
+ if !theme_file.is_file() {
+ diag.struct_err("option --themes arguments must all be files").emit();
+ return Err(1);
+ }
+ let (success, ret) = theme::test_theme_against(&theme_file, &paths, &diag);
+ if !success || !ret.is_empty() {
+ diag.struct_err(&format!("invalid theme: \"{}\"", theme_s))
+ .help("check what's wrong with the --theme-checker option")
+ .emit();
+ return Err(1);
+ }
+ themes.push(theme_file);
+ }
+ }
+
+ let mut id_map = html::markdown::IdMap::new();
+ id_map.populate(html::render::initial_ids());
+ let external_html = match ExternalHtml::load(
+ &matches.opt_strs("html-in-header"),
+ &matches.opt_strs("html-before-content"),
+ &matches.opt_strs("html-after-content"),
+ &matches.opt_strs("markdown-before-content"),
+ &matches.opt_strs("markdown-after-content"), &diag, &mut id_map) {
+ Some(eh) => eh,
+ None => return Err(3),
+ };
+
+ let edition = matches.opt_str("edition").unwrap_or("2015".to_string());
+ let edition = match edition.parse() {
+ Ok(e) => e,
+ Err(_) => {
+ diag.struct_err("could not parse edition").emit();
+ return Err(1);
+ }
+ };
+
+ match matches.opt_str("r").as_ref().map(|s| &**s) {
+ Some("rust") | None => {}
+ Some(s) => {
+ diag.struct_err(&format!("unknown input format: {}", s)).emit();
+ return Err(1);
+ }
+ }
+
+ match matches.opt_str("w").as_ref().map(|s| &**s) {
+ Some("html") | None => {}
+ Some(s) => {
+ diag.struct_err(&format!("unknown output format: {}", s)).emit();
+ return Err(1);
+ }
+ }
+
+ let index_page = matches.opt_str("index-page").map(|s| PathBuf::from(&s));
+ if let Some(ref index_page) = index_page {
+ if !index_page.is_file() {
+ diag.struct_err("option `--index-page` argument must be a file").emit();
+ return Err(1);
+ }
+ }
+
+ let target = matches.opt_str("target").map(|target| {
+ if target.ends_with(".json") {
+ TargetTriple::TargetPath(PathBuf::from(target))
+ } else {
+ TargetTriple::TargetTriple(target)
+ }
+ });
+
+ let default_passes = if matches.opt_present("no-defaults") {
+ passes::DefaultPassOption::None
+ } else if matches.opt_present("document-private-items") {
+ passes::DefaultPassOption::Private
+ } else {
+ passes::DefaultPassOption::Default
+ };
+ let manual_passes = matches.opt_strs("passes");
+
+ let crate_name = matches.opt_str("crate-name");
+ let playground_url = matches.opt_str("playground-url");
+ let maybe_sysroot = matches.opt_str("sysroot").map(PathBuf::from);
+ let display_warnings = matches.opt_present("display-warnings");
+ let linker = matches.opt_str("linker").map(PathBuf::from);
+ let sort_modules_alphabetically = !matches.opt_present("sort-modules-by-appearance");
+ let resource_suffix = matches.opt_str("resource-suffix").unwrap_or_default();
+ let enable_minification = !matches.opt_present("disable-minification");
+ let markdown_no_toc = matches.opt_present("markdown-no-toc");
+ let markdown_css = matches.opt_strs("markdown-css");
+ let markdown_playground_url = matches.opt_str("markdown-playground-url");
+ let crate_version = matches.opt_str("crate-version");
+ let enable_index_page = matches.opt_present("enable-index-page") || index_page.is_some();
+
+ let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format);
+
+ Ok(Options {
+ input,
+ crate_name,
+ error_format,
+ libs,
+ externs,
+ cfgs,
+ codegen_options,
+ debugging_options,
+ target,
+ edition,
+ maybe_sysroot,
+ linker,
+ lint_opts,
+ describe_lints,
+ lint_cap,
+ should_test,
+ test_args,
+ default_passes,
+ manual_passes,
+ display_warnings,
+ crate_version,
+ render_options: RenderOptions {
+ output,
+ external_html,
+ id_map,
+ playground_url,
+ sort_modules_alphabetically,
+ themes,
+ extension_css,
+ extern_html_root_urls,
+ resource_suffix,
+ enable_minification,
+ enable_index_page,
+ index_page,
+ markdown_no_toc,
+ markdown_css,
+ markdown_playground_url,
+ }
+ })
+ }
+
+ /// Returns whether the file given as `self.input` is a Markdown file.
+ pub fn markdown_input(&self) -> bool {
+ self.input.extension()
+ .map_or(false, |e| e == "md" || e == "markdown")
+ }
+}
+
+/// Prints deprecation warnings for deprecated options
+fn check_deprecated_options(matches: &getopts::Matches, diag: &errors::Handler) {
+ let deprecated_flags = [
+ "input-format",
+ "output-format",
+ "no-defaults",
+ "passes",
+ ];
+
+ for flag in deprecated_flags.into_iter() {
+ if matches.opt_present(flag) {
+ let mut err = diag.struct_warn(&format!("the '{}' flag is considered deprecated",
+ flag));
+ err.warn("please see https://github.com/rust-lang/rust/issues/44136");
+
+ if *flag == "no-defaults" {
+ err.help("you may want to use --document-private-items");
+ }
+
+ err.emit();
+ }
+ }
+
+ let removed_flags = [
+ "plugins",
+ "plugin-path",
+ ];
+
+ for &flag in removed_flags.iter() {
+ if matches.opt_present(flag) {
+ diag.struct_warn(&format!("the '{}' flag no longer functions", flag))
+ .warn("see CVE-2018-1000622")
+ .emit();
+ }
+ }
+}
+
+/// Extracts `--extern-html-root-url` arguments from `matches` and returns a map of crate names to
+/// the given URLs. If an `--extern-html-root-url` argument was ill-formed, returns an error
+/// describing the issue.
+fn parse_extern_html_roots(
+ matches: &getopts::Matches,
+) -> Result<BTreeMap<String, String>, &'static str> {
+ let mut externs = BTreeMap::new();
+ for arg in &matches.opt_strs("extern-html-root-url") {
+ let mut parts = arg.splitn(2, '=');
+ let name = parts.next().ok_or("--extern-html-root-url must not be empty")?;
+ let url = parts.next().ok_or("--extern-html-root-url must be of the form name=url")?;
+ externs.insert(name.to_string(), url.to_string());
+ }
+
+ Ok(externs)
+}
+
+/// Extracts `--extern CRATE=PATH` arguments from `matches` and
+/// returns a map mapping crate names to their paths or else an
+/// error message.
+// FIXME(eddyb) This shouldn't be duplicated with `rustc::session`.
+fn parse_externs(matches: &getopts::Matches) -> Result<Externs, String> {
+ let mut externs: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();
+ for arg in &matches.opt_strs("extern") {
+ let mut parts = arg.splitn(2, '=');
+ let name = parts.next().ok_or("--extern value must not be empty".to_string())?;
+ let location = parts.next().map(|s| s.to_string());
+ if location.is_none() && !nightly_options::is_unstable_enabled(matches) {
+ return Err("the `-Z unstable-options` flag must also be passed to \
+ enable `--extern crate_name` without `=path`".to_string());
+ }
+ let name = name.to_string();
+ externs.entry(name).or_default().insert(location);
+ }
+ Ok(Externs::new(externs))
+}
use syntax::ast::{self, Ident, NodeId};
use syntax::source_map;
-use syntax::edition::Edition;
use syntax::feature_gate::UnstableFeatures;
use syntax::json::JsonEmitter;
use syntax::ptr::P;
use rustc_data_structures::sync::{self, Lrc};
use std::rc::Rc;
use std::sync::Arc;
-use std::path::PathBuf;
use visit_ast::RustdocVisitor;
+use config::{Options as RustdocOptions, RenderOptions};
use clean;
use clean::{get_path_for_type, Clean, MAX_DEF_ID, AttributesExt};
use html::render::RenderInfo;
)
}
-pub fn run_core(search_paths: SearchPaths,
- cfgs: Vec<String>,
- externs: config::Externs,
- input: Input,
- triple: Option<TargetTriple>,
- maybe_sysroot: Option<PathBuf>,
- allow_warnings: bool,
- crate_name: Option<String>,
- force_unstable_if_unmarked: bool,
- edition: Edition,
- cg: CodegenOptions,
- error_format: ErrorOutputType,
- cmd_lints: Vec<(String, lint::Level)>,
- lint_cap: Option<lint::Level>,
- describe_lints: bool,
- mut manual_passes: Vec<String>,
- mut default_passes: passes::DefaultPassOption,
- treat_err_as_bug: bool,
- ui_testing: bool,
-) -> (clean::Crate, RenderInfo, Vec<String>) {
+pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOptions, Vec<String>) {
// Parse, resolve, and typecheck the given crate.
- let cpath = match input {
- Input::File(ref p) => Some(p.clone()),
- _ => None
- };
+ let RustdocOptions {
+ input,
+ crate_name,
+ error_format,
+ libs,
+ externs,
+ cfgs,
+ codegen_options,
+ debugging_options,
+ target,
+ edition,
+ maybe_sysroot,
+ lint_opts,
+ describe_lints,
+ lint_cap,
+ mut default_passes,
+ mut manual_passes,
+ display_warnings,
+ render_options,
+ ..
+ } = options;
+
+ let cpath = Some(input.clone());
+ let input = Input::File(input);
let intra_link_resolution_failure_name = lint::builtin::INTRA_DOC_LINK_RESOLUTION_FAILURE.name;
let warnings_lint_name = lint::builtin::WARNINGS.name;
missing_docs.to_owned(),
missing_doc_example.to_owned()];
- whitelisted_lints.extend(cmd_lints.iter().map(|(lint, _)| lint).cloned());
+ whitelisted_lints.extend(lint_opts.iter().map(|(lint, _)| lint).cloned());
let lints = lint::builtin::HardwiredLints.get_lints()
.into_iter()
Some((lint.name_lower(), lint::Allow))
}
})
- .chain(cmd_lints.into_iter())
+ .chain(lint_opts.into_iter())
.collect::<Vec<_>>();
let host_triple = TargetTriple::from_triple(config::host_triple());
// plays with error output here!
let sessopts = config::Options {
maybe_sysroot,
- search_paths,
+ search_paths: libs,
crate_types: vec![config::CrateType::Rlib],
- lint_opts: if !allow_warnings {
+ lint_opts: if !display_warnings {
lints
} else {
vec![]
},
lint_cap: Some(lint_cap.unwrap_or_else(|| lint::Forbid)),
- cg,
+ cg: codegen_options,
externs,
- target_triple: triple.unwrap_or(host_triple),
+ target_triple: target.unwrap_or(host_triple),
// Ensure that rustdoc works even if rustc is feature-staged
unstable_features: UnstableFeatures::Allow,
actually_rustdoc: true,
- debugging_opts: config::DebuggingOptions {
- force_unstable_if_unmarked,
- treat_err_as_bug,
- ui_testing,
- ..config::basic_debugging_options()
- },
+ debugging_opts: debugging_options.clone(),
error_format,
edition,
describe_lints,
let source_map = Lrc::new(source_map::SourceMap::new(sessopts.file_path_mapping()));
let diagnostic_handler = new_handler(error_format,
Some(source_map.clone()),
- treat_err_as_bug,
- ui_testing);
+ debugging_options.treat_err_as_bug,
+ debugging_options.ui_testing);
let mut sess = session::build_session_(
sessopts, cpath, diagnostic_handler, source_map,
ctxt.sess().abort_if_errors();
- (krate, ctxt.renderinfo.into_inner(), passes)
+ (krate, ctxt.renderinfo.into_inner(), render_options, passes)
}), &sess)
})
}
use html::markdown::{IdMap, ErrorCodes, Markdown};
use std::cell::RefCell;
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct ExternalHtml {
/// Content that will be included inline in the <head> section of a
/// rendered Markdown file or generated documentation
links
}
-#[derive(Default)]
+#[derive(Clone, Default, Debug)]
pub struct IdMap {
map: FxHashMap<String, usize>,
}
use std::sync::Arc;
use std::rc::Rc;
-use externalfiles::ExternalHtml;
-
+use errors;
use serialize::json::{ToJson, Json, as_json};
use syntax::ast;
use syntax::ext::base::MacroKind;
use rustc_data_structures::flock;
use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability};
+use config::RenderOptions;
use doctree;
use fold::DocFolder;
use html::escape::Escape;
/// Generates the documentation for `crate` into the directory `dst`
pub fn run(mut krate: clean::Crate,
- extern_urls: BTreeMap<String, String>,
- external_html: &ExternalHtml,
- playground_url: Option<String>,
- dst: PathBuf,
- resource_suffix: String,
+ options: RenderOptions,
passes: FxHashSet<String>,
- css_file_extension: Option<PathBuf>,
renderinfo: RenderInfo,
- sort_modules_alphabetically: bool,
- themes: Vec<PathBuf>,
- enable_minification: bool,
- id_map: IdMap) -> Result<(), Error> {
+ diag: &errors::Handler) -> Result<(), Error> {
+ // need to save a copy of the options for rendering the index page
+ let md_opts = options.clone();
+ let RenderOptions {
+ output,
+ external_html,
+ id_map,
+ playground_url,
+ sort_modules_alphabetically,
+ themes,
+ extension_css,
+ extern_html_root_urls,
+ resource_suffix,
+ ..
+ } = options;
+
let src_root = match krate.src {
FileName::Real(ref p) => match p.parent() {
Some(p) => p.to_path_buf(),
layout: layout::Layout {
logo: String::new(),
favicon: String::new(),
- external_html: external_html.clone(),
+ external_html,
krate: krate.name.clone(),
},
- css_file_extension,
+ css_file_extension: extension_css,
created_dirs: Default::default(),
sort_modules_alphabetically,
themes,
}
}
}
+ let dst = output;
try_err!(fs::create_dir_all(&dst), &dst);
krate = render_sources(&dst, &mut scx, krate)?;
let cx = Context {
},
_ => PathBuf::new(),
};
- let extern_url = extern_urls.get(&e.name).map(|u| &**u);
+ let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u);
cache.extern_locations.insert(n, (e.name.clone(), src_root,
extern_location(e, extern_url, &cx.dst)));
CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone());
CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear());
- write_shared(&cx, &krate, &*cache, index, enable_minification)?;
+ write_shared(&cx, &krate, &*cache, index, &md_opts, diag)?;
// And finally render the whole crate's documentation
cx.krate(krate)
Json::Object(crate_data))
}
-fn write_shared(cx: &Context,
- krate: &clean::Crate,
- cache: &Cache,
- search_index: String,
- enable_minification: bool) -> Result<(), Error> {
+fn write_shared(
+ cx: &Context,
+ krate: &clean::Crate,
+ cache: &Cache,
+ search_index: String,
+ options: &RenderOptions,
+ diag: &errors::Handler,
+) -> Result<(), Error> {
// Write out the shared files. Note that these are shared among all rustdoc
// docs placed in the output directory, so this needs to be a synchronized
// operation with respect to all other rustdocs running around.
write_minify(cx.dst.join(&format!("rustdoc{}.css", cx.shared.resource_suffix)),
include_str!("static/rustdoc.css"),
- enable_minification)?;
+ options.enable_minification)?;
write_minify(cx.dst.join(&format!("settings{}.css", cx.shared.resource_suffix)),
include_str!("static/settings.css"),
- enable_minification)?;
+ options.enable_minification)?;
// To avoid "light.css" to be overwritten, we'll first run over the received themes and only
// then we'll run over the "official" styles.
include_bytes!("static/wheel.svg"))?;
write_minify(cx.dst.join(&format!("light{}.css", cx.shared.resource_suffix)),
include_str!("static/themes/light.css"),
- enable_minification)?;
+ options.enable_minification)?;
themes.insert("light".to_owned());
write_minify(cx.dst.join(&format!("dark{}.css", cx.shared.resource_suffix)),
include_str!("static/themes/dark.css"),
- enable_minification)?;
+ options.enable_minification)?;
themes.insert("dark".to_owned());
let mut themes: Vec<&String> = themes.iter().collect();
write_minify(cx.dst.join(&format!("main{}.js", cx.shared.resource_suffix)),
include_str!("static/main.js"),
- enable_minification)?;
+ options.enable_minification)?;
write_minify(cx.dst.join(&format!("settings{}.js", cx.shared.resource_suffix)),
include_str!("static/settings.js"),
- enable_minification)?;
+ options.enable_minification)?;
{
let mut data = format!("var resourcesSuffix = \"{}\";\n",
data.push_str(include_str!("static/storage.js"));
write_minify(cx.dst.join(&format!("storage{}.js", cx.shared.resource_suffix)),
&data,
- enable_minification)?;
+ options.enable_minification)?;
}
if let Some(ref css) = cx.shared.css_file_extension {
let out = cx.dst.join(&format!("theme{}.css", cx.shared.resource_suffix));
- if !enable_minification {
+ if !options.enable_minification {
try_err!(fs::copy(css, out), css);
} else {
let mut f = try_err!(File::open(css), css);
let mut buffer = String::with_capacity(1000);
try_err!(f.read_to_string(&mut buffer), css);
- write_minify(out, &buffer, enable_minification)?;
+ write_minify(out, &buffer, options.enable_minification)?;
}
}
write_minify(cx.dst.join(&format!("normalize{}.css", cx.shared.resource_suffix)),
include_str!("static/normalize.css"),
- enable_minification)?;
+ options.enable_minification)?;
write(cx.dst.join("FiraSans-Regular.woff"),
include_bytes!("static/FiraSans-Regular.woff"))?;
write(cx.dst.join("FiraSans-Medium.woff"),
write(cx.dst.join("COPYRIGHT.txt"),
include_bytes!("static/COPYRIGHT.txt"))?;
- fn collect(path: &Path, krate: &str, key: &str) -> io::Result<Vec<String>> {
+ fn collect(path: &Path, krate: &str, key: &str) -> io::Result<(Vec<String>, Vec<String>)> {
let mut ret = Vec::new();
+ let mut krates = Vec::new();
if path.exists() {
for line in BufReader::new(File::open(path)?).lines() {
let line = line?;
continue;
}
ret.push(line.to_string());
+ krates.push(line[key.len() + 2..].split('"')
+ .next()
+ .map(|s| s.to_owned())
+ .unwrap_or_else(|| String::new()));
}
}
- Ok(ret)
+ Ok((ret, krates))
}
fn show_item(item: &IndexItem, krate: &str) -> String {
let dst = cx.dst.join("aliases.js");
{
- let mut all_aliases = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst);
+ let (mut all_aliases, _) = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst);
let mut w = try_err!(File::create(&dst), &dst);
let mut output = String::with_capacity(100);
for (alias, items) in &cache.aliases {
// Update the search index
let dst = cx.dst.join("search-index.js");
- let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
+ let (mut all_indexes, mut krates) = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
all_indexes.push(search_index);
// Sort the indexes by crate so the file will be generated identically even
// with rustdoc running in parallel.
let mut w = try_err!(File::create(&dst), &dst);
try_err!(writeln!(&mut w, "var N = null;var searchIndex = {{}};"), &dst);
for index in &all_indexes {
- try_err!(write_minify_replacer(&mut w, &*index, enable_minification,
+ try_err!(write_minify_replacer(&mut w, &*index, options.enable_minification,
&[(minifier::js::Keyword::Null, "N")]),
&dst);
}
try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst);
+ if options.enable_index_page {
+ if let Some(index_page) = options.index_page.clone() {
+ let mut md_opts = options.clone();
+ md_opts.output = cx.dst.clone();
+ md_opts.external_html = (*cx.shared).layout.external_html.clone();
+
+ ::markdown::render(index_page, md_opts, diag);
+ } else {
+ let dst = cx.dst.join("index.html");
+ let mut w = BufWriter::new(try_err!(File::create(&dst), &dst));
+ let page = layout::Page {
+ title: "Index of crates",
+ css_class: "mod",
+ root_path: "./",
+ description: "List of crates",
+ keywords: BASIC_KEYWORDS,
+ resource_suffix: &cx.shared.resource_suffix,
+ };
+ krates.push(krate.name.clone());
+ krates.sort();
+ krates.dedup();
+
+ let content = format!(
+"<h1 class='fqn'>\
+ <span class='in-band'>List of all crates</span>\
+</h1><ul class='mod'>{}</ul>",
+ krates
+ .iter()
+ .map(|s| {
+ format!("<li><a href=\"{}/index.html\">{}</li>", s, s)
+ })
+ .collect::<String>());
+ try_err!(layout::render(&mut w, &cx.shared.layout,
+ &page, &(""), &content,
+ cx.shared.css_file_extension.is_some(),
+ &cx.shared.themes), &dst);
+ try_err!(w.flush(), &dst);
+ }
+ }
+
// Update the list of all implementors for traits
let dst = cx.dst.join("implementors");
for (&did, imps) in &cache.implementors {
remote_item_type.css_class(),
remote_path[remote_path.len() - 1]));
- let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), &mydst);
+ let (mut all_implementors, _) = try_err!(collect(&mydst, &krate.name, "implementors"),
+ &mydst);
all_implementors.push(implementors);
// Sort the implementors by crate so the file will be generated
// identically even with rustdoc running in parallel.
if let Some(ref name) = item.name {
info!("Documenting {}", name);
}
- document_stability(w, cx, item)?;
- document_full(w, item, cx, "")?;
+ document_stability(w, cx, item, false)?;
+ document_full(w, item, cx, "", false)?;
Ok(())
}
cx: &Context,
md_text: &str,
links: Vec<(String, String)>,
- prefix: &str)
+ prefix: &str,
+ is_hidden: bool)
-> fmt::Result {
let mut ids = cx.id_map.borrow_mut();
- write!(w, "<div class='docblock'>{}{}</div>",
- prefix, Markdown(md_text, &links, RefCell::new(&mut ids), cx.codes))
+ write!(w, "<div class='docblock{}'>{}{}</div>",
+ if is_hidden { " hidden" } else { "" },
+ prefix,
+ Markdown(md_text, &links, RefCell::new(&mut ids),
+ cx.codes))
}
fn document_short(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link: AssocItemLink,
- prefix: &str) -> fmt::Result {
+ prefix: &str, is_hidden: bool) -> fmt::Result {
if let Some(s) = item.doc_value() {
let markdown = if s.contains('\n') {
format!("{} [Read more]({})",
} else {
plain_summary_line(Some(s))
};
- render_markdown(w, cx, &markdown, item.links(), prefix)?;
+ render_markdown(w, cx, &markdown, item.links(), prefix, is_hidden)?;
} else if !prefix.is_empty() {
- write!(w, "<div class='docblock'>{}</div>", prefix)?;
+ write!(w, "<div class='docblock{}'>{}</div>",
+ if is_hidden { " hidden" } else { "" },
+ prefix)?;
}
Ok(())
}
fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
- cx: &Context, prefix: &str) -> fmt::Result {
+ cx: &Context, prefix: &str, is_hidden: bool) -> fmt::Result {
if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) {
debug!("Doc block: =====\n{}\n=====", s);
- render_markdown(w, cx, &*s, item.links(), prefix)?;
+ render_markdown(w, cx, &*s, item.links(), prefix, is_hidden)?;
} else if !prefix.is_empty() {
- write!(w, "<div class='docblock'>{}</div>", prefix)?;
+ write!(w, "<div class='docblock{}'>{}</div>",
+ if is_hidden { " hidden" } else { "" },
+ prefix)?;
}
Ok(())
}
-fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
+fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item,
+ is_hidden: bool) -> fmt::Result {
let stabilities = short_stability(item, cx, true);
if !stabilities.is_empty() {
- write!(w, "<div class='stability'>")?;
+ write!(w, "<div class='stability{}'>", if is_hidden { " hidden" } else { "" })?;
for stability in stabilities {
write!(w, "{}", stability)?;
}
RenderMode::ForDeref { mut_: deref_mut_ } => should_render_item(&item, deref_mut_),
};
+ let (is_hidden, extra_class) = if trait_.is_none() ||
+ item.doc_value().is_some() ||
+ item.inner.is_associated() {
+ (false, "")
+ } else {
+ (true, " hidden")
+ };
match item.inner {
clean::MethodItem(clean::Method { ref decl, .. }) |
- clean::TyMethodItem(clean::TyMethod{ ref decl, .. }) => {
+ clean::TyMethodItem(clean::TyMethod { ref decl, .. }) => {
// Only render when the method is not static or we allow static methods
if render_method_item {
let id = cx.derive_id(format!("{}.{}", item_type, name));
let ns_id = cx.derive_id(format!("{}.{}", name, item_type.name_space()));
- write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
+ write!(w, "<h4 id='{}' class=\"{}{}\">", id, item_type, extra_class)?;
write!(w, "{}", spotlight_decl(decl)?)?;
write!(w, "<span id='{}' class='invisible'>", ns_id)?;
write!(w, "<table class='table-display'><tbody><tr><td><code>")?;
clean::TypedefItem(ref tydef, _) => {
let id = cx.derive_id(format!("{}.{}", ItemType::AssociatedType, name));
let ns_id = cx.derive_id(format!("{}.{}", name, item_type.name_space()));
- write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
+ write!(w, "<h4 id='{}' class=\"{}{}\">", id, item_type, extra_class)?;
write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link.anchor(&id))?;
write!(w, "</code></span></h4>\n")?;
clean::AssociatedConstItem(ref ty, ref default) => {
let id = cx.derive_id(format!("{}.{}", item_type, name));
let ns_id = cx.derive_id(format!("{}.{}", name, item_type.name_space()));
- write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
+ write!(w, "<h4 id='{}' class=\"{}{}\">", id, item_type, extra_class)?;
write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
assoc_const(w, item, ty, default.as_ref(), link.anchor(&id))?;
let src = if let Some(l) = (Item { cx, item }).src_href() {
clean::AssociatedTypeItem(ref bounds, ref default) => {
let id = cx.derive_id(format!("{}.{}", item_type, name));
let ns_id = cx.derive_id(format!("{}.{}", name, item_type.name_space()));
- write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
+ write!(w, "<h4 id='{}' class=\"{}{}\">", id, item_type, extra_class)?;
write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
assoc_type(w, item, bounds, default.as_ref(), link.anchor(&id))?;
write!(w, "</code></span></h4>\n")?;
if let Some(it) = t.items.iter().find(|i| i.name == item.name) {
// We need the stability of the item from the trait
// because impls can't have a stability.
- document_stability(w, cx, it)?;
+ document_stability(w, cx, it, is_hidden)?;
if item.doc_value().is_some() {
- document_full(w, item, cx, "")?;
+ document_full(w, item, cx, "", is_hidden)?;
} else if show_def_docs {
// In case the item isn't documented,
// provide short documentation from the trait.
- document_short(w, cx, it, link, "")?;
+ document_short(w, cx, it, link, "", is_hidden)?;
}
}
} else {
- document_stability(w, cx, item)?;
+ document_stability(w, cx, item, is_hidden)?;
if show_def_docs {
- document_full(w, item, cx, "")?;
+ document_full(w, item, cx, "", is_hidden)?;
}
}
} else {
- document_stability(w, cx, item)?;
+ document_stability(w, cx, item, is_hidden)?;
if show_def_docs {
- document_short(w, cx, item, link, "")?;
+ document_short(w, cx, item, link, "", is_hidden)?;
}
}
}
onEach(document.getElementsByClassName('method'), func);
onEach(document.getElementsByClassName('associatedconstant'), func);
onEach(document.getElementsByClassName('impl'), func);
+ onEach(document.getElementsByClassName('impl-items'), function(e) {
+ onEach(e.getElementsByClassName('associatedconstant'), func);
+ var hiddenElems = e.getElementsByClassName('hidden');
+ var needToggle = false;
+
+ for (var i = 0; i < hiddenElems.length; ++i) {
+ if (hasClass(hiddenElems[i], "content") === false &&
+ hasClass(hiddenElems[i], "docblock") === false) {
+ needToggle = true;
+ break;
+ }
+ }
+ if (needToggle === true) {
+ var newToggle = document.createElement('a');
+ newToggle.href = 'javascript:void(0)';
+ newToggle.className = 'collapse-toggle hidden-default collapsed';
+ newToggle.innerHTML = "[<span class='inner'>" + labelForToggleButton(true) + "</span>" +
+ "] Show hidden undocumented items";
+ newToggle.onclick = function() {
+ if (hasClass(this, "collapsed")) {
+ removeClass(this, "collapsed");
+ onEach(this.parentNode.getElementsByClassName("hidden"), function(x) {
+ if (hasClass(x, "content") === false) {
+ removeClass(x, "hidden");
+ addClass(x, "x");
+ }
+ }, true);
+ this.innerHTML = "[<span class='inner'>" + labelForToggleButton(false) +
+ "</span>] Hide undocumented items"
+ } else {
+ addClass(this, "collapsed");
+ onEach(this.parentNode.getElementsByClassName("x"), function(x) {
+ if (hasClass(x, "content") === false) {
+ addClass(x, "hidden");
+ removeClass(x, "x");
+ }
+ }, true);
+ this.innerHTML = "[<span class='inner'>" + labelForToggleButton(true) +
+ "</span>] Show hidden undocumented items";
+ }
+ };
+ e.insertBefore(newToggle, e.firstChild);
+ }
+ });
function createToggle(otherMessage, fontSize, extraClass, show) {
var span = document.createElement('span');
return wrapper;
}
- var hideItemDeclarations = getCurrentValue('rustdoc-item-declarations') === "false";
+ var showItemDeclarations = getCurrentValue('rustdoc-item-declarations') === "false";
function buildToggleWrapper(e) {
if (hasClass(e, 'autohide')) {
var wrap = e.previousElementSibling;
if (hasClass(e, "type-decl")) {
fontSize = "20px";
otherMessage = ' Show declaration';
- if (hideItemDeclarations === false) {
+ if (showItemDeclarations === false) {
extraClass = 'collapsed';
}
} else if (hasClass(e, "sub-variant")) {
extraClass = "marg-left";
}
- e.parentNode.insertBefore(createToggle(otherMessage,
- fontSize,
- extraClass,
- hideItemDeclarations),
- e);
- if (otherMessage.length > 0 && hideItemDeclarations === true) {
+ e.parentNode.insertBefore(
+ createToggle(otherMessage,
+ fontSize,
+ extraClass,
+ hasClass(e, "type-decl") === false || showItemDeclarations === true),
+ e);
+ if (hasClass(e, "type-decl") === true && showItemDeclarations === true) {
collapseDocs(e.previousSibling.childNodes[0], "toggle");
}
}
padding-left: 0;
}
-.example-wrap {
+body:not(.source) .example-wrap {
display: inline-flex;
+}
+
+.example-wrap {
width: 100%;
}
text-align: right;
}
-.example-wrap > pre.rust {
+body:not(.source) .example-wrap > pre.rust {
width: 100%;
}
margin-bottom: 15px;
}
-.content .impl-items .method, .content .impl-items > .type, .impl-items > .associatedconstant {
- margin-left: 20px;
-}
-.content .impl-items .docblock, .content .impl-items .stability {
- margin-bottom: .6em;
-}
-
-.content .impl-items > .stability {
- margin-left: 40px;
-}
-
.content .docblock > .impl-items {
margin-left: 20px;
margin-top: -34px;
top: -9px;
left: -13px;
}
-.methods > .stability {
+
+.content .impl-items .method, .content .impl-items > .type, .impl-items > .associatedconstant {
+ margin-left: 20px;
+}
+
+.content .impl-items .docblock, .content .impl-items .stability {
+ margin-bottom: .6em;
+}
+
+.content .impl-items > .stability {
+ margin-left: 40px;
+}
+
+.methods > .stability, .content .impl-items > .stability {
margin-top: -8px;
}
text-align: center;
}
+.collapse-toggle.hidden-default {
+ position: relative;
+ margin-left: 20px;
+}
+
.ghost {
display: none;
}
var savedHref = [];
-function onEach(arr, func) {
+function onEach(arr, func, reversed) {
if (arr && arr.length > 0 && func) {
- for (var i = 0; i < arr.length; i++) {
- if (func(arr[i]) === true) {
- return true;
+ if (reversed !== true) {
+ for (var i = 0; i < arr.length; ++i) {
+ if (func(arr[i]) === true) {
+ return true;
+ }
+ }
+ } else {
+ for (var i = arr.length - 1; i >= 0; --i) {
+ if (func(arr[i]) === true) {
+ return true;
+ }
}
}
}
extern crate serialize as rustc_serialize; // used by deriving
-use errors::ColorConfig;
-
-use std::collections::{BTreeMap, BTreeSet};
use std::default::Default;
use std::env;
use std::panic;
-use std::path::{Path, PathBuf};
use std::process;
use std::sync::mpsc::channel;
-use syntax::edition::Edition;
-use externalfiles::ExternalHtml;
use rustc::session::{early_warn, early_error};
-use rustc::session::search_paths::SearchPaths;
-use rustc::session::config::{ErrorOutputType, RustcOptGroup, Externs, CodegenOptions};
-use rustc::session::config::{nightly_options, build_codegen_options};
-use rustc_target::spec::TargetTriple;
-use rustc::session::config::get_cmd_lint_options;
+use rustc::session::config::{ErrorOutputType, RustcOptGroup};
#[macro_use]
mod externalfiles;
mod clean;
+mod config;
mod core;
mod doctree;
mod fold;
struct Output {
krate: clean::Crate,
renderinfo: html::render::RenderInfo,
+ renderopts: config::RenderOptions,
passes: Vec<String>,
}
"LEVEL",
)
}),
+ unstable("index-page", |o| {
+ o.optopt("",
+ "index-page",
+ "Markdown file to be used as index page",
+ "PATH")
+ }),
+ unstable("enable-index-page", |o| {
+ o.optflag("",
+ "enable-index-page",
+ "To enable generation of the index page")
+ }),
]
}
early_error(ErrorOutputType::default(), &err.to_string());
}
};
- // Check for unstable options.
- nightly_options::check_nightly_options(&matches, &opts());
-
- if matches.opt_present("h") || matches.opt_present("help") {
- usage("rustdoc");
- return 0;
- } else if matches.opt_present("version") {
- rustc_driver::version("rustdoc", &matches);
- return 0;
- }
-
- if matches.opt_strs("passes") == ["list"] {
- println!("Available passes for running rustdoc:");
- for pass in passes::PASSES {
- println!("{:>20} - {}", pass.name(), pass.description());
- }
- println!("\nDefault passes for rustdoc:");
- for &name in passes::DEFAULT_PASSES {
- println!("{:>20}", name);
- }
- println!("\nPasses run with `--document-private-items`:");
- for &name in passes::DEFAULT_PRIVATE_PASSES {
- println!("{:>20}", name);
- }
- return 0;
- }
-
- let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) {
- Some("auto") => ColorConfig::Auto,
- Some("always") => ColorConfig::Always,
- Some("never") => ColorConfig::Never,
- None => ColorConfig::Auto,
- Some(arg) => {
- early_error(ErrorOutputType::default(),
- &format!("argument for --color must be `auto`, `always` or `never` \
- (instead was `{}`)", arg));
- }
- };
- let error_format = match matches.opt_str("error-format").as_ref().map(|s| &s[..]) {
- Some("human") => ErrorOutputType::HumanReadable(color),
- Some("json") => ErrorOutputType::Json(false),
- Some("pretty-json") => ErrorOutputType::Json(true),
- Some("short") => ErrorOutputType::Short(color),
- None => ErrorOutputType::HumanReadable(color),
- Some(arg) => {
- early_error(ErrorOutputType::default(),
- &format!("argument for --error-format must be `human`, `json` or \
- `short` (instead was `{}`)", arg));
- }
- };
- let treat_err_as_bug = matches.opt_strs("Z").iter().any(|x| {
- *x == "treat-err-as-bug"
- });
- let ui_testing = matches.opt_strs("Z").iter().any(|x| {
- *x == "ui-testing"
- });
-
- let diag = core::new_handler(error_format, None, treat_err_as_bug, ui_testing);
-
- // check for deprecated options
- check_deprecated_options(&matches, &diag);
-
- let to_check = matches.opt_strs("theme-checker");
- if !to_check.is_empty() {
- let paths = theme::load_css_paths(include_bytes!("html/static/themes/light.css"));
- let mut errors = 0;
-
- println!("rustdoc: [theme-checker] Starting tests!");
- for theme_file in to_check.iter() {
- print!(" - Checking \"{}\"...", theme_file);
- let (success, differences) = theme::test_theme_against(theme_file, &paths, &diag);
- if !differences.is_empty() || !success {
- println!(" FAILED");
- errors += 1;
- if !differences.is_empty() {
- println!("{}", differences.join("\n"));
- }
- } else {
- println!(" OK");
- }
- }
- if errors != 0 {
- return 1;
- }
- return 0;
- }
-
- if matches.free.is_empty() {
- diag.struct_err("missing file operand").emit();
- return 1;
- }
- if matches.free.len() > 1 {
- diag.struct_err("too many file operands").emit();
- return 1;
- }
- let input = &matches.free[0];
-
- let mut libs = SearchPaths::new();
- for s in &matches.opt_strs("L") {
- libs.add_path(s, error_format);
- }
- let externs = match parse_externs(&matches) {
- Ok(ex) => ex,
- Err(err) => {
- diag.struct_err(&err).emit();
- return 1;
- }
- };
- let extern_urls = match parse_extern_html_roots(&matches) {
- Ok(ex) => ex,
- Err(err) => {
- diag.struct_err(err).emit();
- return 1;
- }
+ let options = match config::Options::from_matches(&matches) {
+ Ok(opts) => opts,
+ Err(code) => return code,
};
- let test_args = matches.opt_strs("test-args");
- let test_args: Vec<String> = test_args.iter()
- .flat_map(|s| s.split_whitespace())
- .map(|s| s.to_string())
- .collect();
-
- let should_test = matches.opt_present("test");
- let markdown_input = Path::new(input).extension()
- .map_or(false, |e| e == "md" || e == "markdown");
-
- let output = matches.opt_str("o").map(|s| PathBuf::from(&s));
- let css_file_extension = matches.opt_str("e").map(|s| PathBuf::from(&s));
- let mut cfgs = matches.opt_strs("cfg");
- cfgs.push("rustdoc".to_string());
-
- if let Some(ref p) = css_file_extension {
- if !p.is_file() {
- diag.struct_err("option --extend-css argument must be a file").emit();
- return 1;
- }
- }
-
- let mut themes = Vec::new();
- if matches.opt_present("themes") {
- let paths = theme::load_css_paths(include_bytes!("html/static/themes/light.css"));
-
- for (theme_file, theme_s) in matches.opt_strs("themes")
- .iter()
- .map(|s| (PathBuf::from(&s), s.to_owned())) {
- if !theme_file.is_file() {
- diag.struct_err("option --themes arguments must all be files").emit();
- return 1;
- }
- let (success, ret) = theme::test_theme_against(&theme_file, &paths, &diag);
- if !success || !ret.is_empty() {
- diag.struct_err(&format!("invalid theme: \"{}\"", theme_s))
- .help("check what's wrong with the --theme-checker option")
- .emit();
- return 1;
- }
- themes.push(theme_file);
- }
- }
-
- let mut id_map = html::markdown::IdMap::new();
- id_map.populate(html::render::initial_ids());
- let external_html = match ExternalHtml::load(
- &matches.opt_strs("html-in-header"),
- &matches.opt_strs("html-before-content"),
- &matches.opt_strs("html-after-content"),
- &matches.opt_strs("markdown-before-content"),
- &matches.opt_strs("markdown-after-content"), &diag, &mut id_map) {
- Some(eh) => eh,
- None => return 3,
- };
- let crate_name = matches.opt_str("crate-name");
- let playground_url = matches.opt_str("playground-url");
- let maybe_sysroot = matches.opt_str("sysroot").map(PathBuf::from);
- let display_warnings = matches.opt_present("display-warnings");
- let linker = matches.opt_str("linker").map(PathBuf::from);
- let sort_modules_alphabetically = !matches.opt_present("sort-modules-by-appearance");
- let resource_suffix = matches.opt_str("resource-suffix");
- let enable_minification = !matches.opt_present("disable-minification");
-
- let edition = matches.opt_str("edition").unwrap_or("2015".to_string());
- let edition = match edition.parse() {
- Ok(e) => e,
- Err(_) => {
- diag.struct_err("could not parse edition").emit();
- return 1;
- }
- };
+ let diag = core::new_handler(options.error_format,
+ None,
+ options.debugging_options.treat_err_as_bug,
+ options.debugging_options.ui_testing);
- let cg = build_codegen_options(&matches, ErrorOutputType::default());
-
- match (should_test, markdown_input) {
- (true, true) => {
- return markdown::test(input, cfgs, libs, externs, test_args, maybe_sysroot,
- display_warnings, linker, edition, cg, &diag)
- }
- (true, false) => {
- return test::run(Path::new(input), cfgs, libs, externs, test_args, crate_name,
- maybe_sysroot, display_warnings, linker, edition, cg)
- }
- (false, true) => return markdown::render(Path::new(input),
- output.unwrap_or(PathBuf::from("doc")),
- &matches, &external_html,
- !matches.opt_present("markdown-no-toc"), &diag),
+ match (options.should_test, options.markdown_input()) {
+ (true, true) => return markdown::test(options, &diag),
+ (true, false) => return test::run(options),
+ (false, true) => return markdown::render(options.input, options.render_options, &diag),
(false, false) => {}
}
- let output_format = matches.opt_str("w");
-
- let res = acquire_input(PathBuf::from(input), externs, edition, cg, &matches, error_format,
- move |out| {
- let Output { krate, passes, renderinfo } = out;
- let diag = core::new_handler(error_format, None, treat_err_as_bug, ui_testing);
+ // need to move these items separately because we lose them by the time the closure is called,
+    // but we can't create the Handler ahead of time because it's not Send
+ let diag_opts = (options.error_format,
+ options.debugging_options.treat_err_as_bug,
+ options.debugging_options.ui_testing);
+ rust_input(options, move |out| {
+ let Output { krate, passes, renderinfo, renderopts } = out;
info!("going to format");
- match output_format.as_ref().map(|s| &**s) {
- Some("html") | None => {
- html::render::run(krate, extern_urls, &external_html, playground_url,
- output.unwrap_or(PathBuf::from("doc")),
- resource_suffix.unwrap_or(String::new()),
- passes.into_iter().collect(),
- css_file_extension,
- renderinfo,
- sort_modules_alphabetically,
- themes,
- enable_minification, id_map)
- .expect("failed to generate documentation");
- 0
- }
- Some(s) => {
- diag.struct_err(&format!("unknown output format: {}", s)).emit();
- 1
- }
- }
- });
- res.unwrap_or_else(|s| {
- diag.struct_err(&format!("input error: {}", s)).emit();
- 1
+ let (error_format, treat_err_as_bug, ui_testing) = diag_opts;
+ let diag = core::new_handler(error_format, None, treat_err_as_bug, ui_testing);
+ html::render::run(krate, renderopts, passes.into_iter().collect(), renderinfo, &diag)
+ .expect("failed to generate documentation");
+ 0
})
}
-/// Looks inside the command line arguments to extract the relevant input format
-/// and files and then generates the necessary rustdoc output for formatting.
-fn acquire_input<R, F>(input: PathBuf,
- externs: Externs,
- edition: Edition,
- cg: CodegenOptions,
- matches: &getopts::Matches,
- error_format: ErrorOutputType,
- f: F)
- -> Result<R, String>
-where R: 'static + Send, F: 'static + Send + FnOnce(Output) -> R {
- match matches.opt_str("r").as_ref().map(|s| &**s) {
- Some("rust") => Ok(rust_input(input, externs, edition, cg, matches, error_format, f)),
- Some(s) => Err(format!("unknown input format: {}", s)),
- None => Ok(rust_input(input, externs, edition, cg, matches, error_format, f))
- }
-}
-
-/// Extracts `--extern CRATE=PATH` arguments from `matches` and
-/// returns a map mapping crate names to their paths or else an
-/// error message.
-// FIXME(eddyb) This shouldn't be duplicated with `rustc::session`.
-fn parse_externs(matches: &getopts::Matches) -> Result<Externs, String> {
- let mut externs: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();
- for arg in &matches.opt_strs("extern") {
- let mut parts = arg.splitn(2, '=');
- let name = parts.next().ok_or("--extern value must not be empty".to_string())?;
- let location = parts.next().map(|s| s.to_string());
- if location.is_none() && !nightly_options::is_unstable_enabled(matches) {
- return Err("the `-Z unstable-options` flag must also be passed to \
- enable `--extern crate_name` without `=path`".to_string());
- }
- let name = name.to_string();
- externs.entry(name).or_default().insert(location);
- }
- Ok(Externs::new(externs))
-}
-
-/// Extracts `--extern-html-root-url` arguments from `matches` and returns a map of crate names to
-/// the given URLs. If an `--extern-html-root-url` argument was ill-formed, returns an error
-/// describing the issue.
-fn parse_extern_html_roots(matches: &getopts::Matches)
- -> Result<BTreeMap<String, String>, &'static str>
-{
- let mut externs = BTreeMap::new();
- for arg in &matches.opt_strs("extern-html-root-url") {
- let mut parts = arg.splitn(2, '=');
- let name = parts.next().ok_or("--extern-html-root-url must not be empty")?;
- let url = parts.next().ok_or("--extern-html-root-url must be of the form name=url")?;
- externs.insert(name.to_string(), url.to_string());
- }
-
- Ok(externs)
-}
-
/// Interprets the input file as a rust source file, passing it through the
/// compiler all the way through the analysis passes. The rustdoc output is then
/// generated from the cleaned AST of the crate.
///
/// This form of input will run all of the plug/cleaning passes
-fn rust_input<R, F>(cratefile: PathBuf,
- externs: Externs,
- edition: Edition,
- cg: CodegenOptions,
- matches: &getopts::Matches,
- error_format: ErrorOutputType,
- f: F) -> R
+fn rust_input<R, F>(options: config::Options, f: F) -> R
where R: 'static + Send,
F: 'static + Send + FnOnce(Output) -> R
{
- let default_passes = if matches.opt_present("no-defaults") {
- passes::DefaultPassOption::None
- } else if matches.opt_present("document-private-items") {
- passes::DefaultPassOption::Private
- } else {
- passes::DefaultPassOption::Default
- };
-
- let manual_passes = matches.opt_strs("passes");
- let plugins = matches.opt_strs("plugins");
-
// First, parse the crate and extract all relevant information.
- let mut paths = SearchPaths::new();
- for s in &matches.opt_strs("L") {
- paths.add_path(s, ErrorOutputType::default());
- }
- let mut cfgs = matches.opt_strs("cfg");
- cfgs.push("rustdoc".to_string());
- let triple = matches.opt_str("target").map(|target| {
- if target.ends_with(".json") {
- TargetTriple::TargetPath(PathBuf::from(target))
- } else {
- TargetTriple::TargetTriple(target)
- }
- });
- let maybe_sysroot = matches.opt_str("sysroot").map(PathBuf::from);
- let crate_name = matches.opt_str("crate-name");
- let crate_version = matches.opt_str("crate-version");
- let plugin_path = matches.opt_str("plugin-path");
-
info!("starting to run rustc");
- let display_warnings = matches.opt_present("display-warnings");
-
- let force_unstable_if_unmarked = matches.opt_strs("Z").iter().any(|x| {
- *x == "force-unstable-if-unmarked"
- });
- let treat_err_as_bug = matches.opt_strs("Z").iter().any(|x| {
- *x == "treat-err-as-bug"
- });
- let ui_testing = matches.opt_strs("Z").iter().any(|x| {
- *x == "ui-testing"
- });
-
- let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format);
let (tx, rx) = channel();
let result = rustc_driver::monitor(move || syntax::with_globals(move || {
- use rustc::session::config::Input;
-
- let (mut krate, renderinfo, passes) =
- core::run_core(paths, cfgs, externs, Input::File(cratefile), triple, maybe_sysroot,
- display_warnings, crate_name.clone(),
- force_unstable_if_unmarked, edition, cg, error_format,
- lint_opts, lint_cap, describe_lints, manual_passes, default_passes,
- treat_err_as_bug, ui_testing);
+ let crate_name = options.crate_name.clone();
+ let crate_version = options.crate_version.clone();
+ let (mut krate, renderinfo, renderopts, passes) = core::run_core(options);
info!("finished with rustc");
krate.version = crate_version;
- if !plugins.is_empty() {
- eprintln!("WARNING: --plugins no longer functions; see CVE-2018-1000622");
- }
-
- if !plugin_path.is_none() {
- eprintln!("WARNING: --plugin-path no longer functions; see CVE-2018-1000622");
- }
-
info!("Executing passes");
for pass in &passes {
krate = pass(krate);
}
- tx.send(f(Output { krate: krate, renderinfo: renderinfo, passes: passes })).unwrap();
+ tx.send(f(Output {
+ krate: krate,
+ renderinfo: renderinfo,
+ renderopts,
+ passes: passes
+ })).unwrap();
}));
match result {
Err(_) => panic::resume_unwind(Box::new(errors::FatalErrorMarker)),
}
}
-
-/// Prints deprecation warnings for deprecated options
-fn check_deprecated_options(matches: &getopts::Matches, diag: &errors::Handler) {
- let deprecated_flags = [
- "input-format",
- "output-format",
- "no-defaults",
- "passes",
- ];
-
- for flag in deprecated_flags.into_iter() {
- if matches.opt_present(flag) {
- let mut err = diag.struct_warn(&format!("the '{}' flag is considered deprecated",
- flag));
- err.warn("please see https://github.com/rust-lang/rust/issues/44136");
-
- if *flag == "no-defaults" {
- err.help("you may want to use --document-private-items");
- }
-
- err.emit();
- }
- }
-}
use std::default::Default;
use std::fs::File;
use std::io::prelude::*;
-use std::path::{PathBuf, Path};
+use std::path::PathBuf;
use std::cell::RefCell;
use errors;
-use getopts;
use testing;
-use rustc::session::search_paths::SearchPaths;
-use rustc::session::config::{Externs, CodegenOptions};
use syntax::source_map::DUMMY_SP;
use syntax::feature_gate::UnstableFeatures;
-use syntax::edition::Edition;
-use externalfiles::{ExternalHtml, LoadStringError, load_string};
+use externalfiles::{LoadStringError, load_string};
+use config::{Options, RenderOptions};
use html::escape::Escape;
use html::markdown;
use html::markdown::{ErrorCodes, IdMap, Markdown, MarkdownWithToc, find_testable_code};
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
-pub fn render(input: &Path, mut output: PathBuf, matches: &getopts::Matches,
- external_html: &ExternalHtml, include_toc: bool, diag: &errors::Handler) -> isize {
+pub fn render(input: PathBuf, options: RenderOptions, diag: &errors::Handler) -> isize {
+ let mut output = options.output;
output.push(input.file_stem().unwrap());
output.set_extension("html");
let mut css = String::new();
- for name in &matches.opt_strs("markdown-css") {
+ for name in &options.markdown_css {
let s = format!("<link rel=\"stylesheet\" type=\"text/css\" href=\"{}\">\n", name);
css.push_str(&s)
}
- let input_str = match load_string(input, diag) {
+ let input_str = match load_string(&input, diag) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
Err(LoadStringError::BadUtf8) => return 2,
};
- if let Some(playground) = matches.opt_str("markdown-playground-url").or(
- matches.opt_str("playground-url")) {
+ let playground_url = options.markdown_playground_url
+ .or(options.playground_url);
+ if let Some(playground) = playground_url {
markdown::PLAYGROUND.with(|s| { *s.borrow_mut() = Some((None, playground)); });
}
diag.struct_err(&format!("{}: {}", output.display(), e)).emit();
return 4;
}
- Ok(f) => f
+ Ok(f) => f,
};
let (metadata, text) = extract_leading_metadata(&input_str);
let mut ids = IdMap::new();
let error_codes = ErrorCodes::from(UnstableFeatures::from_environment().is_nightly_build());
- let text = if include_toc {
+ let text = if !options.markdown_no_toc {
MarkdownWithToc(text, RefCell::new(&mut ids), error_codes).to_string()
} else {
Markdown(text, &[], RefCell::new(&mut ids), error_codes).to_string()
</html>"#,
title = Escape(title),
css = css,
- in_header = external_html.in_header,
- before_content = external_html.before_content,
+ in_header = options.external_html.in_header,
+ before_content = options.external_html.before_content,
text = text,
- after_content = external_html.after_content,
+ after_content = options.external_html.after_content,
);
match err {
}
/// Run any tests/code examples in the markdown file `input`.
-pub fn test(input: &str, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
- mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>,
- display_warnings: bool, linker: Option<PathBuf>, edition: Edition,
- cg: CodegenOptions, diag: &errors::Handler) -> isize {
- let input_str = match load_string(input, diag) {
+pub fn test(mut options: Options, diag: &errors::Handler) -> isize {
+ let input_str = match load_string(&options.input, diag) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
Err(LoadStringError::BadUtf8) => return 2,
let mut opts = TestOptions::default();
opts.no_crate_inject = true;
- opts.display_warnings = display_warnings;
- let mut collector = Collector::new(input.to_owned(), cfgs, libs, cg, externs,
- true, opts, maybe_sysroot, None,
- Some(PathBuf::from(input)),
- linker, edition);
+ opts.display_warnings = options.display_warnings;
+ let mut collector = Collector::new(options.input.display().to_string(), options.cfgs,
+ options.libs, options.codegen_options, options.externs,
+ true, opts, options.maybe_sysroot, None,
+ Some(options.input),
+ options.linker, options.edition);
collector.set_position(DUMMY_SP);
let codes = ErrorCodes::from(UnstableFeatures::from_environment().is_nightly_build());
let res = find_testable_code(&input_str, &mut collector, codes);
if let Err(err) = res {
diag.span_warn(DUMMY_SP, &err.to_string());
}
- test_args.insert(0, "rustdoctest".to_string());
- testing::test_main(&test_args, collector.tests,
- testing::Options::new().display_output(display_warnings));
+ options.test_args.insert(0, "rustdoctest".to_string());
+ testing::test_main(&options.test_args, collector.tests,
+ testing::Options::new().display_output(options.display_warnings));
0
}
use std::ffi::OsString;
use std::io::prelude::*;
use std::io;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
use std::panic::{self, AssertUnwindSafe};
use std::process::Command;
use std::str;
use errors::emitter::ColorConfig;
use clean::Attributes;
+use config::Options;
use html::markdown::{self, ErrorCodes, LangString};
#[derive(Clone, Default)]
pub attrs: Vec<String>,
}
-pub fn run(input_path: &Path,
- cfgs: Vec<String>,
- libs: SearchPaths,
- externs: Externs,
- mut test_args: Vec<String>,
- crate_name: Option<String>,
- maybe_sysroot: Option<PathBuf>,
- display_warnings: bool,
- linker: Option<PathBuf>,
- edition: Edition,
- cg: CodegenOptions)
- -> isize {
- let input = config::Input::File(input_path.to_owned());
+pub fn run(mut options: Options) -> isize {
+ let input = config::Input::File(options.input.clone());
let sessopts = config::Options {
- maybe_sysroot: maybe_sysroot.clone().or_else(
+ maybe_sysroot: options.maybe_sysroot.clone().or_else(
|| Some(env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_path_buf())),
- search_paths: libs.clone(),
+ search_paths: options.libs.clone(),
crate_types: vec![config::CrateType::Dylib],
- cg: cg.clone(),
- externs: externs.clone(),
+ cg: options.codegen_options.clone(),
+ externs: options.externs.clone(),
unstable_features: UnstableFeatures::from_environment(),
lint_cap: Some(::rustc::lint::Level::Allow),
actually_rustdoc: true,
debugging_opts: config::DebuggingOptions {
..config::basic_debugging_options()
},
- edition,
+ edition: options.edition,
..config::Options::default()
};
driver::spawn_thread_pool(sessopts, |sessopts| {
Some(source_map.clone()));
let mut sess = session::build_session_(
- sessopts, Some(input_path.to_owned()), handler, source_map.clone(),
+ sessopts, Some(options.input), handler, source_map.clone(),
);
let codegen_backend = rustc_driver::get_codegen_backend(&sess);
let cstore = CStore::new(codegen_backend.metadata_loader());
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
- let mut cfg = config::build_configuration(&sess, config::parse_cfgspecs(cfgs.clone()));
+ let mut cfg = config::build_configuration(&sess,
+ config::parse_cfgspecs(options.cfgs.clone()));
target_features::add_configuration(&mut cfg, &sess, &*codegen_backend);
sess.parse_sess.config = cfg;
).expect("phase_2_configure_and_expand aborted in rustdoc!")
};
- let crate_name = crate_name.unwrap_or_else(|| {
+ let crate_name = options.crate_name.unwrap_or_else(|| {
::rustc_codegen_utils::link::find_crate_name(None, &hir_forest.krate().attrs, &input)
});
let mut opts = scrape_test_config(hir_forest.krate());
- opts.display_warnings |= display_warnings;
+ opts.display_warnings |= options.display_warnings;
let mut collector = Collector::new(
crate_name,
- cfgs,
- libs,
- cg,
- externs,
+ options.cfgs,
+ options.libs,
+ options.codegen_options,
+ options.externs,
false,
opts,
- maybe_sysroot,
+ options.maybe_sysroot,
Some(source_map),
- None,
- linker,
- edition
+ None,
+ options.linker,
+ options.edition
);
{
});
}
- test_args.insert(0, "rustdoctest".to_string());
+ options.test_args.insert(0, "rustdoctest".to_string());
- testing::test_main(&test_args,
+ testing::test_main(&options.test_args,
collector.tests.into_iter().collect(),
- testing::Options::new().display_output(display_warnings));
+ testing::Options::new().display_output(options.display_warnings));
0
})
}
dont_insert_main: bool,
opts: &TestOptions)
-> (String, usize) {
- let (crate_attrs, everything_else) = partition_source(s);
+ let (crate_attrs, everything_else, crates) = partition_source(s);
let everything_else = everything_else.trim();
let mut line_offset = 0;
let mut prog = String::new();
// are intended to be crate attributes.
prog.push_str(&crate_attrs);
+ // Uses libsyntax to parse the doctest and find if there's a main fn and the extern
+ // crate already is included.
+ let (already_has_main, already_has_extern_crate) = crate::syntax::with_globals(|| {
+ use crate::syntax::{ast, parse::{self, ParseSess}, source_map::FilePathMapping};
+ use crate::syntax_pos::FileName;
+ use errors::emitter::EmitterWriter;
+ use errors::Handler;
+
+ let filename = FileName::Anon;
+ let source = crates + &everything_else;
+
+    // Any errors in parsing should also appear when the doctest is compiled for real, so just
+    // send all the errors that libsyntax emits directly into a `Sink` instead of stderr.
+ let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let emitter = EmitterWriter::new(box io::sink(), None, false, false);
+ let handler = Handler::with_emitter(false, false, box emitter);
+ let sess = ParseSess::with_span_handler(handler, cm);
+
+ debug!("about to parse: \n{}", source);
+
+ let mut found_main = false;
+ let mut found_extern_crate = cratename.is_none();
+
+ let mut parser = match parse::maybe_new_parser_from_source_str(&sess, filename, source) {
+ Ok(p) => p,
+ Err(errs) => {
+ for mut err in errs {
+ err.cancel();
+ }
+
+ return (found_main, found_extern_crate);
+ }
+ };
+
+ loop {
+ match parser.parse_item() {
+ Ok(Some(item)) => {
+ if !found_main {
+ if let ast::ItemKind::Fn(..) = item.node {
+ if item.ident.as_str() == "main" {
+ found_main = true;
+ }
+ }
+ }
+
+ if !found_extern_crate {
+ if let ast::ItemKind::ExternCrate(original) = item.node {
+                            // This code will never be reached if `cratename` is `None` because
+                            // `found_extern_crate` is initialized to `true` if it is `None`.
+ let cratename = cratename.unwrap();
+
+ match original {
+ Some(name) => found_extern_crate = name.as_str() == cratename,
+ None => found_extern_crate = item.ident.as_str() == cratename,
+ }
+ }
+ }
+
+ if found_main && found_extern_crate {
+ break;
+ }
+ }
+ Ok(None) => break,
+ Err(mut e) => {
+ e.cancel();
+ break;
+ }
+ }
+ }
+
+ (found_main, found_extern_crate)
+ });
+
// Don't inject `extern crate std` because it's already injected by the
// compiler.
- if !s.contains("extern crate") && !opts.no_crate_inject && cratename != Some("std") {
+ if !already_has_extern_crate && !opts.no_crate_inject && cratename != Some("std") {
if let Some(cratename) = cratename {
+            // Make sure it's actually used if not included.
if s.contains(cratename) {
prog.push_str(&format!("extern crate {};\n", cratename));
line_offset += 1;
}
}
- // FIXME (#21299): prefer libsyntax or some other actual parser over this
- // best-effort ad hoc approach
- let already_has_main = s.lines()
- .map(|line| {
- let comment = line.find("//");
- if let Some(comment_begins) = comment {
- &line[0..comment_begins]
- } else {
- line
- }
- })
- .any(|code| code.contains("fn main"));
-
if dont_insert_main || already_has_main {
prog.push_str(everything_else);
} else {
}
// FIXME(aburka): use a real parser to deal with multiline attributes
-fn partition_source(s: &str) -> (String, String) {
+fn partition_source(s: &str) -> (String, String, String) {
let mut after_header = false;
let mut before = String::new();
+ let mut crates = String::new();
let mut after = String::new();
for line in s.lines() {
after.push_str(line);
after.push_str("\n");
} else {
+ if trimline.starts_with("#[macro_use] extern crate")
+ || trimline.starts_with("extern crate") {
+ crates.push_str(line);
+ crates.push_str("\n");
+ }
before.push_str(line);
before.push_str("\n");
}
}
- (before, after)
+ (before, after, crates)
}
pub trait Tester {
let output = make_test(input, None, false, &opts);
assert_eq!(output, (expected, 1));
}
+
+ #[test]
+ fn make_test_issues_21299_33731() {
+ let opts = TestOptions::default();
+
+ let input =
+"// fn main
+assert_eq!(2+2, 4);";
+
+ let expected =
+"#![allow(unused)]
+fn main() {
+// fn main
+assert_eq!(2+2, 4);
+}".to_string();
+
+ let output = make_test(input, None, false, &opts);
+ assert_eq!(output, (expected, 2));
+
+ let input =
+"extern crate hella_qwop;
+assert_eq!(asdf::foo, 4);";
+
+ let expected =
+"#![allow(unused)]
+extern crate hella_qwop;
+extern crate asdf;
+fn main() {
+assert_eq!(asdf::foo, 4);
+}".to_string();
+
+ let output = make_test(input, Some("asdf"), false, &opts);
+ assert_eq!(output, (expected, 3));
+ }
}
[dependencies]
alloc = { path = "../liballoc" }
-alloc_jemalloc = { path = "../liballoc_jemalloc", optional = true }
alloc_system = { path = "../liballoc_system" }
panic_unwind = { path = "../libpanic_unwind", optional = true }
panic_abort = { path = "../libpanic_abort" }
[features]
backtrace = []
-debug-jemalloc = ["alloc_jemalloc/debug"]
-jemalloc = ["alloc_jemalloc"]
-force_alloc_system = []
panic-unwind = ["panic_unwind"]
profiler = ["profiler_builtins"]
//! In a given program, the standard library has one “global” memory allocator
//! that is used for example by `Box<T>` and `Vec<T>`.
//!
-//! Currently the default global allocator is unspecified.
-//! The compiler may link to a version of [jemalloc] on some platforms,
-//! but this is not guaranteed.
-//! Libraries, however, like `cdylib`s and `staticlib`s are guaranteed
-//! to use the [`System`] by default.
+//! Currently the default global allocator is unspecified. Libraries, however,
+//! like `cdylib`s and `staticlib`s are guaranteed to use the [`System`] by
+//! default.
//!
-//! [jemalloc]: https://github.com/jemalloc/jemalloc
//! [`System`]: struct.System.html
//!
//! # The `#[global_allocator]` attribute
use hash::{Hash, Hasher, BuildHasher, SipHasher13};
use iter::{FromIterator, FusedIterator};
use mem::{self, replace};
-use ops::{Deref, Index};
+use ops::{Deref, DerefMut, Index};
use sys;
use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
return InternalEntry::TableIsEmpty;
}
- search_hashed_nonempty(table, hash, is_match)
+ search_hashed_nonempty(table, hash, is_match, true)
}
/// Search for a pre-hashed key when the hash map is known to be non-empty.
#[inline]
-fn search_hashed_nonempty<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F)
+fn search_hashed_nonempty<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
+ compare_hashes: bool)
-> InternalEntry<K, V, M>
where M: Deref<Target = RawTable<K, V>>,
F: FnMut(&K) -> bool
}
// If the hash doesn't match, it can't be this one..
- if hash == full.hash() {
+ if !compare_hashes || hash == full.hash() {
// If the key doesn't match, it can't be this one..
if is_match(full.read().0) {
return InternalEntry::Occupied { elem: full };
}
}
+/// Same as `search_hashed_nonempty` but for mutable access.
+#[inline]
+fn search_hashed_nonempty_mut<K, V, M, F>(table: M, hash: SafeHash, mut is_match: F,
+ compare_hashes: bool)
+ -> InternalEntry<K, V, M>
+ where M: DerefMut<Target = RawTable<K, V>>,
+ F: FnMut(&K) -> bool
+{
+ // Do not check the capacity as an extra branch could slow the lookup.
+
+ let size = table.size();
+ let mut probe = Bucket::new(table, hash);
+ let mut displacement = 0;
+
+ loop {
+ let mut full = match probe.peek() {
+ Empty(bucket) => {
+ // Found a hole!
+ return InternalEntry::Vacant {
+ hash,
+ elem: NoElem(bucket, displacement),
+ };
+ }
+ Full(bucket) => bucket,
+ };
+
+ let probe_displacement = full.displacement();
+
+ if probe_displacement < displacement {
+ // Found a luckier bucket than me.
+ // We can finish the search early if we hit any bucket
+ // with a lower distance to initial bucket than we've probed.
+ return InternalEntry::Vacant {
+ hash,
+ elem: NeqElem(full, probe_displacement),
+ };
+ }
+
+ // If the hash doesn't match, it can't be this one..
+ if hash == full.hash() || !compare_hashes {
+ // If the key doesn't match, it can't be this one..
+ if is_match(full.read_mut().0) {
+ return InternalEntry::Occupied { elem: full };
+ }
+ }
+ displacement += 1;
+ probe = full.next();
+ debug_assert!(displacement <= size);
+ }
+}
+
fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>)
-> (K, V, &mut RawTable<K, V>)
{
}
let hash = self.make_hash(q);
- search_hashed_nonempty(&self.table, hash, |k| q.eq(k.borrow()))
+ search_hashed_nonempty(&self.table, hash, |k| q.eq(k.borrow()), true)
.into_occupied_bucket()
}
}
let hash = self.make_hash(q);
- search_hashed_nonempty(&mut self.table, hash, |k| q.eq(k.borrow()))
+ search_hashed_nonempty(&mut self.table, hash, |k| q.eq(k.borrow()), true)
.into_occupied_bucket()
}
}
}
+impl<K, V, S> HashMap<K, V, S>
+ where K: Eq + Hash,
+ S: BuildHasher
+{
+ /// Creates a raw entry builder for the HashMap.
+ ///
+ /// Raw entries provide the lowest level of control for searching and
+ /// manipulating a map. They must be manually initialized with a hash and
+ /// then manually searched. After this, insertions into a vacant entry
+ /// still require an owned key to be provided.
+ ///
+ /// Raw entries are useful for such exotic situations as:
+ ///
+ /// * Hash memoization
+ /// * Deferring the creation of an owned key until it is known to be required
+ /// * Using a search key that doesn't work with the Borrow trait
+ /// * Using custom comparison logic without newtype wrappers
+ ///
+ /// Because raw entries provide much more low-level control, it's much easier
+ /// to put the HashMap into an inconsistent state which, while memory-safe,
+ /// will cause the map to produce seemingly random results. Higher-level and
+ /// more foolproof APIs like `entry` should be preferred when possible.
+ ///
+    /// In particular, the hash used to initialize the raw entry must still be
+ /// consistent with the hash of the key that is ultimately stored in the entry.
+ /// This is because implementations of HashMap may need to recompute hashes
+ /// when resizing, at which point only the keys are available.
+ ///
+ /// Raw entries give mutable access to the keys. This must not be used
+ /// to modify how the key would compare or hash, as the map will not re-evaluate
+ /// where the key should go, meaning the keys may become "lost" if their
+ /// location does not reflect their state. For instance, if you change a key
+ /// so that the map now contains keys which compare equal, search may start
+    /// acting erratically, with two keys randomly masking each other. Implementations
+ /// are free to assume this doesn't happen (within the limits of memory-safety).
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<K, V, S> {
+ self.reserve(1);
+ RawEntryBuilderMut { map: self }
+ }
+
+ /// Creates a raw immutable entry builder for the HashMap.
+ ///
+ /// Raw entries provide the lowest level of control for searching and
+ /// manipulating a map. They must be manually initialized with a hash and
+ /// then manually searched.
+ ///
+    /// This is useful for:
+ /// * Hash memoization
+ /// * Using a search key that doesn't work with the Borrow trait
+ /// * Using custom comparison logic without newtype wrappers
+ ///
+ /// Unless you are in such a situation, higher-level and more foolproof APIs like
+ /// `get` should be preferred.
+ ///
+ /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn raw_entry(&self) -> RawEntryBuilder<K, V, S> {
+ RawEntryBuilder { map: self }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V, S> PartialEq for HashMap<K, V, S>
where K: Eq + Hash,
}
}
+/// A builder for computing where in a HashMap a key-value pair would be stored.
+///
+/// See the [`HashMap::raw_entry_mut`] docs for usage examples.
+///
+/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+pub struct RawEntryBuilderMut<'a, K: 'a, V: 'a, S: 'a> {
+ map: &'a mut HashMap<K, V, S>,
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This is a lower-level version of [`Entry`].
+///
+/// This `enum` is constructed from the [`raw_entry`] method on [`HashMap`].
+///
+/// [`HashMap`]: struct.HashMap.html
+/// [`Entry`]: enum.Entry.html
+/// [`raw_entry`]: struct.HashMap.html#method.raw_entry
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> {
+ /// An occupied entry.
+ Occupied(RawOccupiedEntryMut<'a, K, V>),
+ /// A vacant entry.
+ Vacant(RawVacantEntryMut<'a, K, V, S>),
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`RawEntryMut`] enum.
+///
+/// [`RawEntryMut`]: enum.RawEntryMut.html
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a> {
+ elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`RawEntryMut`] enum.
+///
+/// [`RawEntryMut`]: enum.RawEntryMut.html
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+pub struct RawVacantEntryMut<'a, K: 'a, V: 'a, S: 'a> {
+ elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
+ hash_builder: &'a S,
+}
+
+/// A builder for computing where in a HashMap a key-value pair would be stored.
+///
+/// See the [`HashMap::raw_entry`] docs for usage examples.
+///
+/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+pub struct RawEntryBuilder<'a, K: 'a, V: 'a, S: 'a> {
+ map: &'a HashMap<K, V, S>,
+}
+
+impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S>
+ where S: BuildHasher,
+ K: Eq + Hash,
+{
+ /// Create a `RawEntryMut` from the given key.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S>
+ where K: Borrow<Q>,
+ Q: Hash + Eq
+ {
+ let mut hasher = self.map.hash_builder.build_hasher();
+ k.hash(&mut hasher);
+ self.from_key_hashed_nocheck(hasher.finish(), k)
+ }
+
+ /// Create a `RawEntryMut` from the given key and its hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S>
+ where K: Borrow<Q>,
+ Q: Eq
+ {
+ self.from_hash(hash, |q| q.borrow().eq(k))
+ }
+
+ fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> RawEntryMut<'a, K, V, S>
+ where for<'b> F: FnMut(&'b K) -> bool,
+ {
+ match search_hashed_nonempty_mut(&mut self.map.table,
+ SafeHash::new(hash),
+ is_match,
+ compare_hashes) {
+ InternalEntry::Occupied { elem } => {
+ RawEntryMut::Occupied(RawOccupiedEntryMut { elem })
+ }
+ InternalEntry::Vacant { elem, .. } => {
+ RawEntryMut::Vacant(RawVacantEntryMut {
+ elem,
+ hash_builder: &self.map.hash_builder,
+ })
+ }
+ InternalEntry::TableIsEmpty => {
+ unreachable!()
+ }
+ }
+ }
+ /// Create a `RawEntryMut` from the given hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_hash<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
+ where for<'b> F: FnMut(&'b K) -> bool,
+ {
+ self.search(hash, is_match, true)
+ }
+
+ /// Search possible locations for an element with hash `hash` until `is_match` returns true for
+ /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
+ /// hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn search_bucket<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
+ where for<'b> F: FnMut(&'b K) -> bool,
+ {
+ self.search(hash, is_match, false)
+ }
+}
+
+impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S>
+ where S: BuildHasher,
+{
+ /// Access an entry by key.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
+ where K: Borrow<Q>,
+ Q: Hash + Eq
+ {
+ let mut hasher = self.map.hash_builder.build_hasher();
+ k.hash(&mut hasher);
+ self.from_key_hashed_nocheck(hasher.finish(), k)
+ }
+
+ /// Access an entry by a key and its hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
+ where K: Borrow<Q>,
+ Q: Hash + Eq
+
+ {
+ self.from_hash(hash, |q| q.borrow().eq(k))
+ }
+
+ fn search<F>(self, hash: u64, is_match: F, compare_hashes: bool) -> Option<(&'a K, &'a V)>
+ where F: FnMut(&K) -> bool
+ {
+ match search_hashed_nonempty(&self.map.table,
+ SafeHash::new(hash),
+ is_match,
+ compare_hashes) {
+ InternalEntry::Occupied { elem } => Some(elem.into_refs()),
+ InternalEntry::Vacant { .. } => None,
+ InternalEntry::TableIsEmpty => unreachable!(),
+ }
+ }
+
+ /// Access an entry by hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
+ where F: FnMut(&K) -> bool
+ {
+ self.search(hash, is_match, true)
+ }
+
+ /// Search possible locations for an element with hash `hash` until `is_match` returns true for
+ /// one of them. There is no guarantee that all keys passed to `is_match` will have the provided
+ /// hash.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn search_bucket<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
+ where F: FnMut(&K) -> bool
+ {
+ self.search(hash, is_match, false)
+ }
+}
+
+impl<'a, K, V, S> RawEntryMut<'a, K, V, S> {
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// mutable references to the key and value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ ///
+ /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 12).1 += 10;
+ /// assert_eq!(map["poneyland"], 22);
+ /// ```
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V)
+ where K: Hash,
+ S: BuildHasher,
+ {
+ match self {
+ RawEntryMut::Occupied(entry) => entry.into_key_value(),
+ RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns mutable references to the key and value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, String> = HashMap::new();
+ ///
+ /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| {
+ /// ("poneyland", "hoho".to_string())
+ /// });
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn or_insert_with<F>(self, default: F) -> (&'a mut K, &'a mut V)
+ where F: FnOnce() -> (K, V),
+ K: Hash,
+ S: BuildHasher,
+ {
+ match self {
+ RawEntryMut::Occupied(entry) => entry.into_key_value(),
+ RawEntryMut::Vacant(entry) => {
+ let (k, v) = default();
+ entry.insert(k, v)
+ }
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// map.raw_entry_mut()
+ /// .from_key("poneyland")
+ /// .and_modify(|_k, v| { *v += 1 })
+ /// .or_insert("poneyland", 42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.raw_entry_mut()
+ /// .from_key("poneyland")
+ /// .and_modify(|_k, v| { *v += 1 })
+ /// .or_insert("poneyland", 0);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where F: FnOnce(&mut K, &mut V)
+ {
+ match self {
+ RawEntryMut::Occupied(mut entry) => {
+ {
+ let (k, v) = entry.get_key_value_mut();
+ f(k, v);
+ }
+ RawEntryMut::Occupied(entry)
+ },
+ RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry),
+ }
+ }
+}
+
+impl<'a, K, V> RawOccupiedEntryMut<'a, K, V> {
+ /// Gets a reference to the key in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn key(&self) -> &K {
+ self.elem.read().0
+ }
+
+ /// Gets a mutable reference to the key in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn key_mut(&mut self) -> &mut K {
+ self.elem.read_mut().0
+ }
+
+ /// Converts the entry into a mutable reference to the key in the entry
+ /// with a lifetime bound to the map itself.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn into_key(self) -> &'a mut K {
+ self.elem.into_mut_refs().0
+ }
+
+ /// Gets a reference to the value in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn get(&self) -> &V {
+ self.elem.read().1
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the value in the entry
+ /// with a lifetime bound to the map itself.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn into_mut(self) -> &'a mut V {
+ self.elem.into_mut_refs().1
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn get_mut(&mut self) -> &mut V {
+ self.elem.read_mut().1
+ }
+
+ /// Gets a reference to the key and value in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn get_key_value(&mut self) -> (&K, &V) {
+ self.elem.read()
+ }
+
+ /// Gets a mutable reference to the key and value in the entry.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
+ self.elem.read_mut()
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry
+ /// with a lifetime bound to the map itself.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn into_key_value(self) -> (&'a mut K, &'a mut V) {
+ self.elem.into_mut_refs()
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn insert(&mut self, value: V) -> V {
+ mem::replace(self.get_mut(), value)
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn insert_key(&mut self, key: K) -> K {
+ mem::replace(self.key_mut(), key)
+ }
+
+ /// Takes the value out of the entry, and returns it.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn remove(self) -> V {
+ pop_internal(self.elem).1
+ }
+
+ /// Take the ownership of the key and value from the map.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn remove_entry(self) -> (K, V) {
+ let (k, v, _) = pop_internal(self.elem);
+ (k, v)
+ }
+}
+
+impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> {
+ /// Sets the value of the entry with the VacantEntry's key,
+ /// and returns a mutable reference to it.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
+ where K: Hash,
+ S: BuildHasher,
+ {
+ let mut hasher = self.hash_builder.build_hasher();
+ key.hash(&mut hasher);
+ self.insert_hashed_nocheck(hasher.finish(), key, value)
+ }
+
+ /// Sets the value of the entry with the VacantEntry's key,
+ /// and returns a mutable reference to it.
+ #[unstable(feature = "hash_raw_entry", issue = "54043")]
+ pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) {
+ let hash = SafeHash::new(hash);
+ let b = match self.elem {
+ NeqElem(mut bucket, disp) => {
+ if disp >= DISPLACEMENT_THRESHOLD {
+ bucket.table_mut().set_tag(true);
+ }
+ robin_hood(bucket, disp, hash, key, value)
+ },
+ NoElem(mut bucket, disp) => {
+ if disp >= DISPLACEMENT_THRESHOLD {
+ bucket.table_mut().set_tag(true);
+ }
+ bucket.put(hash, key, value)
+ },
+ };
+ b.into_mut_refs()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+impl<'a, K, V, S> Debug for RawEntryBuilderMut<'a, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("RawEntryBuilder")
+ .finish()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+impl<'a, K: Debug, V: Debug, S> Debug for RawEntryMut<'a, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ RawEntryMut::Vacant(ref v) => {
+ f.debug_tuple("RawEntry")
+ .field(v)
+ .finish()
+ }
+ RawEntryMut::Occupied(ref o) => {
+ f.debug_tuple("RawEntry")
+ .field(o)
+ .finish()
+ }
+ }
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+impl<'a, K: Debug, V: Debug> Debug for RawOccupiedEntryMut<'a, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("RawOccupiedEntryMut")
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+impl<'a, K, V, S> Debug for RawVacantEntryMut<'a, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("RawVacantEntryMut")
+ .finish()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "54043")]
+impl<'a, K, V, S> Debug for RawEntryBuilder<'a, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("RawEntryBuilder")
+ .finish()
+ }
+}
+
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
}
}
+ #[test]
+ fn test_raw_entry() {
+ use super::RawEntryMut::{Occupied, Vacant};
+
+ let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
+ use core::hash::{BuildHasher, Hash, Hasher};
+
+ let mut hasher = map.hasher().build_hasher();
+ k.hash(&mut hasher);
+ hasher.finish()
+ };
+
+ // Existing key (insert)
+ match map.raw_entry_mut().from_key(&1) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ let hash1 = compute_hash(&map, 1);
+ assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100));
+ assert_eq!(map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), (&1, &100));
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), (&1, &100));
+ assert_eq!(map.raw_entry().search_bucket(hash1, |k| *k == 1).unwrap(), (&1, &100));
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.raw_entry_mut().from_key(&2) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ let v = view.get_mut();
+ let new_v = (*v) * 10;
+ *v = new_v;
+ }
+ }
+ let hash2 = compute_hash(&map, 2);
+ assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200));
+ assert_eq!(map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), (&2, &200));
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), (&2, &200));
+ assert_eq!(map.raw_entry().search_bucket(hash2, |k| *k == 2).unwrap(), (&2, &200));
+ assert_eq!(map.len(), 6);
+
+ // Existing key (take)
+ let hash3 = compute_hash(&map, 3);
+ match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) {
+ Vacant(_) => unreachable!(),
+ Occupied(view) => {
+ assert_eq!(view.remove_entry(), (3, 30));
+ }
+ }
+ assert_eq!(map.raw_entry().from_key(&3), None);
+ assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None);
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None);
+ assert_eq!(map.raw_entry().search_bucket(hash3, |k| *k == 3), None);
+ assert_eq!(map.len(), 5);
+
+
+ // Nonexistent key (insert)
+ match map.raw_entry_mut().from_key(&10) {
+ Occupied(_) => unreachable!(),
+ Vacant(view) => {
+ assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000));
+ }
+ }
+ assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000));
+ assert_eq!(map.len(), 6);
+
+ // Ensure all lookup methods produce equivalent results.
+ for k in 0..12 {
+ let hash = compute_hash(&map, k);
+ let v = map.get(&k).cloned();
+ let kv = v.as_ref().map(|v| (&k, v));
+
+ assert_eq!(map.raw_entry().from_key(&k), kv);
+ assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv);
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
+ assert_eq!(map.raw_entry().search_bucket(hash, |q| *q == k), kv);
+
+ match map.raw_entry_mut().from_key(&k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ match map.raw_entry_mut().from_hash(hash, |q| *q == k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ match map.raw_entry_mut().search_bucket(hash, |q| *q == k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ }
+ }
+
}
///
/// [changes]: ../io/index.html#platform-specific-behavior
///
+/// **NOTE**: If a parent of the given path doesn't exist, this function will
+/// return an error. To create a directory and all its missing parents at the
+/// same time, use the [`create_dir_all`] function.
+///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * User lacks permissions to create directory at `path`.
+/// * A parent of the given path doesn't exist. (To create a directory and all
+/// its missing parents at the same time, use the [`create_dir_all`]
+/// function.)
/// * `path` already exists.
///
/// # Examples
#![feature(cfg_target_vendor)]
#![feature(char_error_internals)]
#![feature(compiler_builtins_lib)]
-#![cfg_attr(stage0, feature(min_const_fn))]
#![feature(const_int_ops)]
#![feature(const_ip)]
#![feature(const_raw_ptr_deref)]
#![feature(prelude_import)]
#![feature(ptr_internals)]
#![feature(raw)]
+#![feature(hash_raw_entry)]
#![feature(rustc_attrs)]
#![feature(rustc_const_unstable)]
#![feature(std_internals)]
#![default_lib_allocator]
-// Always use alloc_system during stage0 since we don't know if the alloc_*
-// crate the stage0 compiler will pick by default is enabled (e.g.
-// if the user has disabled jemalloc in `./configure`).
-// `force_alloc_system` is *only* intended as a workaround for local rebuilds
-// with a rustc without jemalloc.
-// FIXME(#44236) shouldn't need MSVC logic
-#[cfg(all(not(target_env = "msvc"),
- any(all(stage0, not(test)), feature = "force_alloc_system")))]
+#[cfg(stage0)]
#[global_allocator]
static ALLOC: alloc_system::System = alloc_system::System;
#[cfg(target_has_atomic = "64")]
#[unstable(feature = "integer_atomics", issue = "32976")]
impl RefUnwindSafe for atomic::AtomicI64 {}
+#[cfg(all(not(stage0), target_has_atomic = "128"))]
+#[unstable(feature = "integer_atomics", issue = "32976")]
+impl RefUnwindSafe for atomic::AtomicI128 {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
#[cfg(target_has_atomic = "64")]
#[unstable(feature = "integer_atomics", issue = "32976")]
impl RefUnwindSafe for atomic::AtomicU64 {}
+#[cfg(all(not(stage0), target_has_atomic = "128"))]
+#[unstable(feature = "integer_atomics", issue = "32976")]
+impl RefUnwindSafe for atomic::AtomicU128 {}
#[cfg(target_has_atomic = "8")]
#[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
pub fn get_argv(&self) -> &Vec<*const c_char> {
&self.argv.0
}
+ #[cfg(not(target_os = "fuchsia"))]
+ pub fn get_program(&self) -> &CString {
+ return &self.program;
+ }
#[allow(dead_code)]
pub fn get_cwd(&self) -> &Option<CString> {
pub fn as_ptr(&self) -> *const *const c_char {
self.ptrs.as_ptr()
}
+ #[cfg(not(target_os = "fuchsia"))]
+ pub fn get_items(&self) -> &[CString] {
+ return &self.items;
+ }
}
fn construct_envp(env: BTreeMap<DefaultEnvKey, OsString>, saw_nul: &mut bool) -> CStringArray {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use env;
+use ffi::CString;
use io::{self, Error, ErrorKind};
use libc::{self, c_int, gid_t, pid_t, uid_t};
use ptr;
return Ok((ret, ours))
}
+ let possible_paths = self.compute_possible_paths(envp.as_ref());
+
let (input, output) = sys::pipe::anon_pipe()?;
let pid = unsafe {
match cvt(libc::fork())? {
0 => {
drop(input);
- let err = self.do_exec(theirs, envp.as_ref());
+ let err = self.do_exec(theirs, envp.as_ref(), possible_paths);
let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32;
let bytes = [
(errno >> 24) as u8,
"nul byte found in provided data")
}
+ let possible_paths = self.compute_possible_paths(envp.as_ref());
match self.setup_io(default, true) {
- Ok((_, theirs)) => unsafe { self.do_exec(theirs, envp.as_ref()) },
+ Ok((_, theirs)) => unsafe { self.do_exec(theirs, envp.as_ref(), possible_paths) },
Err(e) => e,
}
}
+ fn compute_possible_paths(&self, maybe_envp: Option<&CStringArray>) -> Option<Vec<CString>> {
+ let program = self.get_program().as_bytes();
+ if program.contains(&b'/') {
+ return None;
+ }
+ // Outside the match so we can borrow it for the lifetime of the function.
+ let parent_path = env::var("PATH").ok();
+ let paths = match maybe_envp {
+ Some(envp) => {
+ match envp.get_items().iter().find(|var| var.as_bytes().starts_with(b"PATH=")) {
+ Some(p) => &p.as_bytes()[5..],
+ None => return None,
+ }
+ },
+ // maybe_envp is None if the process isn't changing the parent's env at all.
+ None => {
+ match parent_path.as_ref() {
+ Some(p) => p.as_bytes(),
+ None => return None,
+ }
+ },
+ };
+
+ let mut possible_paths = vec![];
+ for path in paths.split(|p| *p == b':') {
+ let mut binary_path = Vec::with_capacity(program.len() + path.len() + 1);
+ binary_path.extend_from_slice(path);
+ binary_path.push(b'/');
+ binary_path.extend_from_slice(program);
+ let c_binary_path = CString::new(binary_path).unwrap();
+ possible_paths.push(c_binary_path);
+ }
+ return Some(possible_paths);
+ }
+
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
unsafe fn do_exec(
&mut self,
stdio: ChildPipes,
- maybe_envp: Option<&CStringArray>
+ maybe_envp: Option<&CStringArray>,
+ maybe_possible_paths: Option<Vec<CString>>,
) -> io::Error {
use sys::{self, cvt_r};
if let Some(ref cwd) = *self.get_cwd() {
t!(cvt(libc::chdir(cwd.as_ptr())));
}
- if let Some(envp) = maybe_envp {
- *sys::os::environ() = envp.as_ptr();
- }
// emscripten has no signal support.
#[cfg(not(any(target_os = "emscripten")))]
t!(callback());
}
- libc::execvp(self.get_argv()[0], self.get_argv().as_ptr());
- io::Error::last_os_error()
+ // If the program isn't an absolute path, and our environment contains a PATH var, then we
+ // implement the PATH traversal ourselves so that it honors the child's PATH instead of the
+ // parent's. This mirrors the logic that exists in glibc's execvpe, except using the
+ // child's env to fetch PATH.
+ match maybe_possible_paths {
+ Some(possible_paths) => {
+ let mut pending_error = None;
+ for path in possible_paths {
+ libc::execve(
+ path.as_ptr(),
+ self.get_argv().as_ptr(),
+ maybe_envp.map(|envp| envp.as_ptr()).unwrap_or_else(|| *sys::os::environ())
+ );
+ let err = io::Error::last_os_error();
+ match err.kind() {
+ io::ErrorKind::PermissionDenied => {
+ // If we saw a PermissionDenied, and none of the other entries in
+                            // $PATH are successful, then we'll return the first EACCES we see.
+ if pending_error.is_none() {
+ pending_error = Some(err);
+ }
+ },
+ // Errors which indicate we failed to find a file are ignored and we try
+ // the next entry in the path.
+ io::ErrorKind::NotFound | io::ErrorKind::TimedOut => {
+ continue
+ },
+ // Any other error means we found a file and couldn't execute it.
+ _ => {
+ return err;
+ }
+ }
+ }
+ if let Some(err) = pending_error {
+ return err;
+ }
+ return io::Error::from_raw_os_error(libc::ENOENT);
+ },
+ _ => {
+ libc::execve(
+ self.get_argv()[0],
+ self.get_argv().as_ptr(),
+ maybe_envp.map(|envp| envp.as_ptr()).unwrap_or_else(|| *sys::os::environ())
+ );
+ return io::Error::last_os_error()
+ }
+ }
}
#[cfg(not(any(target_os = "macos", target_os = "freebsd",
use cell::UnsafeCell;
use fmt;
+use hint;
use mem;
/// A thread local storage key which owns its contents.
// process multiple declarations
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => (
- __thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
- thread_local!($($rest)*);
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
+ $crate::thread_local!($($rest)*);
);
// handle a single declaration
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
- __thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
);
}
};
($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
$(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
- __thread_local_inner!(@key $(#[$attr])* $vis $name, $t, $init);
+ $crate::__thread_local_inner!(@key $(#[$attr])* $vis $name, $t, $init);
}
}
// operations a little differently and make this safe to call.
mem::replace(&mut *ptr, Some(value));
- (*ptr).as_ref().unwrap()
+ // After storing `Some` we want to get a reference to the contents of
+ // what we just stored. While we could use `unwrap` here and it should
+            // always work, it empirically doesn't seem to always get optimized away,
+ // which means that using something like `try_with` can pull in
+ // panicking code and cause a large size bloat.
+ match *ptr {
+ Some(ref x) => x,
+ None => hint::unreachable_unchecked(),
+ }
}
/// Acquires a reference to the value in this TLS key.
_ => panic!("inconsistent state in unpark"),
}
- // Coordinate wakeup through the mutex and a condvar notification
- let _lock = self.inner.lock.lock().unwrap();
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ drop(self.inner.lock.lock().unwrap());
self.inner.cvar.notify_one()
}
}
/// The AST represents all type param bounds as types.
-/// typeck::collect::compute_bounds matches these against
-/// the "special" built-in traits (see middle::lang_items) and
-/// detects Copy, Send and Sync.
+/// `typeck::collect::compute_bounds` matches these against
+/// the "special" built-in traits (see `middle::lang_items`) and
+/// detects `Copy`, `Send` and `Sync`.
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
pub enum GenericBound {
Trait(PolyTraitRef, TraitBoundModifier),
pub struct MacroExpander<'a, 'b:'a> {
pub cx: &'a mut ExtCtxt<'b>,
- monotonic: bool, // c.f. `cx.monotonic_expander()`
+ monotonic: bool, // cf. `cx.monotonic_expander()`
}
impl<'a, 'b> MacroExpander<'a, 'b> {
add_derived_markers(&mut self.cx, item.span(), &traits, item.clone());
let derives = derives.entry(invoc.expansion_data.mark).or_default();
+ derives.reserve(traits.len());
+ invocations.reserve(traits.len());
for path in &traits {
let mark = Mark::fresh(self.cx.current_expansion.mark);
derives.push(mark);
"proc_macro_hygiene",
self.span,
GateIssue::Language,
- &format!("procedural macros cannot expand to macro definitions"),
+ "procedural macros cannot expand to macro definitions",
);
}
visit::walk_item(self, i);
}
},
AstFragmentKind::Ty => AstFragment::Ty(self.parse_ty()?),
- AstFragmentKind::Pat => AstFragment::Pat(self.parse_pat()?),
+ AstFragmentKind::Pat => AstFragment::Pat(self.parse_pat(None)?),
})
}
}
pub fn parse_pat_panic(parser: &mut Parser) -> P<Pat> {
- panictry!(parser.parse_pat())
+ panictry!(parser.parse_pat(None))
}
pub fn parse_arm_panic(parser: &mut Parser) -> Arm {
idx: usize,
}
+type NamedMatchVec = SmallVec<[NamedMatch; 4]>;
+
/// Represents a single "position" (aka "matcher position", aka "item"), as described in the module
/// documentation.
#[derive(Clone)]
/// all bound matches from the submatcher into the shared top-level `matches` vector. If `sep`
/// and `up` are `Some`, then `matches` is _not_ the shared top-level list. Instead, if one
/// wants the shared `matches`, one should use `up.matches`.
- matches: Vec<Rc<Vec<NamedMatch>>>,
+ matches: Box<[Rc<NamedMatchVec>]>,
/// The position in `matches` corresponding to the first metavar in this matcher's sequence of
/// token trees. In other words, the first metavar in the first token of `top_elts` corresponds
/// to `matches[match_lo]`.
})
}
-/// Initialize `len` empty shared `Vec`s to be used to store matches of metavars.
-fn create_matches(len: usize) -> Vec<Rc<Vec<NamedMatch>>> {
- (0..len).into_iter().map(|_| Rc::new(Vec::new())).collect()
+/// `len` `Vec`s (initially shared and empty) that will store matches of metavars.
+fn create_matches(len: usize) -> Box<[Rc<NamedMatchVec>]> {
+ if len == 0 {
+ vec![]
+ } else {
+ let empty_matches = Rc::new(SmallVec::new());
+ vec![empty_matches.clone(); len]
+ }.into_boxed_slice()
}
/// Generate the top-level matcher position in which the "dot" is before the first token of the
/// token tree it was derived from.
#[derive(Debug, Clone)]
pub enum NamedMatch {
- MatchedSeq(Rc<Vec<NamedMatch>>, DelimSpan),
+ MatchedSeq(Rc<NamedMatchVec>, DelimSpan),
MatchedNonterminal(Rc<Nonterminal>),
}
new_item.match_cur += seq.num_captures;
new_item.idx += 1;
for idx in item.match_cur..item.match_cur + seq.num_captures {
- new_item.push_match(idx, MatchedSeq(Rc::new(vec![]), sp));
+ new_item.push_match(idx, MatchedSeq(Rc::new(smallvec![]), sp));
}
cur_items.push(new_item);
}
FatalError.raise();
}
},
- "pat" => token::NtPat(panictry!(p.parse_pat())),
+ "pat" => token::NtPat(panictry!(p.parse_pat(None))),
"expr" => token::NtExpr(panictry!(p.parse_expr())),
"literal" => token::NtLiteral(panictry!(p.parse_literal_maybe_minus())),
"ty" => token::NtTy(panictry!(p.parse_ty())),
(active, abi_thiscall, "1.19.0", None, None),
// Allows a test to fail without failing the whole suite
- (active, allow_fail, "1.19.0", Some(42219), None),
+ (active, allow_fail, "1.19.0", Some(46488), None),
// Allows unsized tuple coercion.
(active, unsized_tuple_coercion, "1.20.0", Some(42877), None),
(active, non_exhaustive, "1.22.0", Some(44109), None),
// `crate` as visibility modifier, synonymous to `pub(crate)`
- (active, crate_visibility_modifier, "1.23.0", Some(45388), None),
+ (active, crate_visibility_modifier, "1.23.0", Some(53120), None),
// extern types
(active, extern_types, "1.23.0", Some(43467), None),
(active, generic_associated_types, "1.23.0", Some(44265), None),
// `extern` in paths
- (active, extern_in_paths, "1.23.0", Some(44660), None),
+ (active, extern_in_paths, "1.23.0", Some(55600), None),
// Use `?` as the Kleene "at most one" operator
(active, macro_at_most_once_rep, "1.25.0", Some(48075), None),
// Infer static outlives requirements; RFC 2093
- (active, infer_static_outlives_requirements, "1.26.0", Some(44493), None),
+ (active, infer_static_outlives_requirements, "1.26.0", Some(54185), None),
// Multiple patterns with `|` in `if let` and `while let`
(active, if_while_or_patterns, "1.26.0", Some(48215), None),
// Integer match exhaustiveness checking
(active, exhaustive_integer_patterns, "1.30.0", Some(50907), None),
- // RFC 2070: #[panic_implementation] / #[panic_handler]
- (active, panic_implementation, "1.28.0", Some(44489), None),
-
// #[doc(keyword = "...")]
(active, doc_keyword, "1.28.0", Some(51315), None),
(active, test_2018_feature, "1.31.0", Some(0), Some(Edition::Edition2018)),
// Support for arbitrary delimited token streams in non-macro attributes
- (active, unrestricted_attribute_tokens, "1.30.0", Some(44690), None),
+ (active, unrestricted_attribute_tokens, "1.30.0", Some(55208), None),
// Allows `use x::y;` to resolve through `self::x`, not just `::x`
(active, uniform_paths, "1.30.0", Some(53130), None),
(active, underscore_const_names, "1.31.0", Some(54912), None),
// `extern crate foo as bar;` puts `bar` into extern prelude.
- (active, extern_crate_item_prelude, "1.31.0", Some(54658), None),
+ (active, extern_crate_item_prelude, "1.31.0", Some(55599), None),
// `reason = ` in lint attributes and `expect` lint attribute
(active, lint_reasons, "1.31.0", Some(54503), None),
Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
(removed, proc_macro_gen, "1.27.0", Some(54727), None,
Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
+ (removed, panic_implementation, "1.28.0", Some(44489), None,
+ Some("subsumed by `#[panic_handler]`")),
);
declare_features! (
"infer 'static lifetime requirements",
cfg_fn!(infer_static_outlives_requirements))),
- // RFC 2070 (deprecated attribute name)
- ("panic_implementation",
- Normal,
- Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/44489\
- #issuecomment-415140224",
- Some("replace this attribute with `#[panic_handler]`")),
- "panic_implementation",
- "this attribute was renamed to `panic_handler`",
- cfg_fn!(panic_implementation))),
-
// RFC 2070
("panic_handler", Normal, Ungated),
if name == "packed" {
gate_feature_post!(&self, repr_packed, attr.span,
"the `#[repr(packed(n))]` attribute \
- is experimental");
+ is experimental");
}
}
}
}
}
- ast::ItemKind::TraitAlias(..) => {
- gate_feature_post!(&self, trait_alias,
- i.span,
- "trait aliases are not yet fully implemented");
- }
-
ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, _) => {
if polarity == ast::ImplPolarity::Negative {
gate_feature_post!(&self, optin_builtin_traits,
"auto traits are experimental and possibly buggy");
}
+ ast::ItemKind::TraitAlias(..) => {
+ gate_feature_post!(
+ &self,
+ trait_alias,
+ i.span,
+ "trait aliases are experimental"
+ );
+ }
+
ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => {
let msg = "`macro` is experimental";
gate_feature_post!(&self, decl_macro, i.span, msg);
pub struct JsonEmitter {
dst: Box<dyn Write + Send>,
registry: Option<Registry>,
- cm: Lrc<dyn SourceMapper + sync::Send + sync::Sync>,
+ sm: Lrc<dyn SourceMapper + sync::Send + sync::Sync>,
pretty: bool,
ui_testing: bool,
}
impl JsonEmitter {
pub fn stderr(registry: Option<Registry>,
- code_map: Lrc<SourceMap>,
+ source_map: Lrc<SourceMap>,
pretty: bool) -> JsonEmitter {
JsonEmitter {
dst: Box::new(io::stderr()),
registry,
- cm: code_map,
+ sm: source_map,
pretty,
ui_testing: false,
}
pub fn new(dst: Box<dyn Write + Send>,
registry: Option<Registry>,
- code_map: Lrc<SourceMap>,
+ source_map: Lrc<SourceMap>,
pretty: bool) -> JsonEmitter {
JsonEmitter {
dst,
registry,
- cm: code_map,
+ sm: source_map,
pretty,
ui_testing: false,
}
}
let buf = BufWriter::default();
let output = buf.clone();
- EmitterWriter::new(Box::new(buf), Some(je.cm.clone()), false, false)
+ EmitterWriter::new(Box::new(buf), Some(je.sm.clone()), false, false)
.ui_testing(je.ui_testing).emit(db);
let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
let output = String::from_utf8(output).unwrap();
mut backtrace: vec::IntoIter<MacroBacktrace>,
je: &JsonEmitter)
-> DiagnosticSpan {
- let start = je.cm.lookup_char_pos(span.lo());
- let end = je.cm.lookup_char_pos(span.hi());
+ let start = je.sm.lookup_char_pos(span.lo());
+ let end = je.sm.lookup_char_pos(span.hi());
let backtrace_step = backtrace.next().map(|bt| {
let call_site =
Self::from_span_full(bt.call_site,
/// of `span` gets a DiagnosticSpanLine, with the highlight indicating the
/// `span` within the line.
fn from_span(span: Span, je: &JsonEmitter) -> Vec<DiagnosticSpanLine> {
- je.cm.span_to_lines(span)
+ je.sm.span_to_lines(span)
.map(|lines| {
let fm = &*lines.file;
lines.lines
})
}
+// A variant of 'panictry!' that works on a Vec<Diagnostic> instead of a single DiagnosticBuilder.
+macro_rules! panictry_buffer {
+ ($handler:expr, $e:expr) => ({
+ use std::result::Result::{Ok, Err};
+ use errors::{FatalError, DiagnosticBuilder};
+ match $e {
+ Ok(e) => e,
+ Err(errs) => {
+ for e in errs {
+ DiagnosticBuilder::new_diagnostic($handler, e).emit();
+ }
+ FatalError.raise()
+ }
+ }
+ })
+}
+
#[macro_export]
macro_rules! unwrap_or {
($opt:expr, $default:expr) => {
use ast::{self, Ident};
use syntax_pos::{self, BytePos, CharPos, Pos, Span, NO_EXPANSION};
use source_map::{SourceMap, FilePathMapping};
-use errors::{Applicability, FatalError, DiagnosticBuilder};
+use errors::{Applicability, FatalError, Diagnostic, DiagnosticBuilder};
use parse::{token, ParseSess};
use str::char_at;
use symbol::{Symbol, keywords};
self.fatal_errs.clear();
}
+ pub fn buffer_fatal_errors(&mut self) -> Vec<Diagnostic> {
+ let mut buffer = Vec::new();
+
+ for err in self.fatal_errs.drain(..) {
+ err.buffer(&mut buffer);
+ }
+
+ buffer
+ }
+
pub fn peek(&self) -> TokenAndSpan {
// FIXME(pcwalton): Bad copy!
TokenAndSpan {
Ok(sr)
}
+ pub fn new_or_buffered_errs(sess: &'a ParseSess,
+ source_file: Lrc<syntax_pos::SourceFile>,
+ override_span: Option<Span>) -> Result<Self, Vec<Diagnostic>> {
+ let mut sr = StringReader::new_raw(sess, source_file, override_span);
+ if sr.advance_token().is_err() {
+ Err(sr.buffer_fatal_errors())
+ } else {
+ Ok(sr)
+ }
+ }
+
pub fn retokenize(sess: &'a ParseSess, mut span: Span) -> Self {
let begin = sess.source_map().lookup_byte_offset(span.lo());
let end = sess.source_map().lookup_byte_offset(span.hi());
// Make the range zero-length if the span is invalid.
- if span.lo() > span.hi() || begin.fm.start_pos != end.fm.start_pos {
+ if span.lo() > span.hi() || begin.sf.start_pos != end.sf.start_pos {
span = span.shrink_to_lo();
}
- let mut sr = StringReader::new_raw_internal(sess, begin.fm, None);
+ let mut sr = StringReader::new_raw_internal(sess, begin.sf, None);
// Seek the lexer to the right byte range.
sr.next_pos = span.lo();
// I guess this is the only way to figure out if
// we're at the beginning of the file...
- let cmap = SourceMap::new(FilePathMapping::empty());
- cmap.files.borrow_mut().file_maps.push(self.source_file.clone());
- let loc = cmap.lookup_char_pos_adj(self.pos);
+ let smap = SourceMap::new(FilePathMapping::empty());
+ smap.files.borrow_mut().source_files.push(self.source_file.clone());
+ let loc = smap.lookup_char_pos_adj(self.pos);
debug!("Skipping a shebang");
if loc.line == 1 && loc.col == CharPos(0) {
// FIXME: Add shebang "token", return it
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::Lock;
use with_globals;
- fn mk_sess(cm: Lrc<SourceMap>) -> ParseSess {
+ fn mk_sess(sm: Lrc<SourceMap>) -> ParseSess {
let emitter = errors::emitter::EmitterWriter::new(Box::new(io::sink()),
- Some(cm.clone()),
+ Some(sm.clone()),
false,
false);
ParseSess {
unstable_features: UnstableFeatures::from_environment(),
config: CrateConfig::default(),
included_mod_stack: Lock::new(Vec::new()),
- code_map: cm,
+ source_map: sm,
missing_fragment_specifiers: Lock::new(FxHashSet::default()),
raw_identifier_spans: Lock::new(Vec::new()),
registered_diagnostics: Lock::new(ErrorMap::new()),
}
// open a string reader for the given string
- fn setup<'a>(cm: &SourceMap,
+ fn setup<'a>(sm: &SourceMap,
sess: &'a ParseSess,
teststr: String)
-> StringReader<'a> {
- let fm = cm.new_source_file(PathBuf::from("zebra.rs").into(), teststr);
- StringReader::new(sess, fm, None)
+ let sf = sm.new_source_file(PathBuf::from("zebra.rs").into(), teststr);
+ StringReader::new(sess, sf, None)
}
#[test]
fn t1() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- let mut string_reader = setup(&cm,
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ let mut string_reader = setup(&sm,
&sh,
"/* my source file */ fn main() { println!(\"zebra\"); }\n"
.to_string());
#[test]
fn doublecolonparsing() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- check_tokenization(setup(&cm, &sh, "a b".to_string()),
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ check_tokenization(setup(&sm, &sh, "a b".to_string()),
vec![mk_ident("a"), token::Whitespace, mk_ident("b")]);
})
}
#[test]
fn dcparsing_2() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- check_tokenization(setup(&cm, &sh, "a::b".to_string()),
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ check_tokenization(setup(&sm, &sh, "a::b".to_string()),
vec![mk_ident("a"), token::ModSep, mk_ident("b")]);
})
}
#[test]
fn dcparsing_3() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- check_tokenization(setup(&cm, &sh, "a ::b".to_string()),
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ check_tokenization(setup(&sm, &sh, "a ::b".to_string()),
vec![mk_ident("a"), token::Whitespace, token::ModSep, mk_ident("b")]);
})
}
#[test]
fn dcparsing_4() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- check_tokenization(setup(&cm, &sh, "a:: b".to_string()),
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ check_tokenization(setup(&sm, &sh, "a:: b".to_string()),
vec![mk_ident("a"), token::ModSep, token::Whitespace, mk_ident("b")]);
})
}
#[test]
fn character_a() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- assert_eq!(setup(&cm, &sh, "'a'".to_string()).next_token().tok,
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().tok,
token::Literal(token::Char(Symbol::intern("a")), None));
})
}
#[test]
fn character_space() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- assert_eq!(setup(&cm, &sh, "' '".to_string()).next_token().tok,
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().tok,
token::Literal(token::Char(Symbol::intern(" ")), None));
})
}
#[test]
fn character_escaped() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- assert_eq!(setup(&cm, &sh, "'\\n'".to_string()).next_token().tok,
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().tok,
token::Literal(token::Char(Symbol::intern("\\n")), None));
})
}
#[test]
fn lifetime_name() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- assert_eq!(setup(&cm, &sh, "'abc".to_string()).next_token().tok,
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().tok,
token::Lifetime(Ident::from_str("'abc")));
})
}
#[test]
fn raw_string() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- assert_eq!(setup(&cm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string())
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string())
.next_token()
.tok,
token::Literal(token::StrRaw(Symbol::intern("\"#a\\b\x00c\""), 3), None));
#[test]
fn literal_suffixes() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
macro_rules! test {
($input: expr, $tok_type: ident, $tok_contents: expr) => {{
- assert_eq!(setup(&cm, &sh, format!("{}suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().tok,
token::Literal(token::$tok_type(Symbol::intern($tok_contents)),
Some(Symbol::intern("suffix"))));
// with a whitespace separator:
- assert_eq!(setup(&cm, &sh, format!("{} suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().tok,
token::Literal(token::$tok_type(Symbol::intern($tok_contents)),
None));
}}
test!("1.0", Float, "1.0");
test!("1.0e10", Float, "1.0e10");
- assert_eq!(setup(&cm, &sh, "2us".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().tok,
token::Literal(token::Integer(Symbol::intern("2")),
Some(Symbol::intern("us"))));
- assert_eq!(setup(&cm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
token::Literal(token::StrRaw(Symbol::intern("raw"), 3),
Some(Symbol::intern("suffix"))));
- assert_eq!(setup(&cm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
token::Literal(token::ByteStrRaw(Symbol::intern("raw"), 3),
Some(Symbol::intern("suffix"))));
})
#[test]
fn nested_block_comments() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- let mut lexer = setup(&cm, &sh, "/* /* */ */'a'".to_string());
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ let mut lexer = setup(&sm, &sh, "/* /* */ */'a'".to_string());
match lexer.next_token().tok {
token::Comment => {}
_ => panic!("expected a comment!"),
#[test]
fn crlf_comments() {
with_globals(|| {
- let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- let sh = mk_sess(cm.clone());
- let mut lexer = setup(&cm, &sh, "// test\r\n/// test\r\n".to_string());
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let sh = mk_sess(sm.clone());
+ let mut lexer = setup(&sm, &sh, "// test\r\n/// test\r\n".to_string());
let comment = lexer.next_token();
assert_eq!(comment.tok, token::Comment);
assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
use early_buffered_lints::{BufferedEarlyLint, BufferedEarlyLintId};
use source_map::{SourceMap, FilePathMapping};
use syntax_pos::{Span, SourceFile, FileName, MultiSpan};
-use errors::{Handler, ColorConfig, DiagnosticBuilder};
+use errors::{Handler, ColorConfig, Diagnostic, DiagnosticBuilder};
use feature_gate::UnstableFeatures;
use parse::parser::Parser;
use ptr::P;
pub non_modrs_mods: Lock<Vec<(ast::Ident, Span)>>,
/// Used to determine and report recursive mod inclusions
included_mod_stack: Lock<Vec<PathBuf>>,
- code_map: Lrc<SourceMap>,
+ source_map: Lrc<SourceMap>,
pub buffered_lints: Lock<Vec<BufferedEarlyLint>>,
}
ParseSess::with_span_handler(handler, cm)
}
- pub fn with_span_handler(handler: Handler, code_map: Lrc<SourceMap>) -> ParseSess {
+ pub fn with_span_handler(handler: Handler, source_map: Lrc<SourceMap>) -> ParseSess {
ParseSess {
span_diagnostic: handler,
unstable_features: UnstableFeatures::from_environment(),
raw_identifier_spans: Lock::new(Vec::new()),
registered_diagnostics: Lock::new(ErrorMap::new()),
included_mod_stack: Lock::new(vec![]),
- code_map,
+ source_map,
non_modrs_mods: Lock::new(vec![]),
buffered_lints: Lock::new(vec![]),
}
}
pub fn source_map(&self) -> &SourceMap {
- &self.code_map
+ &self.source_map
}
pub fn buffer_lint<S: Into<MultiSpan>>(&self,
source_file_to_stream(sess, sess.source_map().new_source_file(name, source), override_span)
}
-// Create a new parser from a source string
+/// Create a new parser from a source string
pub fn new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String)
-> Parser {
- let mut parser = source_file_to_parser(sess, sess.source_map().new_source_file(name, source));
+ panictry_buffer!(&sess.span_diagnostic, maybe_new_parser_from_source_str(sess, name, source))
+}
+
+/// Create a new parser from a source string. Returns any buffered errors from lexing the initial
+/// token stream.
+pub fn maybe_new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String)
+ -> Result<Parser, Vec<Diagnostic>>
+{
+ let mut parser = maybe_source_file_to_parser(sess,
+ sess.source_map().new_source_file(name, source))?;
parser.recurse_into_file_modules = false;
- parser
+ Ok(parser)
}
/// Create a new parser, handling errors as appropriate
/// Given a source_file and config, return a parser
fn source_file_to_parser(sess: & ParseSess, source_file: Lrc<SourceFile>) -> Parser {
+ panictry_buffer!(&sess.span_diagnostic,
+ maybe_source_file_to_parser(sess, source_file))
+}
+
+/// Given a source_file and config, return a parser. Returns any buffered errors from lexing the
+/// initial token stream.
+fn maybe_source_file_to_parser(sess: &ParseSess, source_file: Lrc<SourceFile>)
+ -> Result<Parser, Vec<Diagnostic>>
+{
let end_pos = source_file.end_pos;
- let mut parser = stream_to_parser(sess, source_file_to_stream(sess, source_file, None));
+ let mut parser = stream_to_parser(sess, maybe_file_to_stream(sess, source_file, None)?);
if parser.token == token::Eof && parser.span.is_dummy() {
parser.span = Span::new(end_pos, end_pos, parser.span.ctxt());
}
- parser
+ Ok(parser)
}
// must preserve old name for now, because quote! from the *existing*
pub fn source_file_to_stream(sess: &ParseSess,
source_file: Lrc<SourceFile>,
override_span: Option<Span>) -> TokenStream {
- let mut srdr = lexer::StringReader::new(sess, source_file, override_span);
+ panictry_buffer!(&sess.span_diagnostic, maybe_file_to_stream(sess, source_file, override_span))
+}
+
+/// Given a source file, produce a sequence of token-trees. Returns any buffered errors from
+/// parsing the token stream.
+pub fn maybe_file_to_stream(sess: &ParseSess,
+ source_file: Lrc<SourceFile>,
+ override_span: Option<Span>) -> Result<TokenStream, Vec<Diagnostic>> {
+ let mut srdr = lexer::StringReader::new_or_buffered_errs(sess, source_file, override_span)?;
srdr.real_token();
- panictry!(srdr.parse_all_token_trees())
+
+ match srdr.parse_all_token_trees() {
+ Ok(stream) => Ok(stream),
+ Err(err) => {
+ let mut buffer = Vec::with_capacity(1);
+ err.buffer(&mut buffer);
+ Err(buffer)
+ }
+ }
}
/// Given stream and the `ParseSess`, produce a parser
t if t.is_special_ident() => "reserved identifier",
t if t.is_used_keyword() => "keyword",
t if t.is_unused_keyword() => "reserved keyword",
+ token::DocComment(..) => "doc comment",
_ => return None,
})
}
Ok(())
} else {
let token_str = pprust::token_to_string(t);
- let this_token_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected `{}`, found `{}`",
+ let this_token_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected `{}`, found {}",
token_str,
this_token_str));
self.check_keyword(keywords::Extern) && self.is_extern_non_path()
}
- /// parse a TyKind::BareFn type:
+ /// parse a `TyKind::BareFn` type:
fn parse_ty_bare_fn(&mut self, generic_params: Vec<GenericParam>) -> PResult<'a, TyKind> {
/*
Some(body)
}
_ => {
- let token_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected `;` or `{{`, found `{}`",
+ let token_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected `;` or `{{`, found {}",
token_str));
err.span_label(self.span, "expected `;` or `{`");
return Err(err);
}
}
_ => {
- let token_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected `;` or `{{`, found `{}`",
+ let token_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected `;` or `{{`, found {}",
token_str));
err.span_label(self.span, "expected `;` or `{`");
return Err(err);
if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
let path = match bounds[0] {
GenericBound::Trait(ref pt, ..) => pt.trait_ref.path.clone(),
- _ => self.bug("unexpected lifetime bound"),
+ GenericBound::Outlives(..) => self.bug("unexpected lifetime bound"),
};
self.parse_remaining_bounds(Vec::new(), path, lo, true)?
}
self.look_ahead(offset + 1, |t| t == &token::Colon)
}
+ /// Skip unexpected attributes and doc comments in this position and emit an appropriate error.
+ fn eat_incorrect_doc_comment(&mut self, applied_to: &str) {
+ if let token::DocComment(_) = self.token {
+ let mut err = self.diagnostic().struct_span_err(
+ self.span,
+ &format!("documentation comments cannot be applied to {}", applied_to),
+ );
+ err.span_label(self.span, "doc comments are not allowed here");
+ err.emit();
+ self.bump();
+ } else if self.token == token::Pound && self.look_ahead(1, |t| {
+ *t == token::OpenDelim(token::Bracket)
+ }) {
+ let lo = self.span;
+ // Skip every token until next possible arg.
+ while self.token != token::CloseDelim(token::Bracket) {
+ self.bump();
+ }
+ let sp = lo.to(self.span);
+ self.bump();
+ let mut err = self.diagnostic().struct_span_err(
+ sp,
+ &format!("attributes cannot be applied to {}", applied_to),
+ );
+ err.span_label(sp, "attributes are not allowed here");
+ err.emit();
+ }
+ }
+
/// This version of parse arg doesn't necessarily require
/// identifier names.
fn parse_arg_general(&mut self, require_name: bool) -> PResult<'a, Arg> {
let (pat, ty) = if require_name || self.is_named_argument() {
debug!("parse_arg_general parse_pat (require_name:{})",
require_name);
- let pat = self.parse_pat()?;
+ self.eat_incorrect_doc_comment("method arguments");
+ let pat = self.parse_pat(Some("argument name"))?;
if let Err(mut err) = self.expect(&token::Colon) {
// If we find a pattern followed by an identifier, it could be an (incorrect)
return Err(err);
}
+ self.eat_incorrect_doc_comment("a method argument's type");
(pat, self.parse_ty()?)
} else {
debug!("parse_arg_general ident_to_pat");
let parser_snapshot_before_ty = self.clone();
+ self.eat_incorrect_doc_comment("a method argument's type");
let mut ty = self.parse_ty();
if ty.is_ok() && self.token == token::Colon {
// This wasn't actually a type, but a pattern looking like a type,
// Recover from attempting to parse the argument as a type without pattern.
err.cancel();
mem::replace(self, parser_snapshot_before_ty);
- let pat = self.parse_pat()?;
+ let pat = self.parse_pat(Some("argument name"))?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
/// Parse an argument in a lambda header e.g. |arg, arg|
fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
- let pat = self.parse_pat()?;
+ let pat = self.parse_pat(Some("argument name"))?;
let t = if self.eat(&token::Colon) {
self.parse_ty()?
} else {
return Ok(self.mk_expr(lo.to(hi), ex, attrs));
}
if self.eat_keyword(keywords::Match) {
- return self.parse_match_expr(attrs);
+ let match_sp = self.prev_span;
+ return self.parse_match_expr(attrs).map_err(|mut err| {
+ err.span_label(match_sp, "while parsing this match expression");
+ err
+ });
}
if self.eat_keyword(keywords::Unsafe) {
return self.parse_block_expr(
"`..` can only be used once per tuple or tuple struct pattern");
}
} else if !self.check(&token::CloseDelim(token::Paren)) {
- fields.push(self.parse_pat()?);
+ fields.push(self.parse_pat(None)?);
} else {
break
}
}
}
- let subpat = self.parse_pat()?;
+ let subpat = self.parse_pat(None)?;
if before_slice && self.eat(&token::DotDot) {
slice = Some(subpat);
before_slice = false;
// Parsing a pattern of the form "fieldname: pat"
let fieldname = self.parse_field_name()?;
self.bump();
- let pat = self.parse_pat()?;
+ let pat = self.parse_pat(None)?;
hi = pat.span;
(pat, fieldname, false)
} else {
etc_span = Some(etc_sp);
break;
}
- let token_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected `}}`, found `{}`", token_str));
+ let token_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected `}}`, found {}", token_str));
err.span_label(self.span, "expected `}`");
let mut comma_sp = None;
/// "top-level" patterns in a match arm, `for` loop, `let`, &c. (in contrast
/// to subpatterns within such).
fn parse_top_level_pat(&mut self) -> PResult<'a, P<Pat>> {
- let pat = self.parse_pat()?;
+ let pat = self.parse_pat(None)?;
if self.token == token::Comma {
// An unexpected comma after a top-level pattern is a clue that the
// user (perhaps more accustomed to some other language) forgot the
}
/// Parse a pattern.
- pub fn parse_pat(&mut self) -> PResult<'a, P<Pat>> {
- self.parse_pat_with_range_pat(true)
+ pub fn parse_pat(&mut self, expected: Option<&'static str>) -> PResult<'a, P<Pat>> {
+ self.parse_pat_with_range_pat(true, expected)
}
/// Parse a pattern, with a setting whether modern range patterns e.g. `a..=b`, `a..b` are
/// allowed.
- fn parse_pat_with_range_pat(&mut self, allow_range_pat: bool) -> PResult<'a, P<Pat>> {
+ fn parse_pat_with_range_pat(
+ &mut self,
+ allow_range_pat: bool,
+ expected: Option<&'static str>,
+ ) -> PResult<'a, P<Pat>> {
maybe_whole!(self, NtPat, |x| x);
let lo = self.span;
err.span_label(self.span, "unexpected lifetime");
return Err(err);
}
- let subpat = self.parse_pat_with_range_pat(false)?;
+ let subpat = self.parse_pat_with_range_pat(false, expected)?;
pat = PatKind::Ref(subpat, mutbl);
}
token::OpenDelim(token::Paren) => {
pat = self.parse_pat_ident(BindingMode::ByRef(mutbl))?;
} else if self.eat_keyword(keywords::Box) {
// Parse box pat
- let subpat = self.parse_pat_with_range_pat(false)?;
+ let subpat = self.parse_pat_with_range_pat(false, None)?;
pat = PatKind::Box(subpat);
} else if self.token.is_ident() && !self.token.is_reserved_ident() &&
self.parse_as_ident() {
}
Err(mut err) => {
self.cancel(&mut err);
- let msg = format!("expected pattern, found {}", self.this_token_descr());
+ let expected = expected.unwrap_or("pattern");
+ let msg = format!(
+ "expected {}, found {}",
+ expected,
+ self.this_token_descr(),
+ );
let mut err = self.fatal(&msg);
- err.span_label(self.span, "expected pattern");
+ err.span_label(self.span, format!("expected {}", expected));
return Err(err);
}
}
-> PResult<'a, PatKind> {
let ident = self.parse_ident()?;
let sub = if self.eat(&token::At) {
- Some(self.parse_pat()?)
+ Some(self.parse_pat(Some("binding pattern"))?)
} else {
None
};
} else {
""
};
- let tok_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected {}`(` or `{{`, found `{}`",
+ let tok_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected {}`(` or `{{`, found {}",
ident_str,
tok_str));
err.span_label(self.span, format!("expected {}`(` or `{{`", ident_str));
if !self.eat(&token::OpenDelim(token::Brace)) {
let sp = self.span;
- let tok = self.this_token_to_string();
- let mut e = self.span_fatal(sp, &format!("expected `{{`, found `{}`", tok));
+ let tok = self.this_token_descr();
+ let mut e = self.span_fatal(sp, &format!("expected `{{`, found {}", tok));
let do_not_suggest_help =
self.token.is_keyword(keywords::In) || self.token == token::Colon;
}
_ => ()
}
+ e.span_label(sp, "expected `{`");
return Err(e);
}
fn warn_missing_semicolon(&self) {
self.diagnostic().struct_span_warn(self.span, {
- &format!("expected `;`, found `{}`", self.this_token_to_string())
+ &format!("expected `;`, found {}", self.this_token_descr())
}).note({
"This was erroneously allowed and will become a hard error in a future release"
}).emit();
ast::ImplItemKind)> {
// code copied from parse_macro_use_or_failure... abstraction!
if let Some(mac) = self.parse_assoc_macro_invoc("impl", Some(vis), at_end)? {
- // Method macro.
+ // method macro
Ok((keywords::Invalid.ident(), vec![], ast::Generics::default(),
ast::ImplItemKind::Macro(mac)))
} else {
self.expect(&token::Semi)?;
body
} else {
- let token_str = self.this_token_to_string();
+ let token_str = self.this_token_descr();
let mut err = self.fatal(&format!(
- "expected `where`, `{{`, `(`, or `;` after struct name, found `{}`",
+ "expected `where`, `{{`, `(`, or `;` after struct name, found {}",
token_str
));
err.span_label(self.span, "expected `where`, `{`, `(`, or `;` after struct name");
} else if self.token == token::OpenDelim(token::Brace) {
VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
} else {
- let token_str = self.this_token_to_string();
+ let token_str = self.this_token_descr();
let mut err = self.fatal(&format!(
- "expected `where` or `{{` after union name, found `{}`", token_str));
+ "expected `where` or `{{` after union name, found {}", token_str));
err.span_label(self.span, "expected `where` or `{` after union name");
return Err(err);
};
}
self.eat(&token::CloseDelim(token::Brace));
} else {
- let token_str = self.this_token_to_string();
+ let token_str = self.this_token_descr();
let mut err = self.fatal(&format!(
- "expected `where`, or `{{` after struct name, found `{}`", token_str));
+ "expected `where`, or `{{` after struct name, found {}", token_str));
err.span_label(self.span, "expected `where`, or `{` after struct name");
return Err(err);
}
}
_ => {
let sp = self.sess.source_map().next_point(self.prev_span);
- let mut err = self.struct_span_err(sp, &format!("expected `,`, or `}}`, found `{}`",
- self.this_token_to_string()));
+ let mut err = self.struct_span_err(sp, &format!("expected `,`, or `}}`, found {}",
+ self.this_token_descr()));
if self.token.is_ident() {
// This is likely another field; emit the diagnostic and keep going
err.span_suggestion_with_applicability(
}
if !self.eat(term) {
- let token_str = self.this_token_to_string();
- let mut err = self.fatal(&format!("expected item, found `{}`", token_str));
- if token_str == ";" {
+ let token_str = self.this_token_descr();
+ let mut err = self.fatal(&format!("expected item, found {}", token_str));
+ if self.token == token::Semi {
let msg = "consider removing this semicolon";
err.span_suggestion_short_with_applicability(
self.span, msg, String::new(), Applicability::MachineApplicable
Ok(self.mk_item(lo.to(prev_span), invalid, ItemKind::ForeignMod(m), visibility, attrs))
}
- /// Parse type Foo = Bar;
+ /// Parse `type Foo = Bar;`
/// or
- /// existential type Foo: Bar;
+ /// `existential type Foo: Bar;`
/// or
- /// return None without modifying the parser state
+ /// `return None` without modifying the parser state
fn eat_type(&mut self) -> Option<PResult<'a, (Ident, AliasKind, ast::Generics)>> {
// This parses the grammar:
// Ident ["<"...">"] ["where" ...] ("=" | ":") Ty ";"
// subsequent compilation sessions (which is something we need to do during
// incremental compilation).
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
-pub struct StableFilemapId(u128);
+pub struct StableSourceFileId(u128);
-impl StableFilemapId {
- pub fn new(source_file: &SourceFile) -> StableFilemapId {
+impl StableSourceFileId {
+ pub fn new(source_file: &SourceFile) -> StableSourceFileId {
let mut hasher = StableHasher::new();
source_file.name.hash(&mut hasher);
source_file.name_was_remapped.hash(&mut hasher);
source_file.unmapped_path.hash(&mut hasher);
- StableFilemapId(hasher.finish())
+ StableSourceFileId(hasher.finish())
}
}
#[derive(Default)]
pub(super) struct SourceMapFiles {
- pub(super) file_maps: Vec<Lrc<SourceFile>>,
- stable_id_to_source_file: FxHashMap<StableFilemapId, Lrc<SourceFile>>
+ pub(super) source_files: Vec<Lrc<SourceFile>>,
+ stable_id_to_source_file: FxHashMap<StableSourceFileId, Lrc<SourceFile>>
}
pub struct SourceMap {
}
pub fn files(&self) -> MappedLockGuard<Vec<Lrc<SourceFile>>> {
- LockGuard::map(self.files.borrow(), |files| &mut files.file_maps)
+ LockGuard::map(self.files.borrow(), |files| &mut files.source_files)
}
- pub fn source_file_by_stable_id(&self, stable_id: StableFilemapId) -> Option<Lrc<SourceFile>> {
- self.files.borrow().stable_id_to_source_file.get(&stable_id).map(|fm| fm.clone())
+ pub fn source_file_by_stable_id(&self, stable_id: StableSourceFileId) ->
+ Option<Lrc<SourceFile>> {
+ self.files.borrow().stable_id_to_source_file.get(&stable_id).map(|sf| sf.clone())
}
fn next_start_pos(&self) -> usize {
- match self.files.borrow().file_maps.last() {
+ match self.files.borrow().source_files.last() {
None => 0,
// Add one so there is some space between files. This lets us distinguish
// positions in the source_map, even in the presence of zero-length files.
let mut files = self.files.borrow_mut();
- files.file_maps.push(source_file.clone());
- files.stable_id_to_source_file.insert(StableFilemapId::new(&source_file),
+ files.source_files.push(source_file.clone());
+ files.stable_id_to_source_file.insert(StableSourceFileId::new(&source_file),
source_file.clone());
source_file
let mut files = self.files.borrow_mut();
- files.file_maps.push(source_file.clone());
- files.stable_id_to_source_file.insert(StableFilemapId::new(&source_file),
+ files.source_files.push(source_file.clone());
+ files.stable_id_to_source_file.insert(StableSourceFileId::new(&source_file),
source_file.clone());
source_file
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
let chpos = self.bytepos_to_file_charpos(pos);
match self.lookup_line(pos) {
- Ok(SourceFileAndLine { fm: f, line: a }) => {
+ Ok(SourceFileAndLine { sf: f, line: a }) => {
let line = a + 1; // Line numbers start at 1
let linebpos = f.lines[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
pub fn lookup_line(&self, pos: BytePos) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
let idx = self.lookup_source_file_idx(pos);
- let f = (*self.files.borrow().file_maps)[idx].clone();
+ let f = (*self.files.borrow().source_files)[idx].clone();
match f.lookup_line(pos) {
- Some(line) => Ok(SourceFileAndLine { fm: f, line: line }),
+ Some(line) => Ok(SourceFileAndLine { sf: f, line: line }),
None => Err(f)
}
}
}
pub fn span_to_string(&self, sp: Span) -> String {
- if self.files.borrow().file_maps.is_empty() && sp.is_dummy() {
+ if self.files.borrow().source_files.is_empty() && sp.is_dummy() {
return "no-location".to_string();
}
let local_begin = self.lookup_byte_offset(sp.lo());
let local_end = self.lookup_byte_offset(sp.hi());
- if local_begin.fm.start_pos != local_end.fm.start_pos {
+ if local_begin.sf.start_pos != local_end.sf.start_pos {
return Err(SpanSnippetError::DistinctSources(DistinctSources {
- begin: (local_begin.fm.name.clone(),
- local_begin.fm.start_pos),
- end: (local_end.fm.name.clone(),
- local_end.fm.start_pos)
+ begin: (local_begin.sf.name.clone(),
+ local_begin.sf.start_pos),
+ end: (local_end.sf.name.clone(),
+ local_end.sf.start_pos)
}));
} else {
- self.ensure_source_file_source_present(local_begin.fm.clone());
+ self.ensure_source_file_source_present(local_begin.sf.clone());
let start_index = local_begin.pos.to_usize();
let end_index = local_end.pos.to_usize();
- let source_len = (local_begin.fm.end_pos -
- local_begin.fm.start_pos).to_usize();
+ let source_len = (local_begin.sf.end_pos -
+ local_begin.sf.start_pos).to_usize();
if start_index > end_index || end_index > source_len {
- return Err(SpanSnippetError::MalformedForCodemap(
- MalformedCodemapPositions {
- name: local_begin.fm.name.clone(),
+ return Err(SpanSnippetError::MalformedForSourcemap(
+ MalformedSourceMapPositions {
+ name: local_begin.sf.name.clone(),
source_len,
begin_pos: local_begin.pos,
end_pos: local_end.pos,
}));
}
- if let Some(ref src) = local_begin.fm.src {
+ if let Some(ref src) = local_begin.sf.src {
return Ok(extract_source(src, start_index, end_index));
- } else if let Some(src) = local_begin.fm.external_src.borrow().get_source() {
+ } else if let Some(src) = local_begin.sf.external_src.borrow().get_source() {
return Ok(extract_source(src, start_index, end_index));
} else {
return Err(SpanSnippetError::SourceNotAvailable {
- filename: local_begin.fm.name.clone()
+ filename: local_begin.sf.name.clone()
});
}
}
return 1;
}
- let source_len = (local_begin.fm.end_pos - local_begin.fm.start_pos).to_usize();
+ let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
debug!("find_width_of_character_at_span: source_len=`{:?}`", source_len);
// Ensure indexes are also not malformed.
if start_index > end_index || end_index > source_len {
return 1;
}
- let src = local_begin.fm.external_src.borrow();
+ let src = local_begin.sf.external_src.borrow();
// We need to extend the snippet to the end of the src rather than to end_index so when
// searching forwards for boundaries we've got somewhere to search.
- let snippet = if let Some(ref src) = local_begin.fm.src {
+ let snippet = if let Some(ref src) = local_begin.sf.src {
let len = src.len();
(&src[start_index..len])
} else if let Some(src) = src.get_source() {
}
pub fn get_source_file(&self, filename: &FileName) -> Option<Lrc<SourceFile>> {
- for fm in self.files.borrow().file_maps.iter() {
- if *filename == fm.name {
- return Some(fm.clone());
+ for sf in self.files.borrow().source_files.iter() {
+ if *filename == sf.name {
+ return Some(sf.clone());
}
}
None
/// For a global BytePos compute the local offset within the containing SourceFile
pub fn lookup_byte_offset(&self, bpos: BytePos) -> SourceFileAndBytePos {
let idx = self.lookup_source_file_idx(bpos);
- let fm = (*self.files.borrow().file_maps)[idx].clone();
- let offset = bpos - fm.start_pos;
- SourceFileAndBytePos {fm: fm, pos: offset}
+ let sf = (*self.files.borrow().source_files)[idx].clone();
+ let offset = bpos - sf.start_pos;
+ SourceFileAndBytePos {sf: sf, pos: offset}
}
/// Converts an absolute BytePos to a CharPos relative to the source_file.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_source_file_idx(bpos);
- let map = &(*self.files.borrow().file_maps)[idx];
+ let map = &(*self.files.borrow().source_files)[idx];
// The number of extra bytes due to multibyte chars in the SourceFile
let mut total_extra_bytes = 0;
// Return the index of the source_file (in self.files) which contains pos.
pub fn lookup_source_file_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
- let files = &files.file_maps;
+ let files = &files.source_files;
let count = files.len();
// Binary search for the source_file.
}
sp
}
- fn ensure_source_file_source_present(&self, file_map: Lrc<SourceFile>) -> bool {
- file_map.add_external_src(
- || match file_map.name {
+ fn ensure_source_file_source_present(&self, source_file: Lrc<SourceFile>) -> bool {
+ source_file.add_external_src(
+ || match source_file.name {
FileName::Real(ref name) => self.file_loader.read_file(name).ok(),
_ => None,
}
use super::*;
use rustc_data_structures::sync::Lrc;
- fn init_code_map() -> SourceMap {
- let cm = SourceMap::new(FilePathMapping::empty());
- cm.new_source_file(PathBuf::from("blork.rs").into(),
+ fn init_source_map() -> SourceMap {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ sm.new_source_file(PathBuf::from("blork.rs").into(),
"first line.\nsecond line".to_string());
- cm.new_source_file(PathBuf::from("empty.rs").into(),
+ sm.new_source_file(PathBuf::from("empty.rs").into(),
String::new());
- cm.new_source_file(PathBuf::from("blork2.rs").into(),
+ sm.new_source_file(PathBuf::from("blork2.rs").into(),
"first line.\nsecond line".to_string());
- cm
+ sm
}
#[test]
fn t3() {
// Test lookup_byte_offset
- let cm = init_code_map();
+ let sm = init_source_map();
- let fmabp1 = cm.lookup_byte_offset(BytePos(23));
- assert_eq!(fmabp1.fm.name, PathBuf::from("blork.rs").into());
- assert_eq!(fmabp1.pos, BytePos(23));
+ let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
+ assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
+ assert_eq!(srcfbp1.pos, BytePos(23));
- let fmabp1 = cm.lookup_byte_offset(BytePos(24));
- assert_eq!(fmabp1.fm.name, PathBuf::from("empty.rs").into());
- assert_eq!(fmabp1.pos, BytePos(0));
+ let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
+ assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
+ assert_eq!(srcfbp1.pos, BytePos(0));
- let fmabp2 = cm.lookup_byte_offset(BytePos(25));
- assert_eq!(fmabp2.fm.name, PathBuf::from("blork2.rs").into());
- assert_eq!(fmabp2.pos, BytePos(0));
+ let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
+ assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
+ assert_eq!(srcfbp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
- let cm = init_code_map();
+ let sm = init_source_map();
- let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
+ let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
- let cp2 = cm.bytepos_to_file_charpos(BytePos(25));
+ let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length source_files.
- let cm = init_code_map();
+ let sm = init_source_map();
- let loc1 = cm.lookup_char_pos(BytePos(22));
+ let loc1 = sm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
- let loc2 = cm.lookup_char_pos(BytePos(25));
+ let loc2 = sm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
- fn init_code_map_mbc() -> SourceMap {
- let cm = SourceMap::new(FilePathMapping::empty());
+ fn init_source_map_mbc() -> SourceMap {
+ let sm = SourceMap::new(FilePathMapping::empty());
// € is a three byte utf8 char.
- cm.new_source_file(PathBuf::from("blork.rs").into(),
+ sm.new_source_file(PathBuf::from("blork.rs").into(),
"fir€st €€€€ line.\nsecond line".to_string());
- cm.new_source_file(PathBuf::from("blork2.rs").into(),
+ sm.new_source_file(PathBuf::from("blork2.rs").into(),
"first line€€.\n€ second line".to_string());
- cm
+ sm
}
#[test]
fn t6() {
// Test bytepos_to_file_charpos in the presence of multi-byte chars
- let cm = init_code_map_mbc();
+ let sm = init_source_map_mbc();
- let cp1 = cm.bytepos_to_file_charpos(BytePos(3));
+ let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
- let cp2 = cm.bytepos_to_file_charpos(BytePos(6));
+ let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
- let cp3 = cm.bytepos_to_file_charpos(BytePos(56));
+ let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
- let cp4 = cm.bytepos_to_file_charpos(BytePos(61));
+ let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
#[test]
fn t7() {
// Test span_to_lines for a span ending at the end of source_file
- let cm = init_code_map();
+ let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
- let file_lines = cm.span_to_lines(span).unwrap();
+ let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 1);
/// lines in the middle of a file.
#[test]
fn span_to_snippet_and_lines_spanning_multiple_lines() {
- let cm = SourceMap::new(FilePathMapping::empty());
+ let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ~~\n~~~\n~~~~~ \n \n";
- cm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
+ sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
let span = span_from_selection(inputtext, selection);
// check that we are extracting the text we thought we were extracting
- assert_eq!(&cm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
+ assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
// check that span_to_lines gives us the complete result with the lines/cols we expected
- let lines = cm.span_to_lines(span).unwrap();
+ let lines = sm.span_to_lines(span).unwrap();
let expected = vec![
LineInfo { line_index: 1, start_col: CharPos(4), end_col: CharPos(6) },
LineInfo { line_index: 2, start_col: CharPos(0), end_col: CharPos(3) },
#[test]
fn t8() {
// Test span_to_snippet for a span ending at the end of source_file
- let cm = init_code_map();
+ let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
- let snippet = cm.span_to_snippet(span);
+ let snippet = sm.span_to_snippet(span);
assert_eq!(snippet, Ok("second line".to_string()));
}
#[test]
fn t9() {
// Test span_to_str for a span ending at the end of source_file
- let cm = init_code_map();
+ let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
- let sstr = cm.span_to_string(span);
+ let sstr = sm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
/// Test failing to merge two spans on different lines
#[test]
fn span_merging_fail() {
- let cm = SourceMap::new(FilePathMapping::empty());
+ let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "bbbb BB\ncc CCC\n";
let selection1 = " ~~\n \n";
let selection2 = " \n ~~~\n";
- cm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
+ sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
let span1 = span_from_selection(inputtext, selection1);
let span2 = span_from_selection(inputtext, selection2);
- assert!(cm.merge_spans(span1, span2).is_none());
+ assert!(sm.merge_spans(span1, span2).is_none());
}
/// Returns the span corresponding to the `n`th occurrence of
with_globals(|| {
let output = Arc::new(Mutex::new(Vec::new()));
- let code_map = Lrc::new(SourceMap::new(FilePathMapping::empty()));
- code_map.new_source_file(Path::new("test.rs").to_owned().into(), file_text.to_owned());
+ let source_map = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ source_map.new_source_file(Path::new("test.rs").to_owned().into(), file_text.to_owned());
let primary_span = make_span(&file_text, &span_labels[0].start, &span_labels[0].end);
let mut msp = MultiSpan::from_span(primary_span);
let span = make_span(&file_text, &span_label.start, &span_label.end);
msp.push_span_label(span, span_label.label.to_string());
println!("span: {:?} label: {:?}", span, span_label.label);
- println!("text: {:?}", code_map.span_to_snippet(span));
+ println!("text: {:?}", source_map.span_to_snippet(span));
}
let emitter = EmitterWriter::new(Box::new(Shared { data: output.clone() }),
- Some(code_map.clone()),
+ Some(source_map.clone()),
false,
false);
let handler = Handler::with_emitter(true, false, Box::new(emitter));
pub fn string_to_pat(source_str: String) -> P<ast::Pat> {
let ps = ParseSess::new(FilePathMapping::empty());
with_error_checking_parse(source_str, &ps, |p| {
- p.parse_pat()
+ p.parse_pat(None)
})
}
// let __self2_vi = unsafe {
// std::intrinsics::discriminant_value(&arg2) } as i32;
// ```
- let mut index_let_stmts: Vec<ast::Stmt> = Vec::new();
+ let mut index_let_stmts: Vec<ast::Stmt> = Vec::with_capacity(vi_idents.len() + 1);
// We also build an expression which checks whether all discriminants are equal
// discriminant_test = __self0_vi == __self1_vi && __self0_vi == __self2_vi && ...
// it encounters. If that point is already outside the source_file, remove
// it again.
if let Some(&last_line_start) = lines.last() {
- let file_map_end = source_file_start_pos + BytePos::from_usize(src.len());
- assert!(file_map_end >= last_line_start);
- if last_line_start == file_map_end {
+ let source_file_end = source_file_start_pos + BytePos::from_usize(src.len());
+ assert!(source_file_end >= last_line_start);
+ if last_line_start == source_file_end {
lines.pop();
}
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
-pub struct SourceFileAndLine { pub fm: Lrc<SourceFile>, pub line: usize }
+pub struct SourceFileAndLine { pub sf: Lrc<SourceFile>, pub line: usize }
#[derive(Debug)]
-pub struct SourceFileAndBytePos { pub fm: Lrc<SourceFile>, pub pos: BytePos }
+pub struct SourceFileAndBytePos { pub sf: Lrc<SourceFile>, pub pos: BytePos }
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct LineInfo {
}
// _____________________________________________________________________________
-// SpanLinesError, SpanSnippetError, DistinctSources, MalformedCodemapPositions
+// SpanLinesError, SpanSnippetError, DistinctSources, MalformedSourceMapPositions
//
pub type FileLinesResult = Result<FileLines, SpanLinesError>;
pub enum SpanSnippetError {
IllFormedSpan(Span),
DistinctSources(DistinctSources),
- MalformedForCodemap(MalformedCodemapPositions),
+ MalformedForSourcemap(MalformedSourceMapPositions),
SourceNotAvailable { filename: FileName }
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct MalformedCodemapPositions {
+pub struct MalformedSourceMapPositions {
pub name: FileName,
pub source_len: usize,
pub begin_pos: BytePos,
-Subproject commit caddcd9b9dc9479a20908d93c3e47c49b021379e
+Subproject commit 7051ead40a5f825878b59bf08d4e768be9e99a4a
rustc_driver = { path = "../librustc_driver" }
[features]
-jemalloc = ["rustc_target/jemalloc"]
+jemalloc = ['rustc_driver/jemalloc-sys']
extern "C" const char *
LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef Child, size_t *Size) {
-#if LLVM_VERSION_GE(4, 0)
Expected<StringRef> NameOrErr = Child->getName();
if (!NameOrErr) {
// rustc_codegen_llvm currently doesn't use this error string, but it might be
LLVMRustSetLastError(toString(NameOrErr.takeError()).c_str());
return nullptr;
}
-#else
- ErrorOr<StringRef> NameOrErr = Child->getName();
- if (NameOrErr.getError())
- return nullptr;
-#endif
StringRef Name = NameOrErr.get();
*Size = Name.size();
return Name.data();
extern "C" const char *LLVMRustArchiveChildData(LLVMRustArchiveChildRef Child,
size_t *Size) {
StringRef Buf;
-#if LLVM_VERSION_GE(4, 0)
Expected<StringRef> BufOrErr = Child->getBuffer();
if (!BufOrErr) {
LLVMRustSetLastError(toString(BufOrErr.takeError()).c_str());
return nullptr;
}
-#else
- ErrorOr<StringRef> BufOrErr = Child->getBuffer();
- if (BufOrErr.getError()) {
- LLVMRustSetLastError(BufOrErr.getError().message().c_str());
- return nullptr;
- }
-#endif
Buf = BufOrErr.get();
*Size = Buf.size();
return Buf.data();
std::unique_ptr<MemoryBuffer> Buf =
MemoryBuffer::getMemBufferCopy(StringRef(BC, Len));
-#if LLVM_VERSION_GE(4, 0)
Expected<std::unique_ptr<Module>> SrcOrError =
llvm::getLazyBitcodeModule(Buf->getMemBufferRef(), L->Ctx);
if (!SrcOrError) {
}
auto Src = std::move(*SrcOrError);
-#else
- ErrorOr<std::unique_ptr<Module>> Src =
- llvm::getLazyBitcodeModule(std::move(Buf), L->Ctx);
- if (!Src) {
- LLVMRustSetLastError(Src.getError().message().c_str());
- return false;
- }
-#endif
-#if LLVM_VERSION_GE(4, 0)
if (L->L.linkInModule(std::move(Src))) {
-#else
- if (L->L.linkInModule(std::move(Src.get()))) {
-#endif
LLVMRustSetLastError("");
return false;
}
#include "llvm/Target/TargetSubtargetInfo.h"
#endif
-#if LLVM_VERSION_GE(4, 0)
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/FunctionImport.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#if LLVM_VERSION_LE(4, 0)
#include "llvm/Object/ModuleSummaryIndexObjectFile.h"
#endif
-#endif
#include "llvm-c/Transforms/PassManagerBuilder.h"
-#if LLVM_VERSION_GE(4, 0)
-#define PGO_AVAILABLE
-#endif
-
using namespace llvm;
using namespace llvm::legacy;
LLVMPassManagerBuilderRef PMBR,
LLVMPassManagerRef PMR
) {
-#if LLVM_VERSION_GE(4, 0)
unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
return true;
-#else
- return false;
-#endif
}
#ifdef LLVM_COMPONENT_X86
return Reloc::PIC_;
case LLVMRustRelocMode::DynamicNoPic:
return Reloc::DynamicNoPIC;
-#if LLVM_VERSION_GE(4, 0)
case LLVMRustRelocMode::ROPI:
return Reloc::ROPI;
case LLVMRustRelocMode::RWPI:
return Reloc::RWPI;
case LLVMRustRelocMode::ROPIRWPI:
return Reloc::ROPI_RWPI;
-#else
- default:
- break;
-#endif
}
report_fatal_error("Bad RelocModel.");
}
unwrap(PMBR)->SLPVectorize = SLPVectorize;
unwrap(PMBR)->OptLevel = fromRust(OptLevel);
unwrap(PMBR)->LoopVectorize = LoopVectorize;
-#if LLVM_VERSION_GE(4, 0)
unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
-#endif
-#ifdef PGO_AVAILABLE
if (PGOGenPath) {
assert(!PGOUsePath);
unwrap(PMBR)->EnablePGOInstrGen = true;
assert(!PGOGenPath);
unwrap(PMBR)->PGOInstrUse = PGOUsePath;
}
-#else
- assert(!PGOGenPath && !PGOUsePath && "Should've caught earlier");
-#endif
}
// Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
LLVMInitializePasses();
struct MyListener : PassRegistrationListener {
void passEnumerate(const PassInfo *Info) {
-#if LLVM_VERSION_GE(4, 0)
StringRef PassArg = Info->getPassArgument();
StringRef PassName = Info->getPassName();
if (!PassArg.empty()) {
printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
(int)PassName.size(), PassName.data());
}
-#else
- if (Info->getPassArgument() && *Info->getPassArgument()) {
- printf("%15s - %s\n", Info->getPassArgument(), Info->getPassName());
- }
-#endif
}
} Listener;
extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
bool AddLifetimes) {
-#if LLVM_VERSION_GE(4, 0)
unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
-#else
- unwrap(PMBR)->Inliner = createAlwaysInlinerPass(AddLifetimes);
-#endif
}
extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
unwrap(M)->setPIELevel(PIELevel::Level::Large);
}
-extern "C" bool
-LLVMRustThinLTOAvailable() {
-#if LLVM_VERSION_GE(4, 0)
- return true;
-#else
- return false;
-#endif
-}
-
-extern "C" bool
-LLVMRustPGOAvailable() {
-#ifdef PGO_AVAILABLE
- return true;
-#else
- return false;
-#endif
-}
-
-#if LLVM_VERSION_GE(4, 0)
-
// Here you'll find an implementation of ThinLTO as used by the Rust compiler
// right now. This ThinLTO support is only enabled on "recent ish" versions of
// LLVM, and otherwise it's just blanket rejected from other compilers.
MD->clearOperands();
MD->addOperand(Unit);
}
-
-#else
-
-struct LLVMRustThinLTOData {
-};
-
-struct LLVMRustThinLTOModule {
-};
-
-extern "C" LLVMRustThinLTOData*
-LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
- int num_modules,
- const char **preserved_symbols,
- int num_symbols) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" bool
-LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" bool
-LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" bool
-LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" bool
-LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" LLVMRustThinLTOModuleImports
-LLVMRustGetLLVMRustThinLTOModuleImports(const LLVMRustThinLTOData *Data) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" void
-LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
- report_fatal_error("ThinLTO not available");
-}
-
-struct LLVMRustThinLTOBuffer {
-};
-
-extern "C" LLVMRustThinLTOBuffer*
-LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" void
-LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" const void*
-LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" size_t
-LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" LLVMModuleRef
-LLVMRustParseBitcodeForThinLTO(LLVMContextRef Context,
- const char *data,
- size_t len,
- const char *identifier) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" void
-LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
- DICompileUnit **A,
- DICompileUnit **B) {
- report_fatal_error("ThinLTO not available");
-}
-
-extern "C" void
-LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod) {
- report_fatal_error("ThinLTO not available");
-}
-
-#endif // LLVM_VERSION_GE(4, 0)
unwrapDI<DIType>(VTableHolder), UniqueId));
}
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantPart(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Discriminator,
+ LLVMMetadataRef Elements, const char *UniqueId) {
+#if LLVM_VERSION_GE(7, 0)
+ return wrap(Builder->createVariantPart(
+ unwrapDI<DIDescriptor>(Scope), Name, unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIDerivedType>(Discriminator),
+ DINodeArray(unwrapDI<MDTuple>(Elements)), UniqueId));
+#else
+ abort();
+#endif
+}
+
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateMemberType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantMemberType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, LLVMValueRef Discriminant,
+ LLVMRustDIFlags Flags, LLVMMetadataRef Ty) {
+#if LLVM_VERSION_GE(7, 0)
+ llvm::ConstantInt* D = nullptr;
+ if (Discriminant) {
+ D = unwrap<llvm::ConstantInt>(Discriminant);
+ }
+ return wrap(Builder->createVariantMemberType(unwrapDI<DIDescriptor>(Scope), Name,
+ unwrapDI<DIFile>(File), LineNo,
+ SizeInBits, AlignInBits, OffsetInBits, D,
+ fromRust(Flags), unwrapDI<DIType>(Ty)));
+#else
+ return wrap(Builder->createMemberType(unwrapDI<DIDescriptor>(Scope), Name,
+ unwrapDI<DIFile>(File), LineNo,
+ SizeInBits, AlignInBits, OffsetInBits,
+ fromRust(Flags), unwrapDI<DIType>(Ty)));
+#endif
+}
+
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlock(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
LLVMMetadataRef File, unsigned Line, unsigned Col) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMMetadataRef Elements,
- LLVMMetadataRef ClassTy) {
+ LLVMMetadataRef ClassTy, bool IsFixed) {
+#if LLVM_VERSION_GE(7, 0)
+ return wrap(Builder->createEnumerationType(
+ unwrapDI<DIDescriptor>(Scope), Name, unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, DINodeArray(unwrapDI<MDTuple>(Elements)),
+ unwrapDI<DIType>(ClassTy), "", IsFixed));
+#else
+ // Ignore IsFixed on older LLVM.
return wrap(Builder->createEnumerationType(
unwrapDI<DIDescriptor>(Scope), Name, unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, DINodeArray(unwrapDI<MDTuple>(Elements)),
- unwrapDI<DIType>(ClassTy)));
+ unwrapDI<DIType>(ClassTy), ""));
+#endif
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateUnionType(
,
unwrapDI<DIFile>(File), LineNo
#endif
-#if LLVM_VERSION_GE(4, 0)
,
false // ExportSymbols (only relevant for C++ anonymous namespaces)
-#endif
));
}
}
#endif
-#if LLVM_VERSION_LT(4, 0)
-extern "C" LLVMValueRef
-LLVMBuildExactUDiv(LLVMBuilderRef B, LLVMValueRef LHS,
- LLVMValueRef RHS, const char *Name) {
- return wrap(unwrap(B)->CreateExactUDiv(unwrap(LHS), unwrap(RHS), Name));
-}
-#endif
-
#if LLVM_VERSION_GE(6, 0)
extern "C" LLVMValueRef
LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
#include "llvm/IR/LegacyPassManager.h"
-#if LLVM_VERSION_GE(4, 0)
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
-#else
-#include "llvm/Bitcode/ReaderWriter.h"
-#endif
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
# source tarball for a stable release you'll likely see `1.x.0` for rustc and
# `0.x.0` for Cargo where they were released on `date`.
-date: 2018-10-13
+date: 2018-10-30
rustc: beta
cargo: beta
-Subproject commit 431766a3fbcfb6dafb2d5a3866c1609bf44ee554
+Subproject commit 0309be1ade6bf61066f2c69f77ac3567b7dc31b5
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test depends on a patch that was committed to upstream LLVM
+// before 7.0, then backported to the Rust LLVM fork. It tests that
+// debug info for "c-like" enums is properly emitted.
+
+// ignore-tidy-linelength
+// ignore-windows
+// min-system-llvm-version 7.0
+
+// compile-flags: -g -C no-prepopulate-passes
+
+// CHECK-LABEL: @main
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_enumeration_type,{{.*}}name: "E",{{.*}}flags: DIFlagFixedEnum,{{.*}}
+// CHECK: {{.*}}DIEnumerator{{.*}}name: "A",{{.*}}value: {{[0-9].*}}
+// CHECK: {{.*}}DIEnumerator{{.*}}name: "B",{{.*}}value: {{[0-9].*}}
+// CHECK: {{.*}}DIEnumerator{{.*}}name: "C",{{.*}}value: {{[0-9].*}}
+
+#![allow(dead_code)]
+#![allow(unused_variables)]
+#![allow(unused_assignments)]
+
+enum E { A, B, C }
+
+pub fn main() {
+ let e = E::C;
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test depends on a patch that was committed to upstream LLVM
+// before 7.0, then backported to the Rust LLVM fork. It tests that
+// optimized enum debug info accurately reflects the enum layout.
+
+// ignore-tidy-linelength
+// ignore-windows
+// min-system-llvm-version 7.0
+
+// compile-flags: -g -C no-prepopulate-passes
+
+// CHECK-LABEL: @main
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_variant_part,{{.*}}discriminator:{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "A",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "A",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "B",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "B",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "C",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "C",{{.*}}
+// CHECK-NOT: {{.*}}DIDerivedType{{.*}}name: "D",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "D",{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "D",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}flags: DIFlagArtificial{{.*}}
+
+#![allow(dead_code)]
+#![allow(unused_variables)]
+#![allow(unused_assignments)]
+
+enum E { A, B, C, D(bool) }
+
+pub fn main() {
+ let e = E::D(true);
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test depends on a patch that was committed to upstream LLVM
+// before 7.0, then backported to the Rust LLVM fork. It tests that
+// debug info for tagged (ordinary) enums is properly emitted.
+
+// ignore-tidy-linelength
+// ignore-windows
+// min-system-llvm-version 7.0
+
+// compile-flags: -g -C no-prepopulate-passes
+
+// CHECK-LABEL: @main
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "E",{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_variant_part,{{.*}}discriminator:{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "A",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "A",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "__0",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "B",{{.*}}extraData:{{.*}}
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_structure_type,{{.*}}name: "B",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "__0",{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}flags: DIFlagArtificial{{.*}}
+
+#![allow(dead_code)]
+#![allow(unused_variables)]
+#![allow(unused_assignments)]
+
+enum E { A(u32), B(u32) }
+
+pub fn main() {
+ let e = E::A(23);
+}
// min-lldb-version: 310
+// This fails on lldb 6.0.1 on x86-64 Fedora 28; so mark it macOS-only
+// for now.
+// only-macos
+
// compile-flags:-g
// === GDB TESTS ===================================================================================
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 7.11.90 - 7.12.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:run
+
+// gdb-command:print *the_a_ref
+// gdbg-check:$1 = {{RUST$ENUM$DISR = TheA, x = 0, y = 8970181431921507452}, {RUST$ENUM$DISR = TheA, [...]}}
+// gdbr-check:$1 = borrowed_enum_legacy::ABC::TheA{x: 0, y: 8970181431921507452}
+
+// gdb-command:print *the_b_ref
+// gdbg-check:$2 = {{RUST$ENUM$DISR = TheB, [...]}, {RUST$ENUM$DISR = TheB, __0 = 0, __1 = 286331153, __2 = 286331153}}
+// gdbr-check:$2 = borrowed_enum_legacy::ABC::TheB(0, 286331153, 286331153)
+
+// gdb-command:print *univariant_ref
+// gdbg-check:$3 = {{__0 = 4820353753753434}}
+// gdbr-check:$3 = borrowed_enum_legacy::Univariant::TheOnlyCase(4820353753753434)
+
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:run
+
+// lldb-command:print *the_a_ref
+// lldbg-check:[...]$0 = TheA { x: 0, y: 8970181431921507452 }
+// lldbr-check:(borrowed_enum_legacy::ABC::TheA) *the_a_ref = TheA { borrowed_enum_legacy::ABC::TheA: 0, borrowed_enum_legacy::ABC::TheB: 8970181431921507452 }
+// lldb-command:print *the_b_ref
+// lldbg-check:[...]$1 = TheB(0, 286331153, 286331153)
+// lldbr-check:(borrowed_enum_legacy::ABC::TheB) *the_b_ref = { = 0 = 286331153 = 286331153 }
+// lldb-command:print *univariant_ref
+// lldbg-check:[...]$2 = TheOnlyCase(4820353753753434)
+// lldbr-check:(borrowed_enum_legacy::Univariant) *univariant_ref = { borrowed_enum_legacy::TheOnlyCase = { = 4820353753753434 } }
+
+#![allow(unused_variables)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
+// the size of the discriminant value is machine dependent, it has to be taken into account when
+// the datatype layout should be predictable, as in this case.
+enum ABC {
+ TheA { x: i64, y: i64 },
+ TheB (i64, i32, i32),
+}
+
+// This is a special case since it does not have the implicit discriminant field.
+enum Univariant {
+ TheOnlyCase(i64)
+}
+
+fn main() {
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let the_a = ABC::TheA { x: 0, y: 8970181431921507452 };
+ let the_a_ref: &ABC = &the_a;
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let the_b = ABC::TheB (0, 286331153, 286331153);
+ let the_b_ref: &ABC = &the_b;
+
+ let univariant = Univariant::TheOnlyCase(4820353753753434);
+ let univariant_ref: &Univariant = &univariant;
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// except according to those terms.
// ignore-tidy-linelength
-// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb or lldb that can read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+// rust-lldb
// compile-flags:-g
// gdb-command:run
// gdb-command:print *the_a_ref
-// gdbg-check:$1 = {{RUST$ENUM$DISR = TheA, x = 0, y = 8970181431921507452}, {RUST$ENUM$DISR = TheA, [...]}}
// gdbr-check:$1 = borrowed_enum::ABC::TheA{x: 0, y: 8970181431921507452}
// gdb-command:print *the_b_ref
-// gdbg-check:$2 = {{RUST$ENUM$DISR = TheB, [...]}, {RUST$ENUM$DISR = TheB, __0 = 0, __1 = 286331153, __2 = 286331153}}
// gdbr-check:$2 = borrowed_enum::ABC::TheB(0, 286331153, 286331153)
// gdb-command:print *univariant_ref
-// gdbg-check:$3 = {{__0 = 4820353753753434}}
// gdbr-check:$3 = borrowed_enum::Univariant::TheOnlyCase(4820353753753434)
// lldb-command:run
// lldb-command:print *the_a_ref
-// lldbg-check:[...]$0 = TheA { x: 0, y: 8970181431921507452 }
-// lldbr-check:(borrowed_enum::ABC::TheA) *the_a_ref = TheA { borrowed_enum::ABC::TheA: 0, borrowed_enum::ABC::TheB: 8970181431921507452 }
+// lldbr-check:(borrowed_enum::ABC::TheA) *the_a_ref = TheA { TheA: 0, TheB: 8970181431921507452 }
// lldb-command:print *the_b_ref
-// lldbg-check:[...]$1 = TheB(0, 286331153, 286331153)
// lldbr-check:(borrowed_enum::ABC::TheB) *the_b_ref = { = 0 = 286331153 = 286331153 }
// lldb-command:print *univariant_ref
-// lldbg-check:[...]$2 = TheOnlyCase(4820353753753434)
-// lldbr-check:(borrowed_enum::Univariant) *univariant_ref = { borrowed_enum::TheOnlyCase = { = 4820353753753434 } }
+// lldbr-check:(borrowed_enum::Univariant) *univariant_ref = { TheOnlyCase = { = 4820353753753434 } }
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
// min-lldb-version: 310
+// This fails on lldb 6.0.1 on x86-64 Fedora 28; so mark it macOS-only
+// for now.
+// only-macos
+
// aux-build:cross_crate_spans.rs
extern crate cross_crate_spans;
// min-lldb-version: 310
+// This fails on lldb 6.0.1 on x86-64 Fedora 28; so mark it macOS-only
+// for now.
+// only-macos
+
// compile-flags:-g
// === GDB TESTS ===================================================================================
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// ignore-lldb: FIXME(#27089)
+// min-lldb-version: 310
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+// gdb-command:run
+
+// gdb-command:print eight_bytes1
+// gdbg-check:$1 = {{RUST$ENUM$DISR = Variant1, __0 = 100}, {RUST$ENUM$DISR = Variant1, __0 = 100}}
+// gdbr-check:$1 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant1(100)
+
+// gdb-command:print four_bytes1
+// gdbg-check:$2 = {{RUST$ENUM$DISR = Variant1, __0 = 101}, {RUST$ENUM$DISR = Variant1, __0 = 101}}
+// gdbr-check:$2 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant1(101)
+
+// gdb-command:print two_bytes1
+// gdbg-check:$3 = {{RUST$ENUM$DISR = Variant1, __0 = 102}, {RUST$ENUM$DISR = Variant1, __0 = 102}}
+// gdbr-check:$3 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant1(102)
+
+// gdb-command:print one_byte1
+// gdbg-check:$4 = {{RUST$ENUM$DISR = Variant1, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant1, __0 = 65 'A'}}
+// gdbr-check:$4 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant1(65)
+
+
+// gdb-command:print eight_bytes2
+// gdbg-check:$5 = {{RUST$ENUM$DISR = Variant2, __0 = 100}, {RUST$ENUM$DISR = Variant2, __0 = 100}}
+// gdbr-check:$5 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant2(100)
+
+// gdb-command:print four_bytes2
+// gdbg-check:$6 = {{RUST$ENUM$DISR = Variant2, __0 = 101}, {RUST$ENUM$DISR = Variant2, __0 = 101}}
+// gdbr-check:$6 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant2(101)
+
+// gdb-command:print two_bytes2
+// gdbg-check:$7 = {{RUST$ENUM$DISR = Variant2, __0 = 102}, {RUST$ENUM$DISR = Variant2, __0 = 102}}
+// gdbr-check:$7 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant2(102)
+
+// gdb-command:print one_byte2
+// gdbg-check:$8 = {{RUST$ENUM$DISR = Variant2, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant2, __0 = 65 'A'}}
+// gdbr-check:$8 = generic_enum_with_different_disr_sizes_legacy::Enum::Variant2(65)
+
+// gdb-command:continue
+
+// === LLDB TESTS ==================================================================================
+// lldb-command:run
+
+// lldb-command:print eight_bytes1
+// lldb-check:[...]$0 = Variant1(100)
+// lldb-command:print four_bytes1
+// lldb-check:[...]$1 = Variant1(101)
+// lldb-command:print two_bytes1
+// lldb-check:[...]$2 = Variant1(102)
+// lldb-command:print one_byte1
+// lldb-check:[...]$3 = Variant1('A')
+
+// lldb-command:print eight_bytes2
+// lldb-check:[...]$4 = Variant2(100)
+// lldb-command:print four_bytes2
+// lldb-check:[...]$5 = Variant2(101)
+// lldb-command:print two_bytes2
+// lldb-check:[...]$6 = Variant2(102)
+// lldb-command:print one_byte2
+// lldb-check:[...]$7 = Variant2('A')
+
+// lldb-command:continue
+
+#![allow(unused_variables)]
+#![allow(dead_code)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+// This test case makes sure that we get correct type descriptions for the enum
+// discriminant of different instantiations of the same generic enum type where,
+// depending on the generic type parameter(s), the discriminant has a
+// different size in memory.
+
+enum Enum<T> {
+ Variant1(T),
+ Variant2(T)
+}
+
+fn main() {
+ // These are ordered for descending size on purpose
+ let eight_bytes1 = Enum::Variant1(100.0f64);
+ let four_bytes1 = Enum::Variant1(101i32);
+ let two_bytes1 = Enum::Variant1(102i16);
+ let one_byte1 = Enum::Variant1(65u8);
+
+ let eight_bytes2 = Enum::Variant2(100.0f64);
+ let four_bytes2 = Enum::Variant2(101i32);
+ let two_bytes2 = Enum::Variant2(102i16);
+ let one_byte2 = Enum::Variant2(65u8);
+
+ zzz(); // #break
+}
+
+fn zzz() { () }
// ignore-lldb: FIXME(#27089)
// min-lldb-version: 310
+// Require LLVM with DW_TAG_variant_part and a gdb that can read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print eight_bytes1
-// gdbg-check:$1 = {{RUST$ENUM$DISR = Variant1, __0 = 100}, {RUST$ENUM$DISR = Variant1, __0 = 100}}
-// gdbr-check:$1 = generic_enum_with_different_disr_sizes::Enum::Variant1(100)
+// gdbr-check:$1 = generic_enum_with_different_disr_sizes::Enum<f64>::Variant1(100)
// gdb-command:print four_bytes1
-// gdbg-check:$2 = {{RUST$ENUM$DISR = Variant1, __0 = 101}, {RUST$ENUM$DISR = Variant1, __0 = 101}}
-// gdbr-check:$2 = generic_enum_with_different_disr_sizes::Enum::Variant1(101)
+// gdbr-check:$2 = generic_enum_with_different_disr_sizes::Enum<i32>::Variant1(101)
// gdb-command:print two_bytes1
-// gdbg-check:$3 = {{RUST$ENUM$DISR = Variant1, __0 = 102}, {RUST$ENUM$DISR = Variant1, __0 = 102}}
-// gdbr-check:$3 = generic_enum_with_different_disr_sizes::Enum::Variant1(102)
+// gdbr-check:$3 = generic_enum_with_different_disr_sizes::Enum<i16>::Variant1(102)
// gdb-command:print one_byte1
-// gdbg-check:$4 = {{RUST$ENUM$DISR = Variant1, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant1, __0 = 65 'A'}}
-// gdbr-check:$4 = generic_enum_with_different_disr_sizes::Enum::Variant1(65)
+// gdbr-check:$4 = generic_enum_with_different_disr_sizes::Enum<u8>::Variant1(65)
// gdb-command:print eight_bytes2
-// gdbg-check:$5 = {{RUST$ENUM$DISR = Variant2, __0 = 100}, {RUST$ENUM$DISR = Variant2, __0 = 100}}
-// gdbr-check:$5 = generic_enum_with_different_disr_sizes::Enum::Variant2(100)
+// gdbr-check:$5 = generic_enum_with_different_disr_sizes::Enum<f64>::Variant2(100)
// gdb-command:print four_bytes2
-// gdbg-check:$6 = {{RUST$ENUM$DISR = Variant2, __0 = 101}, {RUST$ENUM$DISR = Variant2, __0 = 101}}
-// gdbr-check:$6 = generic_enum_with_different_disr_sizes::Enum::Variant2(101)
+// gdbr-check:$6 = generic_enum_with_different_disr_sizes::Enum<i32>::Variant2(101)
// gdb-command:print two_bytes2
-// gdbg-check:$7 = {{RUST$ENUM$DISR = Variant2, __0 = 102}, {RUST$ENUM$DISR = Variant2, __0 = 102}}
-// gdbr-check:$7 = generic_enum_with_different_disr_sizes::Enum::Variant2(102)
+// gdbr-check:$7 = generic_enum_with_different_disr_sizes::Enum<i16>::Variant2(102)
// gdb-command:print one_byte2
-// gdbg-check:$8 = {{RUST$ENUM$DISR = Variant2, __0 = 65 'A'}, {RUST$ENUM$DISR = Variant2, __0 = 65 'A'}}
-// gdbr-check:$8 = generic_enum_with_different_disr_sizes::Enum::Variant2(65)
+// gdbr-check:$8 = generic_enum_with_different_disr_sizes::Enum<u8>::Variant2(65)
// gdb-command:continue
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// gdb-command:set print union on
+// gdb-command:run
+
+// gdb-command:print case1
+// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
+// gdbr-check:$1 = generic_struct_style_enum_legacy::Regular::Case1{a: 0, b: 31868, c: 31868, d: 31868, e: 31868}
+
+// gdb-command:print case2
+// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, a = 0, b = 286331153, c = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
+// gdbr-check:$2 = generic_struct_style_enum_legacy::Regular::Case2{a: 0, b: 286331153, c: 286331153}
+
+// gdb-command:print case3
+// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, a = 0, b = 6438275382588823897}}
+// gdbr-check:$3 = generic_struct_style_enum_legacy::Regular::Case3{a: 0, b: 6438275382588823897}
+
+// gdb-command:print univariant
+// gdbg-check:$4 = {{a = -1}}
+// gdbr-check:$4 = generic_struct_style_enum_legacy::Univariant<i32>::TheOnlyCase{a: -1}
+
+
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+use self::Regular::{Case1, Case2, Case3};
+use self::Univariant::TheOnlyCase;
+
+// NOTE: This is a copy of the non-generic test case. The `Txx` type parameters have to be
+// substituted with something of size `xx` bits and the same alignment as an integer type of the
+// same size.
+
+// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
+// the size of the discriminant value is machine dependent, it has to be taken into account when
+// the datatype layout should be predictable, as in this case.
+enum Regular<T16, T32, T64> {
+ Case1 { a: T64, b: T16, c: T16, d: T16, e: T16},
+ Case2 { a: T64, b: T32, c: T32},
+ Case3 { a: T64, b: T64 }
+}
+
+enum Univariant<T> {
+ TheOnlyCase { a: T }
+}
+
+fn main() {
+
+ // In order to avoid endianness trouble all of the following test values consist of a single
+ // repeated byte. This way each interpretation of the union should look the same, no matter if
+ // this is a big or little endian machine.
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let case1: Regular<u16, u32, i64> = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let case2: Regular<i16, u32, i64> = Case2 { a: 0, b: 286331153, c: 286331153 };
+
+ // 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
+ // 0b01011001010110010101100101011001 = 1499027801
+ // 0b0101100101011001 = 22873
+ // 0b01011001 = 89
+ let case3: Regular<u16, i32, u64> = Case3 { a: 0, b: 6438275382588823897 };
+
+ let univariant = TheOnlyCase { a: -1 };
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// ignore-tidy-linelength
// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb that can read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
// compile-flags:-g
// gdb-command:run
// gdb-command:print case1
-// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
-// gdbr-check:$1 = generic_struct_style_enum::Regular::Case1{a: 0, b: 31868, c: 31868, d: 31868, e: 31868}
+// gdbr-check:$1 = generic_struct_style_enum::Regular<u16, u32, i64>::Case1{a: 0, b: 31868, c: 31868, d: 31868, e: 31868}
// gdb-command:print case2
-// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, a = 0, b = 286331153, c = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
-// gdbr-check:$2 = generic_struct_style_enum::Regular::Case2{a: 0, b: 286331153, c: 286331153}
+// gdbr-check:$2 = generic_struct_style_enum::Regular<i16, u32, i64>::Case2{a: 0, b: 286331153, c: 286331153}
// gdb-command:print case3
-// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, a = 0, b = 6438275382588823897}}
-// gdbr-check:$3 = generic_struct_style_enum::Regular::Case3{a: 0, b: 6438275382588823897}
+// gdbr-check:$3 = generic_struct_style_enum::Regular<u16, i32, u64>::Case3{a: 0, b: 6438275382588823897}
// gdb-command:print univariant
-// gdbg-check:$4 = {{a = -1}}
// gdbr-check:$4 = generic_struct_style_enum::Univariant<i32>::TheOnlyCase{a: -1}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:set print union on
+// gdb-command:run
+
+// gdb-command:print case1
+// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, __0 = 0, __1 = 31868, __2 = 31868, __3 = 31868, __4 = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
+// gdbr-check:$1 = generic_tuple_style_enum_legacy::Regular::Case1(0, 31868, 31868, 31868, 31868)
+
+// gdb-command:print case2
+// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, __0 = 0, __1 = 286331153, __2 = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
+// gdbr-check:$2 = generic_tuple_style_enum_legacy::Regular::Case2(0, 286331153, 286331153)
+
+// gdb-command:print case3
+// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, __0 = 0, __1 = 6438275382588823897}}
+// gdbr-check:$3 = generic_tuple_style_enum_legacy::Regular::Case3(0, 6438275382588823897)
+
+// gdb-command:print univariant
+// gdbg-check:$4 = {{__0 = -1}}
+// gdbr-check:$4 = generic_tuple_style_enum_legacy::Univariant<i64>::TheOnlyCase(-1)
+
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:run
+
+// lldb-command:print case1
+// lldbg-check:[...]$0 = Case1(0, 31868, 31868, 31868, 31868)
+// lldbr-check:(generic_tuple_style_enum_legacy::Regular<u16, u32, u64>::Case1) case1 = { = 0 = 31868 = 31868 = 31868 = 31868 }
+
+// lldb-command:print case2
+// lldbg-check:[...]$1 = Case2(0, 286331153, 286331153)
+// lldbr-check:(generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case2) case2 = Regular<i16, i32, i64>::Case2 { generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case1: 0, generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case2: 286331153, generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case3: 286331153 }
+
+// lldb-command:print case3
+// lldbg-check:[...]$2 = Case3(0, 6438275382588823897)
+// lldbr-check:(generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case3) case3 = Regular<i16, i32, i64>::Case3 { generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case1: 0, generic_tuple_style_enum_legacy::Regular<i16, i32, i64>::Case2: 6438275382588823897 }
+
+// lldb-command:print univariant
+// lldbg-check:[...]$3 = TheOnlyCase(-1)
+// lldbr-check:(generic_tuple_style_enum_legacy::Univariant<i64>) univariant = { generic_tuple_style_enum_legacy::TheOnlyCase = { = -1 } }
+
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+use self::Regular::{Case1, Case2, Case3};
+use self::Univariant::TheOnlyCase;
+
+// NOTE: This is a copy of the non-generic test case. The `Txx` type parameters have to be
+// substituted with something of size `xx` bits and the same alignment as an integer type of the
+// same size.
+
+// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
+// the size of the discriminant value is machine dependent, this has to be taken into account when
+// the datatype layout should be predictable, as in this case.
+enum Regular<T16, T32, T64> {
+ Case1(T64, T16, T16, T16, T16),
+ Case2(T64, T32, T32),
+ Case3(T64, T64)
+}
+
+enum Univariant<T64> {
+ TheOnlyCase(T64)
+}
+
+fn main() {
+
+ // In order to avoid endianness trouble all of the following test values consist of a single
+ // repeated byte. This way each interpretation of the union should look the same, no matter if
+ // this is a big or little endian machine.
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let case1: Regular<u16, u32, u64> = Case1(0_u64, 31868_u16, 31868_u16, 31868_u16, 31868_u16);
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let case2: Regular<i16, i32, i64> = Case2(0_i64, 286331153_i32, 286331153_i32);
+
+ // 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
+ // 0b01011001010110010101100101011001 = 1499027801
+ // 0b0101100101011001 = 22873
+ // 0b01011001 = 89
+ let case3: Regular<i16, i32, i64> = Case3(0_i64, 6438275382588823897_i64);
+
+ let univariant = TheOnlyCase(-1_i64);
+
+ zzz(); // #break
+}
+
+fn zzz() { () }
// except according to those terms.
// ignore-tidy-linelength
-// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb and lldb that can
+// read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+// rust-lldb
// compile-flags:-g
// gdb-command:run
// gdb-command:print case1
-// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, __0 = 0, __1 = 31868, __2 = 31868, __3 = 31868, __4 = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
-// gdbr-check:$1 = generic_tuple_style_enum::Regular::Case1(0, 31868, 31868, 31868, 31868)
+// gdbr-check:$1 = generic_tuple_style_enum::Regular<u16, u32, u64>::Case1(0, 31868, 31868, 31868, 31868)
// gdb-command:print case2
-// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, __0 = 0, __1 = 286331153, __2 = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
-// gdbr-check:$2 = generic_tuple_style_enum::Regular::Case2(0, 286331153, 286331153)
+// gdbr-check:$2 = generic_tuple_style_enum::Regular<i16, i32, i64>::Case2(0, 286331153, 286331153)
// gdb-command:print case3
-// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, __0 = 0, __1 = 6438275382588823897}}
-// gdbr-check:$3 = generic_tuple_style_enum::Regular::Case3(0, 6438275382588823897)
+// gdbr-check:$3 = generic_tuple_style_enum::Regular<i16, i32, i64>::Case3(0, 6438275382588823897)
// gdb-command:print univariant
-// gdbg-check:$4 = {{__0 = -1}}
// gdbr-check:$4 = generic_tuple_style_enum::Univariant<i64>::TheOnlyCase(-1)
// lldb-command:run
// lldb-command:print case1
-// lldbg-check:[...]$0 = Case1(0, 31868, 31868, 31868, 31868)
// lldbr-check:(generic_tuple_style_enum::Regular<u16, u32, u64>::Case1) case1 = { = 0 = 31868 = 31868 = 31868 = 31868 }
// lldb-command:print case2
-// lldbg-check:[...]$1 = Case2(0, 286331153, 286331153)
-// lldbr-check:(generic_tuple_style_enum::Regular<i16, i32, i64>::Case2) case2 = Regular<i16, i32, i64>::Case2 { generic_tuple_style_enum::Regular<i16, i32, i64>::Case1: 0, generic_tuple_style_enum::Regular<i16, i32, i64>::Case2: 286331153, generic_tuple_style_enum::Regular<i16, i32, i64>::Case3: 286331153 }
+// lldbr-check:(generic_tuple_style_enum::Regular<i16, i32, i64>::Case2) case2 = Regular<i16, i32, i64>::Case2 { Case1: 0, Case2: 286331153, Case3: 286331153 }
// lldb-command:print case3
-// lldbg-check:[...]$2 = Case3(0, 6438275382588823897)
-// lldbr-check:(generic_tuple_style_enum::Regular<i16, i32, i64>::Case3) case3 = Regular<i16, i32, i64>::Case3 { generic_tuple_style_enum::Regular<i16, i32, i64>::Case1: 0, generic_tuple_style_enum::Regular<i16, i32, i64>::Case2: 6438275382588823897 }
+// lldbr-check:(generic_tuple_style_enum::Regular<i16, i32, i64>::Case3) case3 = Regular<i16, i32, i64>::Case3 { Case1: 0, Case2: 6438275382588823897 }
// lldb-command:print univariant
-// lldbg-check:[...]$3 = TheOnlyCase(-1)
-// lldbr-check:(generic_tuple_style_enum::Univariant<i64>) univariant = { generic_tuple_style_enum::TheOnlyCase = { = -1 } }
+// lldbr-check:(generic_tuple_style_enum::Univariant<i64>) univariant = { TheOnlyCase = { = -1 } }
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// STACK BY REF
// lldb-command:print *self
// lldbg-check:[...]$0 = TupleStruct(100, -100.5)
-// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = { = 100 = -100.5 }
+// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = TupleStruct(100, -100.5)
// lldb-command:print arg1
// lldbg-check:[...]$1 = -1
// lldbr-check:(isize) arg1 = -1
// STACK BY VAL
// lldb-command:print self
// lldbg-check:[...]$3 = TupleStruct(100, -100.5)
-// lldbr-check:(method_on_tuple_struct::TupleStruct) self = { = 100 = -100.5 }
+// lldbr-check:(method_on_tuple_struct::TupleStruct) self = TupleStruct(100, -100.5)
// lldb-command:print arg1
// lldbg-check:[...]$4 = -3
// lldbr-check:(isize) arg1 = -3
// OWNED BY REF
// lldb-command:print *self
// lldbg-check:[...]$6 = TupleStruct(200, -200.5)
-// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = { = 200 = -200.5 }
+// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = TupleStruct(200, -200.5)
// lldb-command:print arg1
// lldbg-check:[...]$7 = -5
// lldbr-check:(isize) arg1 = -5
// OWNED BY VAL
// lldb-command:print self
// lldbg-check:[...]$9 = TupleStruct(200, -200.5)
-// lldbr-check:(method_on_tuple_struct::TupleStruct) self = { = 200 = -200.5 }
+// lldbr-check:(method_on_tuple_struct::TupleStruct) self = TupleStruct(200, -200.5)
// lldb-command:print arg1
// lldbg-check:[...]$10 = -7
// lldbr-check:(isize) arg1 = -7
// OWNED MOVED
// lldb-command:print *self
// lldbg-check:[...]$12 = TupleStruct(200, -200.5)
-// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = { = 200 = -200.5 }
+// lldbr-check:(method_on_tuple_struct::TupleStruct) *self = TupleStruct(200, -200.5)
// lldb-command:print arg1
// lldbg-check:[...]$13 = -9
// lldbr-check:(isize) arg1 = -9
// ignore-lldb
+// Require LLVM with DW_TAG_variant_part and a gdb that can read it.
+// gdb 8.2.0 crashes on this test case, see
+// https://sourceware.org/bugzilla/show_bug.cgi?id=23626
+// This will be fixed in the next release, which will be >= 8.2.1.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2.1
+
// compile-flags:-g
// gdb-command:run
// gdb-command:print first
-// gdbg-check:$1 = {<No data fields>}
-// gdbr-check:$1 = <error reading variable>
+// gdbr-check:$1 = nil_enum::ANilEnum {<No data fields>}
// gdb-command:print second
-// gdbg-check:$2 = {<No data fields>}
-// gdbr-check:$2 = <error reading variable>
+// gdbr-check:$2 = nil_enum::AnotherNilEnum {<No data fields>}
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// ignore-lldb
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 7.11.90 - 7.12.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// gdb-command:run
+
+// gdb-command:print stack_unique.value
+// gdb-check:$1 = 0
+// gdbg-command:print stack_unique.next.RUST$ENCODED$ENUM$0$Empty.val->value
+// gdbr-command:print stack_unique.next.val.value
+// gdb-check:$2 = 1
+
+// gdbg-command:print unique_unique->value
+// gdbr-command:print unique_unique.value
+// gdb-check:$3 = 2
+// gdbg-command:print unique_unique->next.RUST$ENCODED$ENUM$0$Empty.val->value
+// gdbr-command:print unique_unique.next.val.value
+// gdb-check:$4 = 3
+
+// gdb-command:print vec_unique[0].value
+// gdb-check:$5 = 6.5
+// gdbg-command:print vec_unique[0].next.RUST$ENCODED$ENUM$0$Empty.val->value
+// gdbr-command:print vec_unique[0].next.val.value
+// gdb-check:$6 = 7.5
+
+// gdbg-command:print borrowed_unique->value
+// gdbr-command:print borrowed_unique.value
+// gdb-check:$7 = 8.5
+// gdbg-command:print borrowed_unique->next.RUST$ENCODED$ENUM$0$Empty.val->value
+// gdbr-command:print borrowed_unique.next.val.value
+// gdb-check:$8 = 9.5
+
+// LONG CYCLE
+// gdb-command:print long_cycle1.value
+// gdb-check:$9 = 20
+// gdbg-command:print long_cycle1.next->value
+// gdbr-command:print long_cycle1.next.value
+// gdb-check:$10 = 21
+// gdbg-command:print long_cycle1.next->next->value
+// gdbr-command:print long_cycle1.next.next.value
+// gdb-check:$11 = 22
+// gdbg-command:print long_cycle1.next->next->next->value
+// gdbr-command:print long_cycle1.next.next.next.value
+// gdb-check:$12 = 23
+
+// gdb-command:print long_cycle2.value
+// gdb-check:$13 = 24
+// gdbg-command:print long_cycle2.next->value
+// gdbr-command:print long_cycle2.next.value
+// gdb-check:$14 = 25
+// gdbg-command:print long_cycle2.next->next->value
+// gdbr-command:print long_cycle2.next.next.value
+// gdb-check:$15 = 26
+
+// gdb-command:print long_cycle3.value
+// gdb-check:$16 = 27
+// gdbg-command:print long_cycle3.next->value
+// gdbr-command:print long_cycle3.next.value
+// gdb-check:$17 = 28
+
+// gdb-command:print long_cycle4.value
+// gdb-check:$18 = 29.5
+
+// gdbg-command:print (*****long_cycle_w_anonymous_types).value
+// gdbr-command:print long_cycle_w_anonymous_types.value
+// gdb-check:$19 = 30
+
+// gdbg-command:print (*****((*****long_cycle_w_anonymous_types).next.RUST$ENCODED$ENUM$0$Empty.val)).value
+// gdbr-command:print long_cycle_w_anonymous_types.next.val.value
+// gdb-check:$20 = 31
+
+// gdb-command:continue
+
+#![allow(unused_variables)]
+#![feature(box_syntax)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+use self::Opt::{Empty, Val};
+
+enum Opt<T> {
+ Empty,
+ Val { val: T }
+}
+
+struct UniqueNode<T> {
+ next: Opt<Box<UniqueNode<T>>>,
+ value: T
+}
+
+struct LongCycle1<T> {
+ next: Box<LongCycle2<T>>,
+ value: T,
+}
+
+struct LongCycle2<T> {
+ next: Box<LongCycle3<T>>,
+ value: T,
+}
+
+struct LongCycle3<T> {
+ next: Box<LongCycle4<T>>,
+ value: T,
+}
+
+struct LongCycle4<T> {
+ next: Option<Box<LongCycle1<T>>>,
+ value: T,
+}
+
+struct LongCycleWithAnonymousTypes {
+ next: Opt<Box<Box<Box<Box<Box<LongCycleWithAnonymousTypes>>>>>>,
+ value: usize,
+}
+
+// This test case makes sure that recursive structs are properly described. The Node structs are
+// generic so that we can have a new type (that newly needs to be described) for the different
+// cases. The potential problem with recursive types is that the DI generation algorithm gets
+// trapped in an endless loop. To make sure, we actually test this in the different cases, we have
+// to operate on a new type each time, otherwise we would just hit the DI cache for all but the
+// first case.
+
+// The different cases below (stack_*, unique_*, box_*, etc) are set up so that the type description
+// algorithm will enter the type reference cycle that is created by a recursive definition from a
+// different context each time.
+
+// The "long cycle" cases are constructed to span a longer, indirect recursion cycle between types.
+// The different locals will cause the DI algorithm to enter the type reference cycle at different
+// points.
+
+fn main() {
+ let stack_unique: UniqueNode<u16> = UniqueNode {
+ next: Val {
+ val: box UniqueNode {
+ next: Empty,
+ value: 1,
+ }
+ },
+ value: 0,
+ };
+
+ let unique_unique: Box<UniqueNode<u32>> = box UniqueNode {
+ next: Val {
+ val: box UniqueNode {
+ next: Empty,
+ value: 3,
+ }
+ },
+ value: 2,
+ };
+
+ let vec_unique: [UniqueNode<f32>; 1] = [UniqueNode {
+ next: Val {
+ val: box UniqueNode {
+ next: Empty,
+ value: 7.5,
+ }
+ },
+ value: 6.5,
+ }];
+
+ let borrowed_unique: &UniqueNode<f64> = &UniqueNode {
+ next: Val {
+ val: box UniqueNode {
+ next: Empty,
+ value: 9.5,
+ }
+ },
+ value: 8.5,
+ };
+
+ // LONG CYCLE
+ let long_cycle1: LongCycle1<u16> = LongCycle1 {
+ next: box LongCycle2 {
+ next: box LongCycle3 {
+ next: box LongCycle4 {
+ next: None,
+ value: 23,
+ },
+ value: 22,
+ },
+ value: 21
+ },
+ value: 20
+ };
+
+ let long_cycle2: LongCycle2<u32> = LongCycle2 {
+ next: box LongCycle3 {
+ next: box LongCycle4 {
+ next: None,
+ value: 26,
+ },
+ value: 25,
+ },
+ value: 24
+ };
+
+ let long_cycle3: LongCycle3<u64> = LongCycle3 {
+ next: box LongCycle4 {
+ next: None,
+ value: 28,
+ },
+ value: 27,
+ };
+
+ let long_cycle4: LongCycle4<f32> = LongCycle4 {
+ next: None,
+ value: 29.5,
+ };
+
+ // It's important that LongCycleWithAnonymousTypes is encountered only at the end of the
+ // `box` chain.
+ let long_cycle_w_anonymous_types = box box box box box LongCycleWithAnonymousTypes {
+ next: Val {
+ val: box box box box box LongCycleWithAnonymousTypes {
+ next: Empty,
+ value: 31,
+ }
+ },
+ value: 30
+ };
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// ignore-tidy-linelength
// ignore-lldb
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb that can read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
// compile-flags:-g
// gdb-command:print stack_unique.value
// gdb-check:$1 = 0
-// gdbg-command:print stack_unique.next.RUST$ENCODED$ENUM$0$Empty.val->value
// gdbr-command:print stack_unique.next.val.value
// gdb-check:$2 = 1
-// gdbg-command:print unique_unique->value
// gdbr-command:print unique_unique.value
// gdb-check:$3 = 2
-// gdbg-command:print unique_unique->next.RUST$ENCODED$ENUM$0$Empty.val->value
// gdbr-command:print unique_unique.next.val.value
// gdb-check:$4 = 3
// gdb-command:print vec_unique[0].value
// gdb-check:$5 = 6.5
-// gdbg-command:print vec_unique[0].next.RUST$ENCODED$ENUM$0$Empty.val->value
// gdbr-command:print vec_unique[0].next.val.value
// gdb-check:$6 = 7.5
-// gdbg-command:print borrowed_unique->value
// gdbr-command:print borrowed_unique.value
// gdb-check:$7 = 8.5
-// gdbg-command:print borrowed_unique->next.RUST$ENCODED$ENUM$0$Empty.val->value
// gdbr-command:print borrowed_unique.next.val.value
// gdb-check:$8 = 9.5
// LONG CYCLE
// gdb-command:print long_cycle1.value
// gdb-check:$9 = 20
-// gdbg-command:print long_cycle1.next->value
// gdbr-command:print long_cycle1.next.value
// gdb-check:$10 = 21
-// gdbg-command:print long_cycle1.next->next->value
// gdbr-command:print long_cycle1.next.next.value
// gdb-check:$11 = 22
-// gdbg-command:print long_cycle1.next->next->next->value
// gdbr-command:print long_cycle1.next.next.next.value
// gdb-check:$12 = 23
// gdb-command:print long_cycle2.value
// gdb-check:$13 = 24
-// gdbg-command:print long_cycle2.next->value
// gdbr-command:print long_cycle2.next.value
// gdb-check:$14 = 25
-// gdbg-command:print long_cycle2.next->next->value
// gdbr-command:print long_cycle2.next.next.value
// gdb-check:$15 = 26
// gdb-command:print long_cycle3.value
// gdb-check:$16 = 27
-// gdbg-command:print long_cycle3.next->value
// gdbr-command:print long_cycle3.next.value
// gdb-check:$17 = 28
// gdb-command:print long_cycle4.value
// gdb-check:$18 = 29.5
-// gdbg-command:print (*****long_cycle_w_anonymous_types).value
// gdbr-command:print long_cycle_w_anonymous_types.value
// gdb-check:$19 = 30
-// gdbg-command:print (*****((*****long_cycle_w_anonymous_types).next.RUST$ENCODED$ENUM$0$Empty.val)).value
// gdbr-command:print long_cycle_w_anonymous_types.next.val.value
// gdb-check:$20 = 31
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 7.11.90 - 7.12.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:set print union on
+// gdb-command:run
+
+// gdb-command:print case1
+// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
+// gdbr-check:$1 = struct_style_enum_legacy::Regular::Case1{a: 0, b: 31868, c: 31868, d: 31868, e: 31868}
+
+// gdb-command:print case2
+// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, a = 0, b = 286331153, c = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
+// gdbr-check:$2 = struct_style_enum_legacy::Regular::Case2{a: 0, b: 286331153, c: 286331153}
+
+// gdb-command:print case3
+// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, a = 0, b = 6438275382588823897}}
+// gdbr-check:$3 = struct_style_enum_legacy::Regular::Case3{a: 0, b: 6438275382588823897}
+
+// gdb-command:print univariant
+// gdbg-check:$4 = {{a = -1}}
+// gdbr-check:$4 = struct_style_enum_legacy::Univariant::TheOnlyCase{a: -1}
+
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:run
+
+// lldb-command:print case1
+// lldbg-check:[...]$0 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 }
+// lldbr-check:(struct_style_enum_legacy::Regular::Case1) case1 = { a = 0 b = 31868 c = 31868 d = 31868 e = 31868 }
+
+// lldb-command:print case2
+// lldbg-check:[...]$1 = Case2 { a: 0, b: 286331153, c: 286331153 }
+// lldbr-check:(struct_style_enum_legacy::Regular::Case2) case2 = Case2 { struct_style_enum_legacy::Regular::Case1: 0, struct_style_enum_legacy::Regular::Case2: 286331153, struct_style_enum_legacy::Regular::Case3: 286331153 }
+
+// lldb-command:print case3
+// lldbg-check:[...]$2 = Case3 { a: 0, b: 6438275382588823897 }
+// lldbr-check:(struct_style_enum_legacy::Regular::Case3) case3 = Case3 { struct_style_enum_legacy::Regular::Case1: 0, struct_style_enum_legacy::Regular::Case2: 6438275382588823897 }
+
+// lldb-command:print univariant
+// lldbg-check:[...]$3 = TheOnlyCase { a: -1 }
+// lldbr-check:(struct_style_enum_legacy::Univariant) univariant = Univariant { struct_style_enum_legacy::TheOnlyCase: TheOnlyCase { a: -1 } }
+
+#![allow(unused_variables)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+use self::Regular::{Case1, Case2, Case3};
+use self::Univariant::TheOnlyCase;
+
+// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
+// the size of the discriminant value is machine dependent, this has to be taken into account when
+// the datatype layout should be predictable, as in this case.
+enum Regular {
+ Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
+ Case2 { a: u64, b: u32, c: u32},
+ Case3 { a: u64, b: u64 }
+}
+
+enum Univariant {
+ TheOnlyCase { a: i64 }
+}
+
+fn main() {
+
+ // In order to avoid endianness trouble all of the following test values consist of a single
+ // repeated byte. This way each interpretation of the union should look the same, no matter if
+ // this is a big or little endian machine.
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
+
+ // 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
+ // 0b01011001010110010101100101011001 = 1499027801
+ // 0b0101100101011001 = 22873
+ // 0b01011001 = 89
+ let case3 = Case3 { a: 0, b: 6438275382588823897 };
+
+ let univariant = TheOnlyCase { a: -1 };
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// except according to those terms.
// ignore-tidy-linelength
-// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb and lldb that can
+// read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+// rust-lldb
// compile-flags:-g
// gdb-command:run
// gdb-command:print case1
-// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
// gdbr-check:$1 = struct_style_enum::Regular::Case1{a: 0, b: 31868, c: 31868, d: 31868, e: 31868}
// gdb-command:print case2
-// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, a = 0, b = 286331153, c = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
// gdbr-check:$2 = struct_style_enum::Regular::Case2{a: 0, b: 286331153, c: 286331153}
// gdb-command:print case3
-// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, a = 0, b = 6438275382588823897}}
// gdbr-check:$3 = struct_style_enum::Regular::Case3{a: 0, b: 6438275382588823897}
// gdb-command:print univariant
-// gdbg-check:$4 = {{a = -1}}
// gdbr-check:$4 = struct_style_enum::Univariant::TheOnlyCase{a: -1}
// lldb-command:run
// lldb-command:print case1
-// lldbg-check:[...]$0 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 }
// lldbr-check:(struct_style_enum::Regular::Case1) case1 = { a = 0 b = 31868 c = 31868 d = 31868 e = 31868 }
// lldb-command:print case2
-// lldbg-check:[...]$1 = Case2 { a: 0, b: 286331153, c: 286331153 }
-// lldbr-check:(struct_style_enum::Regular::Case2) case2 = Case2 { struct_style_enum::Regular::Case1: 0, struct_style_enum::Regular::Case2: 286331153, struct_style_enum::Regular::Case3: 286331153 }
+// lldbr-check:(struct_style_enum::Regular::Case2) case2 = Case2 { Case1: 0, Case2: 286331153, Case3: 286331153 }
// lldb-command:print case3
-// lldbg-check:[...]$2 = Case3 { a: 0, b: 6438275382588823897 }
-// lldbr-check:(struct_style_enum::Regular::Case3) case3 = Case3 { struct_style_enum::Regular::Case1: 0, struct_style_enum::Regular::Case2: 6438275382588823897 }
+// lldbr-check:(struct_style_enum::Regular::Case3) case3 = Case3 { Case1: 0, Case2: 6438275382588823897 }
// lldb-command:print univariant
-// lldbg-check:[...]$3 = TheOnlyCase { a: -1 }
-// lldbr-check:(struct_style_enum::Univariant) univariant = Univariant { struct_style_enum::TheOnlyCase: TheOnlyCase { a: -1 } }
+// lldbr-check:(struct_style_enum::Univariant) univariant = Univariant { TheOnlyCase: TheOnlyCase { a: -1 } }
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 7.11.90 - 7.12.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:set print union on
+// gdb-command:run
+
+// gdb-command:print case1
+// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, __0 = 0, __1 = 31868, __2 = 31868, __3 = 31868, __4 = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
+// gdbr-check:$1 = tuple_style_enum_legacy::Regular::Case1(0, 31868, 31868, 31868, 31868)
+
+// gdb-command:print case2
+// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, __0 = 0, __1 = 286331153, __2 = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
+// gdbr-check:$2 = tuple_style_enum_legacy::Regular::Case2(0, 286331153, 286331153)
+
+// gdb-command:print case3
+// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, __0 = 0, __1 = 6438275382588823897}}
+// gdbr-check:$3 = tuple_style_enum_legacy::Regular::Case3(0, 6438275382588823897)
+
+// gdb-command:print univariant
+// gdbg-check:$4 = {{__0 = -1}}
+// gdbr-check:$4 = tuple_style_enum_legacy::Univariant::TheOnlyCase(-1)
+
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:run
+
+// lldb-command:print case1
+// lldbg-check:[...]$0 = Case1(0, 31868, 31868, 31868, 31868)
+// lldbr-check:(tuple_style_enum_legacy::Regular::Case1) case1 = { = 0 = 31868 = 31868 = 31868 = 31868 }
+
+// lldb-command:print case2
+// lldbg-check:[...]$1 = Case2(0, 286331153, 286331153)
+// lldbr-check:(tuple_style_enum_legacy::Regular::Case2) case2 = Case2 { tuple_style_enum_legacy::Regular::Case1: 0, tuple_style_enum_legacy::Regular::Case2: 286331153, tuple_style_enum_legacy::Regular::Case3: 286331153 }
+
+// lldb-command:print case3
+// lldbg-check:[...]$2 = Case3(0, 6438275382588823897)
+// lldbr-check:(tuple_style_enum_legacy::Regular::Case3) case3 = Case3 { tuple_style_enum_legacy::Regular::Case1: 0, tuple_style_enum_legacy::Regular::Case2: 6438275382588823897 }
+
+// lldb-command:print univariant
+// lldbg-check:[...]$3 = TheOnlyCase(-1)
+// lldbr-check:(tuple_style_enum_legacy::Univariant) univariant = { tuple_style_enum_legacy::TheOnlyCase = { = -1 } }
+
+#![allow(unused_variables)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+use self::Regular::{Case1, Case2, Case3};
+use self::Univariant::TheOnlyCase;
+
+// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
+// the size of the discriminant value is machine dependent, this has to be taken into account when
+// the datatype layout should be predictable, as in this case.
+enum Regular {
+ Case1(u64, u16, u16, u16, u16),
+ Case2(u64, u32, u32),
+ Case3(u64, u64)
+}
+
+enum Univariant {
+ TheOnlyCase(i64)
+}
+
+fn main() {
+
+ // In order to avoid endianness trouble all of the following test values consist of a single
+ // repeated byte. This way each interpretation of the union should look the same, no matter if
+ // this is a big or little endian machine.
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let case1 = Case1(0, 31868, 31868, 31868, 31868);
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let case2 = Case2(0, 286331153, 286331153);
+
+ // 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
+ // 0b01011001010110010101100101011001 = 1499027801
+ // 0b0101100101011001 = 22873
+ // 0b01011001 = 89
+ let case3 = Case3(0, 6438275382588823897);
+
+ let univariant = TheOnlyCase(-1);
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// except according to those terms.
// ignore-tidy-linelength
-// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb and lldb that can
+// read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+// rust-lldb
// compile-flags:-g
// gdb-command:run
// gdb-command:print case1
-// gdbg-check:$1 = {{RUST$ENUM$DISR = Case1, __0 = 0, __1 = 31868, __2 = 31868, __3 = 31868, __4 = 31868}, {RUST$ENUM$DISR = Case1, [...]}, {RUST$ENUM$DISR = Case1, [...]}}
// gdbr-check:$1 = tuple_style_enum::Regular::Case1(0, 31868, 31868, 31868, 31868)
// gdb-command:print case2
-// gdbg-check:$2 = {{RUST$ENUM$DISR = Case2, [...]}, {RUST$ENUM$DISR = Case2, __0 = 0, __1 = 286331153, __2 = 286331153}, {RUST$ENUM$DISR = Case2, [...]}}
// gdbr-check:$2 = tuple_style_enum::Regular::Case2(0, 286331153, 286331153)
// gdb-command:print case3
-// gdbg-check:$3 = {{RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, [...]}, {RUST$ENUM$DISR = Case3, __0 = 0, __1 = 6438275382588823897}}
// gdbr-check:$3 = tuple_style_enum::Regular::Case3(0, 6438275382588823897)
// gdb-command:print univariant
-// gdbg-check:$4 = {{__0 = -1}}
// gdbr-check:$4 = tuple_style_enum::Univariant::TheOnlyCase(-1)
// lldb-command:run
// lldb-command:print case1
-// lldbg-check:[...]$0 = Case1(0, 31868, 31868, 31868, 31868)
// lldbr-check:(tuple_style_enum::Regular::Case1) case1 = { = 0 = 31868 = 31868 = 31868 = 31868 }
// lldb-command:print case2
-// lldbg-check:[...]$1 = Case2(0, 286331153, 286331153)
-// lldbr-check:(tuple_style_enum::Regular::Case2) case2 = Case2 { tuple_style_enum::Regular::Case1: 0, tuple_style_enum::Regular::Case2: 286331153, tuple_style_enum::Regular::Case3: 286331153 }
+// lldbr-check:(tuple_style_enum::Regular::Case2) case2 = Case2 { Case1: 0, Case2: 286331153, Case3: 286331153 }
// lldb-command:print case3
-// lldbg-check:[...]$2 = Case3(0, 6438275382588823897)
-// lldbr-check:(tuple_style_enum::Regular::Case3) case3 = Case3 { tuple_style_enum::Regular::Case1: 0, tuple_style_enum::Regular::Case2: 6438275382588823897 }
+// lldbr-check:(tuple_style_enum::Regular::Case3) case3 = Case3 { Case1: 0, Case2: 6438275382588823897 }
// lldb-command:print univariant
-// lldbg-check:[...]$3 = TheOnlyCase(-1)
-// lldbr-check:(tuple_style_enum::Univariant) univariant = { tuple_style_enum::TheOnlyCase = { = -1 } }
+// lldbr-check:(tuple_style_enum::Univariant) univariant = { TheOnlyCase = { = -1 } }
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// min-lldb-version: 310
+
+// As long as LLVM 5 and LLVM 6 are supported, we want to test the
+// enum debuginfo fallback mode. Once those are desupported, this
+// test can be removed, as there is another (non-"legacy") test that
+// tests the new mode.
+// ignore-llvm-version: 7.0 - 9.9.9
+// ignore-gdb-version: 7.11.90 - 7.12.9
+// ignore-gdb-version: 8.2 - 9.9
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:run
+
+// gdb-command:print *the_a
+// gdbg-check:$1 = {{RUST$ENUM$DISR = TheA, x = 0, y = 8970181431921507452}, {RUST$ENUM$DISR = TheA, [...]}}
+// gdbr-check:$1 = unique_enum_legacy::ABC::TheA{x: 0, y: 8970181431921507452}
+
+// gdb-command:print *the_b
+// gdbg-check:$2 = {{RUST$ENUM$DISR = TheB, [...]}, {RUST$ENUM$DISR = TheB, __0 = 0, __1 = 286331153, __2 = 286331153}}
+// gdbr-check:$2 = unique_enum_legacy::ABC::TheB(0, 286331153, 286331153)
+
+// gdb-command:print *univariant
+// gdbg-check:$3 = {{__0 = 123234}}
+// gdbr-check:$3 = unique_enum_legacy::Univariant::TheOnlyCase(123234)
+
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:run
+
+// lldb-command:print *the_a
+// lldbg-check:[...]$0 = TheA { x: 0, y: 8970181431921507452 }
+// lldbr-check:(unique_enum_legacy::ABC::TheA) *the_a = TheA { unique_enum_legacy::ABC::TheA: 0, unique_enum_legacy::ABC::TheB: 8970181431921507452 }
+
+// lldb-command:print *the_b
+// lldbg-check:[...]$1 = TheB(0, 286331153, 286331153)
+// lldbr-check:(unique_enum_legacy::ABC::TheB) *the_b = { = 0 = 286331153 = 286331153 }
+
+// lldb-command:print *univariant
+// lldbg-check:[...]$2 = TheOnlyCase(123234)
+// lldbr-check:(unique_enum_legacy::Univariant) *univariant = { unique_enum_legacy::TheOnlyCase = { = 123234 } }
+
+#![allow(unused_variables)]
+#![feature(box_syntax)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+// The first element is to ensure proper alignment, irrespective of the machines word size. Since
+// the size of the discriminant value is machine dependent, this has be taken into account when
+// datatype layout should be predictable as in this case.
+enum ABC {
+ TheA { x: i64, y: i64 },
+ TheB (i64, i32, i32),
+}
+
+// This is a special case since it does not have the implicit discriminant field.
+enum Univariant {
+ TheOnlyCase(i64)
+}
+
+fn main() {
+
+ // In order to avoid endianness trouble all of the following test values consist of a single
+ // repeated byte. This way each interpretation of the union should look the same, no matter if
+ // this is a big or little endian machine.
+
+ // 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
+ // 0b01111100011111000111110001111100 = 2088533116
+ // 0b0111110001111100 = 31868
+ // 0b01111100 = 124
+ let the_a: Box<_> = box ABC::TheA { x: 0, y: 8970181431921507452 };
+
+ // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
+ // 0b00010001000100010001000100010001 = 286331153
+ // 0b0001000100010001 = 4369
+ // 0b00010001 = 17
+ let the_b: Box<_> = box ABC::TheB (0, 286331153, 286331153);
+
+ let univariant: Box<_> = box Univariant::TheOnlyCase(123234);
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// except according to those terms.
// ignore-tidy-linelength
-// min-lldb-version: 310
-// ignore-gdb-version: 7.11.90 - 7.12.9
+
+// Require LLVM with DW_TAG_variant_part and a gdb and lldb that can
+// read it.
+// min-system-llvm-version: 7.0
+// min-gdb-version: 8.2
+// rust-lldb
// compile-flags:-g
// gdb-command:run
// gdb-command:print *the_a
-// gdbg-check:$1 = {{RUST$ENUM$DISR = TheA, x = 0, y = 8970181431921507452}, {RUST$ENUM$DISR = TheA, [...]}}
// gdbr-check:$1 = unique_enum::ABC::TheA{x: 0, y: 8970181431921507452}
// gdb-command:print *the_b
-// gdbg-check:$2 = {{RUST$ENUM$DISR = TheB, [...]}, {RUST$ENUM$DISR = TheB, __0 = 0, __1 = 286331153, __2 = 286331153}}
// gdbr-check:$2 = unique_enum::ABC::TheB(0, 286331153, 286331153)
// gdb-command:print *univariant
-// gdbg-check:$3 = {{__0 = 123234}}
// gdbr-check:$3 = unique_enum::Univariant::TheOnlyCase(123234)
// lldb-command:run
// lldb-command:print *the_a
-// lldbg-check:[...]$0 = TheA { x: 0, y: 8970181431921507452 }
-// lldbr-check:(unique_enum::ABC::TheA) *the_a = TheA { unique_enum::ABC::TheA: 0, unique_enum::ABC::TheB: 8970181431921507452 }
+// lldbr-check:(unique_enum::ABC::TheA) *the_a = TheA { TheA: 0, TheB: 8970181431921507452 }
// lldb-command:print *the_b
-// lldbg-check:[...]$1 = TheB(0, 286331153, 286331153)
// lldbr-check:(unique_enum::ABC::TheB) *the_b = { = 0 = 286331153 = 286331153 }
// lldb-command:print *univariant
-// lldbg-check:[...]$2 = TheOnlyCase(123234)
-// lldbr-check:(unique_enum::Univariant) *univariant = { unique_enum::TheOnlyCase = { = 123234 } }
+// lldbr-check:(unique_enum::Univariant) *univariant = { TheOnlyCase = { = 123234 } }
#![allow(unused_variables)]
#![feature(box_syntax)]
// lldb-command:print padded_tuple
// lldbg-check:[...]$4 = &[(6, 7), (8, 9)]
-// lldbr-check:(&[(i32, i16)]) padded_tuple = { data_ptr = *0x555555554ff0 length = 2 }
+// lldbr-check:(&[(i32, i16)]) padded_tuple = { data_ptr = *0x555555555030 length = 2 }
// lldb-command:print padded_struct
// lldbg-check:[...]$5 = &[AStruct { x: 10, y: 11, z: 12 }, AStruct { x: 13, y: 14, z: 15 }]
// StorageLive(_4);
// _4 = std::option::Option<std::boxed::Box<u32>>::None;
// FakeRead(ForLet, _4);
-// AscribeUserType(_4, o, UserTypeProjection { base: Ty(Canonical { variables: [], value: std::option::Option<std::boxed::Box<u32>> }), projs: [] });
+// AscribeUserType(_4, o, UserTypeProjection { base: Ty(Canonical { max_universe: U0, variables: [], value: std::option::Option<std::boxed::Box<u32>> }), projs: [] });
// StorageLive(_5);
// StorageLive(_6);
// _6 = move _4;
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z span_free_formats -Z mir-emit-retag
+
+// Tests that MIR inliner fixes up `Retag`'s `fn_entry` flag
+
+fn main() {
+ println!("{}", bar());
+}
+
+#[inline(always)]
+fn foo(x: &i32, y: &i32) -> bool {
+ *x == *y
+}
+
+fn bar() -> bool {
+ let f = foo;
+ f(&1, &-1)
+}
+
+// END RUST SOURCE
+// START rustc.bar.Inline.after.mir
+// ...
+// bb0: {
+// ...
+// Retag(_3);
+// Retag(_6);
+// StorageLive(_9);
+// _9 = (*_3);
+// StorageLive(_10);
+// _10 = (*_6);
+// _0 = Eq(move _9, move _10);
+// ...
+// return;
+// }
+// ...
+// END rustc.bar.Inline.after.mir
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// compile-flags: -Z mir-emit-retag -Z mir-opt-level=0 -Z span_free_formats
+
+#![allow(unused)]
+
+struct Test(i32);
+
+impl Test {
+ // Make sure we run the pass on a method, not just on bare functions.
+ fn foo<'x>(&self, x: &'x mut i32) -> &'x mut i32 { x }
+ fn foo_shr<'x>(&self, x: &'x i32) -> &'x i32 { x }
+}
+
+fn main() {
+ let mut x = 0;
+ {
+ let v = Test(0).foo(&mut x); // just making sure we do not panic when there is a tuple struct ctor
+ let w = { v }; // assignment
+ let _w = w; // reborrow
+ }
+
+ // Also test closures
+ let c: fn(&i32) -> &i32 = |x: &i32| -> &i32 { let _y = x; x };
+ let _w = c(&x);
+
+ // need to call `foo_shr` or it doesn't even get generated
+ Test(0).foo_shr(&0);
+}
+
+// END RUST SOURCE
+// START rustc.{{impl}}-foo.EraseRegions.after.mir
+// bb0: {
+// Retag([fn entry] _1);
+// Retag([fn entry] _2);
+// ...
+// _0 = &mut (*_3);
+// ...
+// return;
+// }
+// END rustc.{{impl}}-foo.EraseRegions.after.mir
+// START rustc.{{impl}}-foo_shr.EraseRegions.after.mir
+// bb0: {
+// Retag([fn entry] _1);
+// Retag([fn entry] _2);
+// ...
+// _0 = _2;
+// Retag(_0);
+// ...
+// return;
+// }
+// END rustc.{{impl}}-foo_shr.EraseRegions.after.mir
+// START rustc.main.EraseRegions.after.mir
+// fn main() -> () {
+// ...
+// bb0: {
+// ...
+// _3 = const Test::foo(move _4, move _6) -> bb1;
+// }
+//
+// bb1: {
+// Retag(_3);
+// ...
+// _9 = move _3;
+// Retag(_9);
+// _8 = &mut (*_9);
+// StorageDead(_9);
+// StorageLive(_10);
+// _10 = move _8;
+// Retag(_10);
+// ...
+// _13 = move _14(move _15) -> bb2;
+// }
+//
+// bb2: {
+// Retag(_13);
+// ...
+// }
+// ...
+// }
+// END rustc.main.EraseRegions.after.mir
+// START rustc.main-{{closure}}.EraseRegions.after.mir
+// fn main::{{closure}}(_1: &[closure@NodeId(117)], _2: &i32) -> &i32 {
+// ...
+// bb0: {
+// Retag([fn entry] _1);
+// Retag([fn entry] _2);
+// StorageLive(_3);
+// _3 = _2;
+// Retag(_3);
+// _0 = _2;
+// Retag(_0);
+// StorageDead(_3);
+// return;
+// }
+// }
+// END rustc.main-{{closure}}.EraseRegions.after.mir
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-// compile-flags: -Z verbose -Z mir-emit-validate=1 -Z span_free_formats
-
-struct Test(i32);
-
-impl Test {
- // Make sure we run the pass on a method, not just on bare functions.
- fn foo(&self, _x: &mut i32) {}
-}
-
-fn main() {
- let mut x = 0;
- Test(0).foo(&mut x); // just making sure we do not panic when there is a tuple struct ctor
-
- // Also test closures
- let c = |x: &mut i32| { let y = &*x; *y };
- c(&mut x);
-}
-
-// END RUST SOURCE
-// START rustc.{{impl}}-foo.EraseRegions.after.mir
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/0:5 ~ validate_1[317d]::{{impl}}[0]::foo[0]), BrAnon(0)) Test, _2: &ReFree(DefId(0/0:5 ~ validate_1[317d]::{{impl}}[0]::foo[0]), BrAnon(1)) mut i32]);
-// ...
-// return;
-// }
-// END rustc.{{impl}}-foo.EraseRegions.after.mir
-// START rustc.main.EraseRegions.after.mir
-// fn main() -> () {
-// ...
-// bb0: {
-// ...
-// Validate(Suspend(ReScope(Node(ItemLocalId(13)))), [_1: i32]);
-// _6 = &ReErased mut _1;
-// Validate(Acquire, [(*_6): i32/ReScope(Node(ItemLocalId(13)))]);
-// Validate(Suspend(ReScope(Node(ItemLocalId(13)))), [(*_6): i32/ReScope(Node(ItemLocalId(13)))]);
-// _5 = &ReErased mut (*_6);
-// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(13)))]);
-// Validate(Release, [_2: (), _3: &ReScope(Node(ItemLocalId(13))) Test, _5: &ReScope(Node(ItemLocalId(13))) mut i32]);
-// _2 = const Test::foo(move _3, move _5) -> bb1;
-// }
-//
-// bb1: {
-// Validate(Acquire, [_2: ()]);
-// EndRegion(ReScope(Node(ItemLocalId(13))));
-// ...
-// return;
-// }
-// }
-// END rustc.main.EraseRegions.after.mir
-// START rustc.main-{{closure}}.EraseRegions.after.mir
-// fn main::{{closure}}(_1: &ReErased [closure@NodeId(65)], _2: &ReErased mut i32) -> i32 {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/1:11 ~ validate_1[317d]::main[0]::{{closure}}[0]), BrEnv) [closure@NodeId(65)], _2: &ReFree(DefId(0/1:11 ~ validate_1[317d]::main[0]::{{closure}}[0]), BrAnon(0)) mut i32]);
-// StorageLive(_3);
-// Validate(Suspend(ReScope(Remainder { block: ItemLocalId(31), first_statement_index: 0 })), [(*_2): i32]);
-// _3 = &ReErased (*_2);
-// Validate(Acquire, [(*_3): i32/ReScope(Remainder { block: ItemLocalId(31), first_statement_index: 0 }) (imm)]);
-// _0 = (*_3);
-// EndRegion(ReScope(Remainder { block: ItemLocalId(31), first_statement_index: 0 }));
-// StorageDead(_3);
-// return;
-// }
-// }
-// END rustc.main-{{closure}}.EraseRegions.after.mir
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-// ignore-wasm32-bare unwinding being disabled causes differences in output
-// ignore-wasm64-bare unwinding being disabled causes differences in output
-// compile-flags: -Z verbose -Z mir-emit-validate=1
-
-fn main() {
- let _x : Box<[i32]> = Box::new([1, 2, 3]);
-}
-
-// END RUST SOURCE
-// START rustc.main.EraseRegions.after.mir
-// fn main() -> () {
-// ...
-// bb1: {
-// Validate(Acquire, [_2: std::boxed::Box<[i32; 3]>]);
-// Validate(Release, [_2: std::boxed::Box<[i32; 3]>]);
-// _1 = move _2 as std::boxed::Box<[i32]> (Unsize);
-// Validate(Acquire, [_1: std::boxed::Box<[i32]>]);
-// StorageDead(_2);
-// StorageDead(_3);
-// _0 = ();
-// Validate(Release, [_1: std::boxed::Box<[i32]>]);
-// drop(_1) -> [return: bb2, unwind: bb3];
-// }
-// ...
-// }
-// END rustc.main.EraseRegions.after.mir
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-// compile-flags: -Z verbose -Z mir-emit-validate=1
-
-struct Test {
- x: i32
-}
-
-fn foo(_x: &i32) {}
-
-fn main() {
- // These internal unsafe functions should have no effect on the code generation.
- unsafe fn _unused1() {}
- fn _unused2(x: *const i32) -> i32 { unsafe { *x }}
-
- let t = Test { x: 0 };
- let t = &t;
- foo(&t.x);
-}
-
-// END RUST SOURCE
-// START rustc.main.EraseRegions.after.mir
-// fn main() -> (){
-// let mut _0: ();
-// scope 1 {
-// scope 3 {
-// }
-// scope 4 {
-// let _2: &ReErased Test;
-// }
-// }
-// scope 2 {
-// let _1: Test;
-// }
-// let mut _3: ();
-// let mut _4: &ReErased i32;
-// let mut _5: &ReErased i32;
-// bb0: {
-// StorageLive(_1);
-// _1 = Test { x: const 0i32 };
-// StorageLive(_2);
-// Validate(Suspend(ReScope(Remainder { block: ItemLocalId(24), first_statement_index: 3 })), [_1: Test]);
-// _2 = &ReErased _1;
-// Validate(Acquire, [(*_2): Test/ReScope(Remainder { block: ItemLocalId(24), first_statement_index: 3 }) (imm)]);
-// StorageLive(_4);
-// StorageLive(_5);
-// Validate(Suspend(ReScope(Node(ItemLocalId(22)))), [((*_2).0: i32): i32/ReScope(Remainder { block: ItemLocalId(24), first_statement_index: 3 }) (imm)]);
-// _5 = &ReErased ((*_2).0: i32);
-// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(22))) (imm)]);
-// Validate(Suspend(ReScope(Node(ItemLocalId(22)))), [(*_5): i32/ReScope(Node(ItemLocalId(22))) (imm)]);
-// _4 = &ReErased (*_5);
-// Validate(Acquire, [(*_4): i32/ReScope(Node(ItemLocalId(22))) (imm)]);
-// Validate(Release, [_3: (), _4: &ReScope(Node(ItemLocalId(22))) i32]);
-// _3 = const foo(move _4) -> bb1;
-// }
-// bb1: {
-// Validate(Acquire, [_3: ()]);
-// EndRegion(ReScope(Node(ItemLocalId(22))));
-// StorageDead(_4);
-// StorageDead(_5);
-// _0 = ();
-// EndRegion(ReScope(Remainder { block: ItemLocalId(24), first_statement_index: 3 }));
-// StorageDead(_2);
-// StorageDead(_1);
-// return;
-// }
-// }
-// END rustc.main.EraseRegions.after.mir
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-// compile-flags: -Z verbose -Z mir-emit-validate=1 -Z span_free_formats
-
-// Make sure unsafe fns and fns with an unsafe block only get restricted validation.
-
-unsafe fn write_42(x: *mut i32) -> bool {
- let test_closure = |x: *mut i32| *x = 23;
- test_closure(x);
- *x = 42;
- true
-}
-
-fn test(x: &mut i32) {
- unsafe { write_42(x) };
-}
-
-fn main() {
- test(&mut 0);
-
- let test_closure = unsafe { |x: &mut i32| write_42(x) };
- test_closure(&mut 0);
-}
-
-// FIXME: Also test code generated inside the closure, make sure it only does restricted validation
-// because it is entirely inside an unsafe block. Unfortunately, the interesting lines of code also
-// contain name of the source file, so we cannot test for it.
-
-// END RUST SOURCE
-// START rustc.write_42.EraseRegions.after.mir
-// fn write_42(_1: *mut i32) -> bool {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: *mut i32]);
-// Validate(Release, [_1: *mut i32]);
-// ...
-// return;
-// }
-// }
-// END rustc.write_42.EraseRegions.after.mir
-// START rustc.write_42-{{closure}}.EraseRegions.after.mir
-// fn write_42::{{closure}}(_1: &ReErased [closure@NodeId(32)], _2: *mut i32) -> () {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(32)], _2: *mut i32]);
-// Validate(Release, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(32)], _2: *mut i32]);
-// (*_2) = const 23i32;
-// _0 = ();
-// return;
-// }
-// }
-// END rustc.write_42-{{closure}}.EraseRegions.after.mir
-// START rustc.test.EraseRegions.after.mir
-// fn test(_1: &ReErased mut i32) -> () {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/0:4 ~ validate_4[317d]::test[0]), BrAnon(0)) mut i32]);
-// Validate(Release, [_1: &ReFree(DefId(0/0:4 ~ validate_4[317d]::test[0]), BrAnon(0)) mut i32]);
-// ...
-// _2 = const write_42(move _3) -> bb1;
-// }
-// bb1: {
-// Validate(Acquire, [_2: bool]);
-// Validate(Release, [_2: bool]);
-// ...
-// }
-// }
-// END rustc.test.EraseRegions.after.mir
-// START rustc.main-{{closure}}.EraseRegions.after.mir
-// fn main::{{closure}}(_1: &ReErased [closure@NodeId(80)], _2: &ReErased mut i32) -> bool {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/1:10 ~ validate_4[317d]::main[0]::{{closure}}[0]), BrEnv) [closure@NodeId(80)], _2: &ReFree(DefId(0/1:10 ~ validate_4[317d]::main[0]::{{closure}}[0]), BrAnon(0)) mut i32]);
-// Validate(Release, [_1: &ReFree(DefId(0/1:10 ~ validate_4[317d]::main[0]::{{closure}}[0]), BrEnv) [closure@NodeId(80)], _2: &ReFree(DefId(0/1:10 ~ validate_4[317d]::main[0]::{{closure}}[0]), BrAnon(0)) mut i32]);
-// StorageLive(_3);
-// ...
-// _0 = const write_42(move _3) -> bb1;
-// }
-// ...
-// }
-// END rustc.main-{{closure}}.EraseRegions.after.mir
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-// compile-flags: -Z verbose -Z mir-emit-validate=2 -Z span_free_formats
-
-// Make sure unsafe fns and fns with an unsafe block still get full validation.
-
-unsafe fn write_42(x: *mut i32) -> bool {
- *x = 42;
- true
-}
-
-fn test(x: &mut i32) {
- unsafe { write_42(x) };
-}
-
-fn main() {
- test(&mut 0);
-
- let test_closure = unsafe { |x: &mut i32| write_42(x) };
- // Note that validation will fail if this is executed: The closure keeps the lock on
- // x, so the write in write_42 fails. This test just checks code generation,
- // so the UB doesn't matter.
- test_closure(&mut 0);
-}
-
-// END RUST SOURCE
-// START rustc.test.EraseRegions.after.mir
-// fn test(_1: &ReErased mut i32) -> () {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/0:4 ~ validate_5[317d]::test[0]), BrAnon(0)) mut i32]);
-// ...
-// Validate(Release, [_2: bool, _3: *mut i32]);
-// _2 = const write_42(move _3) -> bb1;
-// }
-// ...
-// }
-// END rustc.test.EraseRegions.after.mir
-// START rustc.main-{{closure}}.EraseRegions.after.mir
-// fn main::{{closure}}(_1: &ReErased [closure@NodeId(62)], _2: &ReErased mut i32) -> bool {
-// ...
-// bb0: {
-// Validate(Acquire, [_1: &ReFree(DefId(0/1:9 ~ validate_5[317d]::main[0]::{{closure}}[0]), BrEnv) [closure@NodeId(62)], _2: &ReFree(DefId(0/1:9 ~ validate_5[317d]::main[0]::{{closure}}[0]), BrAnon(0)) mut i32]);
-// StorageLive(_3);
-// StorageLive(_4);
-// StorageLive(_5);
-// Validate(Suspend(ReScope(Node(ItemLocalId(16)))), [(*_2): i32]);
-// _5 = &ReErased mut (*_2);
-// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(16)))]);
-// _4 = move _5 as *mut i32 (Misc);
-// _3 = move _4;
-// EndRegion(ReScope(Node(ItemLocalId(16))));
-// StorageDead(_4);
-// StorageDead(_5);
-// Validate(Release, [_0: bool, _3: *mut i32]);
-// _0 = const write_42(move _3) -> bb1;
-// }
-// ...
-// }
-// END rustc.main-{{closure}}.EraseRegions.after.mir
pub unsafe fn atomic_i64(x: *mut i64) {
atomic_xadd(x, 1);
}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_u128(x: *mut u128) {
+ atomic_xadd(x, 1);
+}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_i128(x: *mut i128) {
+ atomic_xadd(x, 1);
+}
#[cfg(target_has_atomic = "ptr")]
pub unsafe fn atomic_usize(x: *mut usize) {
atomic_xadd(x, 1);
--- /dev/null
+-include ../../run-make-fulldeps/tools.mk
+
+# How to run this
+# $ ./x.py clean
+# $ ./x.py test --target thumbv7m-none-eabi src/test/run-make
+
+ifneq (,$(filter $(TARGET),thumbv6m-none-eabi thumbv7m-none-eabi))
+
+# For cargo setting
+export RUSTC := $(RUSTC_ORIGINAL)
+export LD_LIBRARY_PATH := $(HOST_RPATH_DIR)
+# We need to be outside of 'src' dir in order to run cargo
+export WORK_DIR := $(TMPDIR)
+export HERE := $(shell pwd)
+
+## clean up unused env variables which might cause harm.
+unexport RUSTC_LINKER
+unexport RUSTC_BOOTSTRAP
+unexport RUST_BUILD_STAGE
+unexport RUST_TEST_THREADS
+unexport RUST_TEST_TMPDIR
+unexport AR
+unexport CC
+unexport CXX
+
+all:
+ bash script.sh
+else
+all:
+endif
--- /dev/null
+[target.thumbv7m-none-eabi]
+# uncomment this to make `cargo run` execute programs on QEMU
+runner = "qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel"
+
+[target.thumbv6m-none-eabi]
+# uncomment this to make `cargo run` execute programs on QEMU
+# For now, we use cortex-m3 instead of cortex-m0 which are not supported by QEMU
+runner = "qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel"
+
+[target.'cfg(all(target_arch = "arm", target_os = "none"))']
+# uncomment ONE of these three option to make `cargo run` start a GDB session
+# which option to pick depends on your system
+# runner = "arm-none-eabi-gdb -q -x openocd.gdb"
+# runner = "gdb-multiarch -q -x openocd.gdb"
+# runner = "gdb -q -x openocd.gdb"
+
+rustflags = [
+ # LLD (shipped with the Rust toolchain) is used as the default linker
+ "-C", "link-arg=-Tlink.x",
+
+ # if you run into problems with LLD switch to the GNU linker by commenting out
+ # this line
+ # "-C", "linker=arm-none-eabi-ld",
+
+ # if you need to link to pre-compiled C libraries provided by a C toolchain
+ # use GCC as the linker by commenting out both lines above and then
+ # uncommenting the three lines below
+ # "-C", "linker=arm-none-eabi-gcc",
+ # "-C", "link-arg=-Wl,-Tlink.x",
+ # "-C", "link-arg=-nostartfiles",
+]
\ No newline at end of file
--- /dev/null
+[package]
+name = "example"
+version = "0.1.0"
+authors = ["Hideki Sekine <sekineh@me.com>"]
+# edition = "2018"
+
+[dependencies]
+cortex-m = "0.5.4"
+cortex-m-rt = "=0.5.4"
+panic-halt = "0.2.0"
+cortex-m-semihosting = "0.3.1"
--- /dev/null
+/* Device specific memory layout */
+
+/* This file is used to build the cortex-m-rt examples,
+ but not other applications using cortex-m-rt. */
+
+MEMORY
+{
+ /* FLASH and RAM are mandatory memory regions */
+ /* Update examples/data_overflow.rs if you change these sizes. */
+ FLASH : ORIGIN = 0x00000000, LENGTH = 256K
+ RAM : ORIGIN = 0x20000000, LENGTH = 64K
+
+ /* More memory regions can declared: for example this is a second RAM region */
+ /* CCRAM : ORIGIN = 0x10000000, LENGTH = 8K */
+}
+
+/* The location of the stack can be overridden using the `_stack_start` symbol.
+ By default it will be placed at the end of the RAM region */
+/* _stack_start = ORIGIN(CCRAM) + LENGTH(CCRAM); */
+
+/* The location of the .text section can be overridden using the `_stext` symbol.
+ By default it will place after .vector_table */
+/* _stext = ORIGIN(FLASH) + 0x40c; */
\ No newline at end of file
--- /dev/null
+// #![feature(stdsimd)]
+#![no_main]
+#![no_std]
+
+extern crate cortex_m;
+
+extern crate cortex_m_rt as rt;
+extern crate cortex_m_semihosting as semihosting;
+extern crate panic_halt;
+
+use core::fmt::Write;
+use cortex_m::asm;
+use rt::entry;
+
+entry!(main);
+
+fn main() -> ! {
+ let x = 42;
+
+ loop {
+ asm::nop();
+
+ // write something through semihosting interface
+ let mut hstdout = semihosting::hio::hstdout().unwrap();
+ write!(hstdout, "x = {}\n", x);
+
+ // exit from qemu
+ semihosting::debug::exit(semihosting::debug::EXIT_SUCCESS);
+ }
+}
--- /dev/null
+set -exuo pipefail
+
+CRATE=example
+
+env | sort
+mkdir -p $WORK_DIR
+pushd $WORK_DIR
+ rm -rf $CRATE || echo OK
+ cp -a $HERE/example .
+ pushd $CRATE
+ env RUSTFLAGS="-C linker=arm-none-eabi-ld -C link-arg=-Tlink.x" \
+ $CARGO run --target $TARGET | grep "x = 42"
+ env RUSTFLAGS="-C linker=arm-none-eabi-ld -C link-arg=-Tlink.x" \
+ $CARGO run --target $TARGET --release | grep "x = 42"
+ popd
+popd
$(RUSTC) foo.rs -C lto -O --target wasm32-unknown-unknown --cfg c
wc -c < $(TMPDIR)/foo.wasm
[ "`wc -c < $(TMPDIR)/foo.wasm`" -lt "5120" ]
+ $(RUSTC) foo.rs -C lto -O --target wasm32-unknown-unknown --cfg d
+ wc -c < $(TMPDIR)/foo.wasm
+ [ "`wc -c < $(TMPDIR)/foo.wasm`" -lt "5120" ]
else
all:
endif
pub fn foo() {
panic!("{}", "a");
}
+
+#[no_mangle]
+#[cfg(d)]
+pub fn foo() -> usize {
+ use std::cell::Cell;
+ thread_local!(static A: Cell<Vec<u32>> = Cell::new(Vec::new()));
+ A.try_with(|x| x.replace(Vec::new()).len()).unwrap_or(0)
+}
// except according to those terms.
#![crate_type = "cdylib"]
-
-#![feature(panic_implementation)]
#![no_std]
use core::panic::PanicInfo;
panic!()
}
-#[panic_implementation]
+#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
loop {}
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers arond `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(arbitrary_self_types)]
+#![feature(pin)]
+#![feature(rustc_attrs)]
+
+use std::{
+ rc::Rc,
+ sync::Arc,
+ pin::Pin,
+};
+
+trait Trait {
+ fn by_rc(self: Rc<Self>) -> i64;
+ fn by_arc(self: Arc<Self>) -> i64;
+ fn by_pin_mut(self: Pin<&mut Self>) -> i64;
+ fn by_pin_box(self: Pin<Box<Self>>) -> i64;
+}
+
+impl Trait for i64 {
+ fn by_rc(self: Rc<Self>) -> i64 {
+ *self
+ }
+ fn by_arc(self: Arc<Self>) -> i64 {
+ *self
+ }
+ fn by_pin_mut(self: Pin<&mut Self>) -> i64 {
+ *self
+ }
+ fn by_pin_box(self: Pin<Box<Self>>) -> i64 {
+ *self
+ }
+}
+
+fn main() {
+ let rc = Rc::new(1i64) as Rc<dyn Trait>;
+ assert_eq!(1, rc.by_rc());
+
+ let arc = Arc::new(2i64) as Arc<dyn Trait>;
+ assert_eq!(2, arc.by_arc());
+
+ let mut value = 3i64;
+ let pin_mut = Pin::new(&mut value) as Pin<&mut dyn Trait>;
+ assert_eq!(3, pin_mut.by_pin_mut());
+
+ let pin_box = Into::<Pin<Box<i64>>>::into(Box::new(4i64)) as Pin<Box<dyn Trait>>;
+ assert_eq!(4, pin_box.by_pin_box());
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(cfg_target_has_atomic)]
+#![feature(integer_atomics)]
+
+use std::mem::{align_of, size_of};
+use std::sync::atomic::*;
+
+fn main() {
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicBool>(), size_of::<AtomicBool>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicPtr<u8>>(), size_of::<AtomicPtr<u8>>());
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicU8>(), size_of::<AtomicU8>());
+ #[cfg(target_has_atomic = "8")]
+ assert_eq!(align_of::<AtomicI8>(), size_of::<AtomicI8>());
+ #[cfg(target_has_atomic = "16")]
+ assert_eq!(align_of::<AtomicU16>(), size_of::<AtomicU16>());
+ #[cfg(target_has_atomic = "16")]
+ assert_eq!(align_of::<AtomicI16>(), size_of::<AtomicI16>());
+ #[cfg(target_has_atomic = "32")]
+ assert_eq!(align_of::<AtomicU32>(), size_of::<AtomicU32>());
+ #[cfg(target_has_atomic = "32")]
+ assert_eq!(align_of::<AtomicI32>(), size_of::<AtomicI32>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+ #[cfg(target_has_atomic = "128")]
+ assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+ #[cfg(target_has_atomic = "128")]
+ assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicIsize>(), size_of::<AtomicIsize>());
+}
println!("passed");
}
+ "exec-test5" => {
+ env::set_var("VARIABLE", "ABC");
+ Command::new("definitely-not-a-real-binary").env("VARIABLE", "XYZ").exec();
+ assert_eq!(env::var("VARIABLE").unwrap(), "ABC");
+ println!("passed");
+ }
+
_ => panic!("unknown argument: {}", arg),
}
return
assert!(output.status.success());
assert!(output.stderr.is_empty());
assert_eq!(output.stdout, b"passed\n");
+
+ let output = Command::new(&me).arg("exec-test5").output().unwrap();
+ assert!(output.status.success());
+ assert!(output.stderr.is_empty());
+ assert_eq!(output.stdout, b"passed\n");
}
where Self::Item: std::fmt::Display {
let mut s = String::new();
if let Some(e) = self.next() {
- write!(s, "{}", e);
+ write!(s, "{}", e).unwrap();
for e in self {
s.push_str(sep);
- write!(s, "{}", e);
+ write!(s, "{}", e).unwrap();
}
}
s
first = false;
}
- write!(buf, " {:>2}", d.day());
+ write!(buf, " {:>2}", d.day()).unwrap();
}
// Insert more filler at the end to fill up the remainder of the week,
// The AST node for the (1 + y) expression generated by the macro will then
// take it's `lo` span bound from the `1` literal in the macro-defining file
// and it's `hi` bound from `y` in this file, which should be lower than the
- // `lo` and even lower than the lower bound of the FileMap it is supposedly
- // contained in because the FileMap for this file was allocated earlier than
- // the FileMap of the macro-defining file.
+ // `lo` and even lower than the lower bound of the SourceFile it is supposedly
+ // contained in because the SourceFile for this file was allocated earlier than
+ // the SourceFile of the macro-defining file.
return (x, add1!(y));
}
use std::os::unix::process::ExitStatusExt;
assert!(status.signal() == Some(libc::SIGILL)
+ || status.signal() == Some(libc::SIGTRAP)
|| status.signal() == Some(libc::SIGABRT));
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Tests that paths in `pub(...)` don't fail HIR verification.
+
+#![allow(unused_imports)]
+#![allow(dead_code)]
+
+pub(self) use self::my_mod::Foo;
+
+mod my_mod {
+ pub(super) use self::Foo as Bar;
+ pub(in super::my_mod) use self::Foo as Baz;
+
+ pub struct Foo;
+}
+
+fn main() {}
--- /dev/null
+// rust-lang/rust#54477: runtime bug in the VecDeque library that was
+// exposed by this test case, derived from test suite of crates.io
+// `collection` crate.
+
+use std::collections::VecDeque;
+
+fn main() {
+ let mut vecdeque_13 = VecDeque::from(vec![ ]);
+ let mut vecdeque_29 = VecDeque::from(vec![ 0 ]);
+ vecdeque_29.insert(0, 30 );
+ vecdeque_29.insert(1, 31 );
+ vecdeque_29.insert(2, 32 );
+ vecdeque_29.insert(3, 33 );
+ vecdeque_29.insert(4, 34 );
+ vecdeque_29.insert(5, 35 );
+ // println!("vecdeque_13: {:?}", vecdeque_13);
+ // println!("vecdeque_29: {:?}", vecdeque_29);
+
+ // println!("Invoking: `vecdeque_13.append(&mut vecdeque_29)`");
+ vecdeque_13.append(&mut vecdeque_29);
+
+ // println!("vecdeque_13: {:?}", vecdeque_13);
+
+ assert_eq!(vecdeque_13, VecDeque::from(vec![30, 31, 32, 33, 34, 35, 0]));
+}
}
fn borrowing_writer_from_struct_and_formatting_struct_field(foo: Foo) {
- write!(foo.writer, "{}", foo.other);
+ write!(foo.writer, "{}", foo.other).unwrap();
}
fn main() {
let mut w = Vec::new();
- write!(&mut w as &mut Write, "");
- write!(&mut w, ""); // should coerce
+ write!(&mut w as &mut Write, "").unwrap();
+ write!(&mut w, "").unwrap(); // should coerce
println!("ok");
let mut s = Bar;
{
use std::fmt::Write;
- write!(&mut s, "test");
+ write!(&mut s, "test").unwrap();
}
}
use std::mem::size_of;
-// compile-flags: -Z fuel=foo=0
+// (#55495: The --error-format is to sidestep an issue in our test harness)
+// compile-flags: --error-format human -Z fuel=foo=0
struct S1(u8, u16, u8);
struct S2(u8, u16, u8);
--- /dev/null
+optimization-fuel-exhausted: Reorder fields of "S1"
+++ /dev/null
-optimization-fuel-exhausted: Reorder fields of "S1"
use std::mem::size_of;
-// compile-flags: -Z fuel=foo=1
+// (#55495: The --error-format is to sidestep an issue in our test harness)
+// compile-flags: --error-format human -Z fuel=foo=1
struct S1(u8, u16, u8);
struct S2(u8, u16, u8);
--- /dev/null
+optimization-fuel-exhausted: Reorder fields of "S2"
+++ /dev/null
-optimization-fuel-exhausted: Reorder fields of "S2"
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![no_std]
+
+extern crate std;
+
+std::thread_local!(static A: usize = 30);
+
+fn main() {
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+use std::marker::PhantomData;
+
+trait Empty {}
+trait EmptyAlias = Empty;
+trait CloneDefault = Clone + Default;
+trait SendSyncAlias = Send + Sync;
+trait WhereSendAlias = where Self: Send;
+trait SendEqAlias<T> = Send where T: PartialEq<Self>;
+trait I32Iterator = Iterator<Item = i32>;
+
+#[allow(dead_code)]
+struct Foo<T: SendSyncAlias>(PhantomData<T>);
+#[allow(dead_code)]
+struct Bar<T>(PhantomData<T>) where T: SendSyncAlias;
+
+impl EmptyAlias {}
+
+impl<T: SendSyncAlias> Empty for T {}
+
+fn a<T: CloneDefault>() -> (T, T) {
+ let one = T::default();
+ let two = one.clone();
+ (one, two)
+}
+
+fn b(x: &impl SendEqAlias<i32>) -> bool {
+ 22_i32 == *x
+}
+
+fn c<T: I32Iterator>(x: &mut T) -> Option<i32> {
+ x.next()
+}
+
+fn d<T: SendSyncAlias>() {
+ is_send_and_sync::<T>();
+}
+
+fn is_send_and_sync<T: Send + Sync>() {}
+
+fn main() {
+ let both = a::<i32>();
+ assert_eq!(both.0, 0);
+ assert_eq!(both.1, 0);
+ let both: (i32, i32) = a();
+ assert_eq!(both.0, 0);
+ assert_eq!(both.1, 0);
+
+ assert!(b(&22));
+
+ assert_eq!(c(&mut vec![22].into_iter()), Some(22));
+
+ d::<i32>();
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+trait Foo = PartialEq<i32> + Send;
+trait Bar = Foo + Sync;
+
+trait I32Iterator = Iterator<Item = i32>;
+
+pub fn main() {
+ let a: &dyn Bar = &123;
+ assert!(*a == 123);
+ let b = Box::new(456) as Box<dyn Foo>;
+ assert!(*b == 456);
+
+ // FIXME(alexreg): associated type should be gotten from trait alias definition
+ // let c: &dyn I32Iterator = &vec![123].into_iter();
+ // assert_eq!(c.next(), Some(123));
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+trait SimpleAlias = Default;
+trait GenericAlias<T> = Iterator<Item = T>;
+trait Partial<T> = IntoIterator<Item = T>;
+trait SpecificAlias = GenericAlias<i32>;
+trait PartialEqRef<'a, T: 'a> = PartialEq<&'a T>;
+trait StaticAlias = 'static;
+
+trait Things<T> {}
+trait Romeo {}
+#[allow(dead_code)]
+struct The<T>(T);
+#[allow(dead_code)]
+struct Fore<T>(T);
+impl<T, U> Things<T> for The<U> {}
+impl<T> Romeo for Fore<T> {}
+
+trait WithWhere<Art, Thou> = Romeo + Romeo where Fore<(Art, Thou)>: Romeo;
+trait BareWhere<Wild, Are> = where The<Wild>: Things<Are>;
+
+fn main() {}
3 | no
| ^^ not found in this scope
-thread '$DIR/failed-doctest-output.rs - OtherStruct (line 27)' panicked at 'couldn't compile the test', librustdoc/test.rs:332:13
+thread '$DIR/failed-doctest-output.rs - OtherStruct (line 27)' panicked at 'couldn't compile the test', librustdoc/test.rs:323:13
note: Run with `RUST_BACKTRACE=1` for a backtrace.
---- $DIR/failed-doctest-output.rs - SomeStruct (line 21) stdout ----
thread 'main' panicked at 'oh no', $DIR/failed-doctest-output.rs:3:1
note: Run with `RUST_BACKTRACE=1` for a backtrace.
-', librustdoc/test.rs:367:17
+', librustdoc/test.rs:358:17
failures:
/// Docs for QUX1 in trait.
const QUX1: i8;
// @has - '//*[@id="associatedconstant.QUX_DEFAULT0"]' 'const QUX_DEFAULT0: u16'
- // @has - '//*[@class="docblock"]' "Docs for QUX_DEFAULT0 in trait."
- /// Docs for QUX_DEFAULT0 in trait.
+ // @has - '//*[@class="docblock"]' "Docs for QUX_DEFAULT12 in trait."
+ /// Docs for QUX_DEFAULT12 in trait.
const QUX_DEFAULT0: u16 = 1;
// @has - '//*[@id="associatedconstant.QUX_DEFAULT1"]' 'const QUX_DEFAULT1: i16'
// @has - '//*[@class="docblock"]' "Docs for QUX_DEFAULT1 in trait."
/// Docs for QUX1 in impl.
const QUX1: i8 = 5;
// @has - '//*[@id="associatedconstant.QUX_DEFAULT0"]' 'const QUX_DEFAULT0: u16'
- // @has - '//*[@class="docblock"]' "Docs for QUX_DEFAULT0 in trait."
+ // @has - '//*[@class="docblock hidden"]' "Docs for QUX_DEFAULT12 in trait."
const QUX_DEFAULT0: u16 = 6;
// @has - '//*[@id="associatedconstant.QUX_DEFAULT1"]' 'const QUX_DEFAULT1: i16'
// @has - '//*[@class="docblock"]' "Docs for QUX_DEFAULT1 in impl."
--- /dev/null
+// Copyright (c) 2015 Anders Kaseorg <andersk@mit.edu>
+
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// “Software”), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+//! This crate exports a macro `enum_from_primitive!` that wraps an
+//! `enum` declaration and automatically adds an implementation of
+//! `num::FromPrimitive` (reexported here), to allow conversion from
+//! primitive integers to the enum. It therefore provides an
+//! alternative to the built-in `#[derive(FromPrimitive)]`, which
+//! requires the unstable `std::num::FromPrimitive` and is disabled in
+//! Rust 1.0.
+//!
+//! # Example
+//!
+//! ```
+//! #[macro_use] extern crate enum_primitive;
+//! extern crate num_traits;
+//! use num_traits::FromPrimitive;
+//!
+//! enum_from_primitive! {
+//! #[derive(Debug, PartialEq)]
+//! enum FooBar {
+//! Foo = 17,
+//! Bar = 42,
+//! Baz,
+//! }
+//! }
+//!
+//! fn main() {
+//! assert_eq!(FooBar::from_i32(17), Some(FooBar::Foo));
+//! assert_eq!(FooBar::from_i32(42), Some(FooBar::Bar));
+//! assert_eq!(FooBar::from_i32(43), Some(FooBar::Baz));
+//! assert_eq!(FooBar::from_i32(91), None);
+//! }
+//! ```
+
+
+pub mod num_traits {
+ pub trait FromPrimitive: Sized {
+ fn from_i64(n: i64) -> Option<Self>;
+ fn from_u64(n: u64) -> Option<Self>;
+ }
+}
+
+pub use std::option::Option;
+pub use num_traits::FromPrimitive;
+
+/// Helper macro for internal use by `enum_from_primitive!`.
+#[macro_export]
+macro_rules! enum_from_primitive_impl_ty {
+ ($meth:ident, $ty:ty, $name:ident, $( $variant:ident )*) => {
+ #[allow(non_upper_case_globals, unused)]
+ fn $meth(n: $ty) -> $crate::Option<Self> {
+ $( if n == $name::$variant as $ty {
+ $crate::Option::Some($name::$variant)
+ } else )* {
+ $crate::Option::None
+ }
+ }
+ };
+}
+
+/// Helper macro for internal use by `enum_from_primitive!`.
+#[macro_export]
+#[macro_use(enum_from_primitive_impl_ty)]
+macro_rules! enum_from_primitive_impl {
+ ($name:ident, $( $variant:ident )*) => {
+ impl $crate::FromPrimitive for $name {
+ enum_from_primitive_impl_ty! { from_i64, i64, $name, $( $variant )* }
+ enum_from_primitive_impl_ty! { from_u64, u64, $name, $( $variant )* }
+ }
+ };
+}
+
+/// Wrap this macro around an `enum` declaration to get an
+/// automatically generated implementation of `num::FromPrimitive`.
+#[macro_export]
+#[macro_use(enum_from_primitive_impl)]
+macro_rules! enum_from_primitive {
+ (
+ $( #[$enum_attr:meta] )*
+ enum $name:ident {
+ $( $( #[$variant_attr:meta] )* $variant:ident ),+
+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ enum $name {
+ $( $( #[$variant_attr] )* $variant ),+
+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*
+ }
+ enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ enum $name:ident {
+ $( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),*
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ enum $name {
+ $( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),*
+ }
+ enum_from_primitive_impl! { $name, $( $( $variant )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ enum $name:ident {
+ $( $( #[$variant_attr:meta] )* $variant:ident ),+
+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*,
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ enum $name {
+ $( $( #[$variant_attr] )* $variant ),+
+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*,
+ }
+ enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ enum $name:ident {
+ $( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),+,
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ enum $name {
+ $( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),+,
+ }
+ enum_from_primitive_impl! { $name, $( $( $variant )+ )+ }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ pub enum $name:ident {
+ $( $( #[$variant_attr:meta] )* $variant:ident ),+
+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ pub enum $name {
+ $( $( #[$variant_attr] )* $variant ),+
+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*
+ }
+ enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ pub enum $name:ident {
+ $( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),*
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ pub enum $name {
+ $( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),*
+ }
+ enum_from_primitive_impl! { $name, $( $( $variant )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ pub enum $name:ident {
+ $( $( #[$variant_attr:meta] )* $variant:ident ),+
+ $( = $discriminator:expr, $( $( #[$variant_two_attr:meta] )* $variant_two:ident ),+ )*,
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ pub enum $name {
+ $( $( #[$variant_attr] )* $variant ),+
+ $( = $discriminator, $( $( #[$variant_two_attr] )* $variant_two ),+ )*,
+ }
+ enum_from_primitive_impl! { $name, $( $variant )+ $( $( $variant_two )+ )* }
+ };
+
+ (
+ $( #[$enum_attr:meta] )*
+ pub enum $name:ident {
+ $( $( $( #[$variant_attr:meta] )* $variant:ident ),+ = $discriminator:expr ),+,
+ }
+ ) => {
+ $( #[$enum_attr] )*
+ pub enum $name {
+ $( $( $( #[$variant_attr] )* $variant ),+ = $discriminator ),+,
+ }
+ enum_from_primitive_impl! { $name, $( $( $variant )+ )+ }
+ };
+}
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z unstable-options --enable-index-page
+
+#![crate_name = "foo"]
+
+// @has foo/../index.html
+// @has - '//span[@class="in-band"]' 'List of all crates'
+// @has - '//ul[@class="mod"]//a[@href="foo/index.html"]' 'foo'
+pub struct Foo;
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+pub trait ScopeHandle<'scope> {}
+
+
+
+// @has issue_54705/struct.ScopeFutureContents.html
+// @has - '//*[@id="synthetic-implementations-list"]/*[@class="impl"]//*/code' "impl<'scope, S> \
+// Send for ScopeFutureContents<'scope, S> where S: Sync"
+//
+// @has - '//*[@id="synthetic-implementations-list"]/*[@class="impl"]//*/code' "impl<'scope, S> \
+// Sync for ScopeFutureContents<'scope, S> where S: Sync"
+pub struct ScopeFutureContents<'scope, S>
+ where S: ScopeHandle<'scope>,
+{
+ dummy: &'scope S,
+ this: Box<ScopeFuture<'scope, S>>,
+}
+
+struct ScopeFuture<'scope, S>
+ where S: ScopeHandle<'scope>,
+{
+ contents: ScopeFutureContents<'scope, S>,
+}
+
+unsafe impl<'scope, S> Send for ScopeFuture<'scope, S>
+ where S: ScopeHandle<'scope>,
+{}
+unsafe impl<'scope, S> Sync for ScopeFuture<'scope, S>
+ where S: ScopeHandle<'scope>,
+{}
// @has manual_impl/struct.S3.html '//*[@class="trait"]' 'T'
// @has - '//*[@class="docblock"]' 'Docs associated with the S3 trait implementation.'
// @has - '//*[@class="docblock"]' 'Docs associated with the S3 trait b_method implementation.'
-// @has - '//*[@class="docblock"]' 'Docs associated with the trait a_method definition.'
+// @has - '//*[@class="docblock hidden"]' 'Docs associated with the trait a_method definition.'
pub struct S3(usize);
/// Docs associated with the S3 trait implementation.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ensure this code doesn't stack overflow
+// aux-build:enum_primitive.rs
+
+#[macro_use] extern crate enum_primitive;
+
+enum_from_primitive! {
+ pub enum Test {
+ A1,A2,A3,A4,A5,A6,
+ B1,B2,B3,B4,B5,B6,
+ C1,C2,C3,C4,C5,C6,
+ D1,D2,D3,D4,D5,D6,
+ E1,E2,E3,E4,E5,E6,
+ F1,F2,F3,F4,F5,F6,
+ G1,G2,G3,G4,G5,G6,
+ H1,H2,H3,H4,H5,H6,
+ I1,I2,I3,I4,I5,I6,
+ J1,J2,J3,J4,J5,J6,
+ K1,K2,K3,K4,K5,K6,
+ L1,L2,L3,L4,L5,L6,
+ M1,M2,M3,M4,M5,M6,
+ N1,N2,N3,N4,N5,N6,
+ O1,O2,O3,O4,O5,O6,
+ P1,P2,P3,P4,P5,P6,
+ Q1,Q2,Q3,Q4,Q5,Q6,
+ R1,R2,R3,R4,R5,R6,
+ S1,S2,S3,S4,S5,S6,
+ T1,T2,T3,T4,T5,T6,
+ U1,U2,U3,U4,U5,U6,
+ V1,V2,V3,V4,V5,V6,
+ W1,W2,W3,W4,W5,W6,
+ X1,X2,X3,X4,X5,X6,
+ Y1,Y2,Y3,Y4,Y5,Y6,
+ Z1,Z2,Z3,Z4,Z5,Z6,
+ }
+}
+
LL | match *s { sty(v) => v } //~ ERROR cannot move out
| ^
-error[E0507]: cannot move out of `s.0` which is behind a `&` reference
- --> $DIR/access-mode-in-closures.rs:19:24
- |
-LL | let _foo = unpack(|s| {
- | - help: consider changing this to be a mutable reference: `&mut sty`
-LL | // Test that `s` is moved here.
-LL | match *s { sty(v) => v } //~ ERROR cannot move out
- | ^
- | |
- | cannot move out of `s.0` which is behind a `&` reference
- | `s` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
use std::rc::Rc;
trait Foo {
- fn foo(self: Rc<Self>) -> usize;
+ fn foo(self: &Rc<Self>) -> usize;
}
trait Bar {
- fn foo(self: Rc<Self>) -> usize where Self: Sized;
- fn bar(self: Box<Self>) -> usize;
+ fn foo(self: &Rc<Self>) -> usize where Self: Sized;
+ fn bar(self: Rc<Self>) -> usize;
}
impl Foo for usize {
- fn foo(self: Rc<Self>) -> usize {
- *self
+ fn foo(self: &Rc<Self>) -> usize {
+ **self
}
}
impl Bar for usize {
- fn foo(self: Rc<Self>) -> usize {
- *self
+ fn foo(self: &Rc<Self>) -> usize {
+ **self
}
- fn bar(self: Box<Self>) -> usize {
+ fn bar(self: Rc<Self>) -> usize {
*self
}
}
fn make_foo() {
- let x = Box::new(5usize) as Box<Foo>;
+ let x = Rc::new(5usize) as Rc<Foo>;
//~^ ERROR E0038
//~| ERROR E0038
}
fn make_bar() {
- let x = Box::new(5usize) as Box<Bar>;
+ let x = Rc::new(5usize) as Rc<Bar>;
x.bar();
}
error[E0038]: the trait `Foo` cannot be made into an object
- --> $DIR/arbitrary-self-types-not-object-safe.rs:40:33
+ --> $DIR/arbitrary-self-types-not-object-safe.rs:40:32
|
-LL | let x = Box::new(5usize) as Box<Foo>;
- | ^^^^^^^^ the trait `Foo` cannot be made into an object
+LL | let x = Rc::new(5usize) as Rc<Foo>;
+ | ^^^^^^^ the trait `Foo` cannot be made into an object
|
- = note: method `foo` has a non-standard `self` type
+ = note: method `foo`'s receiver cannot be dispatched on
error[E0038]: the trait `Foo` cannot be made into an object
--> $DIR/arbitrary-self-types-not-object-safe.rs:40:13
|
-LL | let x = Box::new(5usize) as Box<Foo>;
- | ^^^^^^^^^^^^^^^^ the trait `Foo` cannot be made into an object
+LL | let x = Rc::new(5usize) as Rc<Foo>;
+ | ^^^^^^^^^^^^^^^ the trait `Foo` cannot be made into an object
|
- = note: method `foo` has a non-standard `self` type
- = note: required because of the requirements on the impl of `std::ops::CoerceUnsized<std::boxed::Box<dyn Foo>>` for `std::boxed::Box<usize>`
+ = note: method `foo`'s receiver cannot be dispatched on
+ = note: required because of the requirements on the impl of `std::ops::CoerceUnsized<std::rc::Rc<dyn Foo>>` for `std::rc::Rc<usize>`
error: aborting due to 2 previous errors
|
= note: move occurs because `x` has type `T`, which does not implement the `Copy` trait
+error[E0505]: cannot move out of `x` because it is borrowed
+ --> $DIR/binop-move-semantics.rs:31:5
+ |
+LL | let m = &x;
+ | -- borrow of `x` occurs here
+...
+LL | x //~ ERROR: cannot move out of `x` because it is borrowed
+ | ^ move out of `x` occurs here
+...
+LL | use_mut(n); use_imm(m);
+ | - borrow later used here
+
+error[E0505]: cannot move out of `y` because it is borrowed
+ --> $DIR/binop-move-semantics.rs:33:5
+ |
+LL | let n = &mut y;
+ | ------ borrow of `y` occurs here
+...
+LL | y; //~ ERROR: cannot move out of `y` because it is borrowed
+ | ^ move out of `y` occurs here
+LL | use_mut(n); use_imm(m);
+ | - borrow later used here
+
error[E0507]: cannot move out of borrowed content
--> $DIR/binop-move-semantics.rs:40:5
|
LL | *n; //~ ERROR: cannot move out of borrowed content
| ^^ cannot move out of borrowed content
-error[E0507]: cannot move out of `*n` which is behind a `&` reference
- --> $DIR/binop-move-semantics.rs:42:5
- |
-LL | let n = &y;
- | -- help: consider changing this to be a mutable reference: `&mut y`
-...
-LL | *n; //~ ERROR: cannot move out of borrowed content
- | ^^
- | |
- | cannot move out of `*n` which is behind a `&` reference
- | `n` is a `&` reference, so the data it refers to cannot be moved
-
error[E0502]: cannot borrow `f` as immutable because it is also borrowed as mutable
--> $DIR/binop-move-semantics.rs:64:5
|
| | immutable borrow later used here
| mutable borrow occurs here
-error: aborting due to 7 previous errors
+error: aborting due to 8 previous errors
-Some errors occurred: E0382, E0502, E0507.
+Some errors occurred: E0382, E0502, E0505, E0507.
For more information about an error, try `rustc --explain E0382`.
x //~ ERROR: cannot move out of `x` because it is borrowed
+
y; //~ ERROR: cannot move out of `y` because it is borrowed
+ use_mut(n); use_imm(m);
}
-
fn illegal_dereference<T: Add<Output=()>>(mut x: T, y: T) {
let m = &mut x;
let n = &y;
*m //~ ERROR: cannot move out of borrowed content
+
*n; //~ ERROR: cannot move out of borrowed content
+ use_imm(n); use_mut(m);
}
-
struct Foo;
impl<'a, 'b> Add<&'b Foo> for &'a mut Foo {
}
fn main() {}
+
+fn use_mut<T>(_: &mut T) { }
+fn use_imm<T>(_: &T) { }
error[E0596]: cannot borrow `*x` as mutable, as it is behind a `&` reference
- --> $DIR/borrowck-closures-mut-of-imm.rs:23:21
+ --> $DIR/borrowck-closures-mut-of-imm.rs:23:25
|
-LL | let c1 = || set(&mut *x);
- | ^^^^^^^ cannot borrow as mutable
+LL | let mut c1 = || set(&mut *x);
+ | ^^^^^^^ cannot borrow as mutable
error[E0596]: cannot borrow `*x` as mutable, as it is behind a `&` reference
- --> $DIR/borrowck-closures-mut-of-imm.rs:25:21
+ --> $DIR/borrowck-closures-mut-of-imm.rs:25:25
|
-LL | let c2 = || set(&mut *x);
- | ^^^^^^^ cannot borrow as mutable
+LL | let mut c2 = || set(&mut *x);
+ | ^^^^^^^ cannot borrow as mutable
-error: aborting due to 2 previous errors
+error[E0524]: two closures require unique access to `x` at the same time
+ --> $DIR/borrowck-closures-mut-of-imm.rs:25:18
+ |
+LL | let mut c1 = || set(&mut *x);
+ | -- - first borrow occurs due to use of `x` in closure
+ | |
+ | first closure is constructed here
+LL | //~^ ERROR cannot borrow
+LL | let mut c2 = || set(&mut *x);
+ | ^^ - second borrow occurs due to use of `x` in closure
+ | |
+ | second closure is constructed here
+...
+LL | c2(); c1();
+ | -- first borrow later used here
+
+error: aborting due to 3 previous errors
-For more information about this error, try `rustc --explain E0596`.
+Some errors occurred: E0524, E0596.
+For more information about an error, try `rustc --explain E0524`.
}
fn a(x: &isize) {
- let c1 = || set(&mut *x);
+ let mut c1 = || set(&mut *x);
//~^ ERROR cannot borrow
- let c2 = || set(&mut *x);
+ let mut c2 = || set(&mut *x);
//~^ ERROR cannot borrow
//~| ERROR two closures require unique access to `x` at the same time
+ c2(); c1();
}
fn main() {
error[E0524]: two closures require unique access to `x` at the same time
- --> $DIR/borrowck-closures-mut-of-imm.rs:25:14
+ --> $DIR/borrowck-closures-mut-of-imm.rs:25:18
|
-LL | let c1 = || set(&mut *x);
- | -- - previous borrow occurs due to use of `x` in closure
- | |
- | first closure is constructed here
+LL | let mut c1 = || set(&mut *x);
+ | -- - previous borrow occurs due to use of `x` in closure
+ | |
+ | first closure is constructed here
LL | //~^ ERROR cannot borrow
-LL | let c2 = || set(&mut *x);
- | ^^ - borrow occurs due to use of `x` in closure
- | |
- | second closure is constructed here
+LL | let mut c2 = || set(&mut *x);
+ | ^^ - borrow occurs due to use of `x` in closure
+ | |
+ | second closure is constructed here
...
LL | }
| - borrow from first closure ends here
error[E0596]: cannot borrow immutable borrowed content `***x` as mutable
- --> $DIR/borrowck-closures-mut-of-imm.rs:23:26
+ --> $DIR/borrowck-closures-mut-of-imm.rs:23:30
|
-LL | let c1 = || set(&mut *x);
- | ^^ cannot borrow as mutable
+LL | let mut c1 = || set(&mut *x);
+ | ^^ cannot borrow as mutable
error[E0596]: cannot borrow immutable borrowed content `***x` as mutable
- --> $DIR/borrowck-closures-mut-of-imm.rs:25:26
+ --> $DIR/borrowck-closures-mut-of-imm.rs:25:30
|
-LL | let c2 = || set(&mut *x);
- | ^^ cannot borrow as mutable
+LL | let mut c2 = || set(&mut *x);
+ | ^^ cannot borrow as mutable
error: aborting due to 3 previous errors
--- /dev/null
+error[E0524]: two closures require unique access to `x` at the same time
+ --> $DIR/borrowck-closures-mut-of-mut.rs:14:18
+ |
+LL | let mut c1 = || set(&mut *x);
+ | -- - first borrow occurs due to use of `x` in closure
+ | |
+ | first closure is constructed here
+LL | let mut c2 = || set(&mut *x);
+ | ^^ - second borrow occurs due to use of `x` in closure
+ | |
+ | second closure is constructed here
+LL | //~^ ERROR two closures require unique access to `x` at the same time
+LL | c2(); c1();
+ | -- first borrow later used here
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0524`.
--- /dev/null
+// Tests that two closures cannot simultaneously both have mutable
+// access to the variable. Related to issue #6801.
+
+fn get(x: &isize) -> isize {
+ *x
+}
+
+fn set(x: &mut isize) {
+ *x = 4;
+}
+
+fn a(x: &mut isize) {
+ let mut c1 = || set(&mut *x);
+ let mut c2 = || set(&mut *x);
+ //~^ ERROR two closures require unique access to `x` at the same time
+ c2(); c1();
+}
+
+fn main() {
+}
--- /dev/null
+error[E0524]: two closures require unique access to `x` at the same time
+ --> $DIR/borrowck-closures-mut-of-mut.rs:14:18
+ |
+LL | let mut c1 = || set(&mut *x);
+ | -- - previous borrow occurs due to use of `x` in closure
+ | |
+ | first closure is constructed here
+LL | let mut c2 = || set(&mut *x);
+ | ^^ - borrow occurs due to use of `x` in closure
+ | |
+ | second closure is constructed here
+...
+LL | }
+ | - borrow from first closure ends here
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0524`.
LL | c1;
| -- first borrow later used here
-warning[E0594]: cannot assign to `x`, as it is not declared as mutable
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
--> $DIR/borrowck-closures-unique.rs:57:38
|
LL | fn e(x: &'static mut isize) {
| - help: consider changing this to be mutable: `mut x`
LL | let c1 = |y: &'static mut isize| x = y; //~ ERROR closure cannot assign to immutable argument
| ^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-error: aborting due to 3 previous errors
+error: aborting due to 4 previous errors
Some errors occurred: E0500, E0524, E0594.
For more information about an error, try `rustc --explain E0500`.
LL | *y = 1;
| ------ first borrow later used here
-warning: captured variable cannot escape `FnMut` closure body
+error: captured variable cannot escape `FnMut` closure body
--> $DIR/borrowck-describe-lvalue.rs:305:16
|
LL | || {
|
= note: `FnMut` closures only have access to their captured variables while they are executing...
= note: ...therefore, they cannot allow references to captured variables to escape
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
error[E0503]: cannot use `f.x` because it was mutably borrowed
--> $DIR/borrowck-describe-lvalue.rs:53:9
|
= note: move occurs because `x` has type `std::vec::Vec<i32>`, which does not implement the `Copy` trait
-error: aborting due to 29 previous errors
+error: aborting due to 30 previous errors
Some errors occurred: E0382, E0499, E0502, E0503.
For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-error[E0507]: cannot move out of borrowed content
- --> $DIR/borrowck-fn-in-const-a.rs:19:16
- |
-LL | return *x //[ast]~ ERROR cannot move out of borrowed content [E0507]
- | ^^ cannot move out of borrowed content
-
-error[E0507]: cannot move out of `*x` which is behind a `&` reference
- --> $DIR/borrowck-fn-in-const-a.rs:19:16
- |
-LL | fn broken(x: &String) -> String {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | return *x //[ast]~ ERROR cannot move out of borrowed content [E0507]
- | ^^
- | |
- | cannot move out of `*x` which is behind a `&` reference
- | `x` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
-error[E0507]: cannot move out of `*__next` which is behind a `&` reference
- --> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:22:10
- |
-LL | for &a in x.iter() { //~ ERROR cannot move out
- | -^
- | ||
- | |cannot move out of `*__next` which is behind a `&` reference
- | |`__next` is a `&` reference, so the data it refers to cannot be moved
- | help: consider changing this to be a mutable reference: `&mut a`
-
error[E0507]: cannot move out of borrowed content
--> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:22:15
|
LL | for &a in x.iter() { //~ ERROR cannot move out
| ^
-error[E0507]: cannot move out of `*__next` which is behind a `&` reference
- --> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:28:10
- |
-LL | for &a in &f.a { //~ ERROR cannot move out
- | -^
- | ||
- | |cannot move out of `*__next` which is behind a `&` reference
- | |`__next` is a `&` reference, so the data it refers to cannot be moved
- | help: consider changing this to be a mutable reference: `&mut a`
-
error[E0507]: cannot move out of borrowed content
--> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:28:15
|
LL | for &a in &f.a { //~ ERROR cannot move out
| ^
-error[E0507]: cannot move out of `*__next` which is behind a `&` reference
- --> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:32:10
- |
-LL | for &a in x.iter() { //~ ERROR cannot move out
- | -^
- | ||
- | |cannot move out of `*__next` which is behind a `&` reference
- | |`__next` is a `&` reference, so the data it refers to cannot be moved
- | help: consider changing this to be a mutable reference: `&mut a`
-
error[E0507]: cannot move out of borrowed content
--> $DIR/borrowck-for-loop-correct-cmt-for-pattern.rs:32:15
|
LL | for &a in x.iter() { //~ ERROR cannot move out
| ^
-error: aborting due to 6 previous errors
+error: aborting due to 3 previous errors
For more information about this error, try `rustc --explain E0507`.
LL | Box::new(|| x) //~ ERROR cannot move out of captured outer variable
| ^ cannot move out of captured variable in an `Fn` closure
-error[E0507]: cannot move out of `x`, as it is a captured variable in a `Fn` closure
- --> $DIR/borrowck-in-static.rs:15:17
- |
-LL | Box::new(|| x) //~ ERROR cannot move out of captured outer variable
- | ^
- | |
- | cannot move out of `x`, as it is a captured variable in a `Fn` closure
- | cannot move
- |
-help: consider changing this to accept closures that implement `FnMut`
- --> $DIR/borrowck-in-static.rs:15:14
- |
-LL | Box::new(|| x) //~ ERROR cannot move out of captured outer variable
- | ^^^^
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| cannot move out of borrowed content
| help: consider removing the `*`: `y`
-error[E0507]: cannot move out of `*y` which is behind a `&` reference
- --> $DIR/borrowck-issue-2657-2.rs:17:18
- |
-LL | Some(ref y) => {
- | ----- help: consider changing this to be a mutable reference: `ref mut y`
-LL | let _b = *y; //~ ERROR cannot move out
- | ^^
- | |
- | cannot move out of `*y` which is behind a `&` reference
- | `y` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
--- /dev/null
+error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
+ --> $DIR/borrowck-lend-flow-loop.rs:35:17
+ |
+LL | let mut x = &mut v;
+ | - mutable borrow occurs here
+...
+LL | borrow(&*v); //[ast]~ ERROR cannot borrow
+ | ^^ immutable borrow occurs here
+LL | }
+LL | }
+ | - mutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
+ --> $DIR/borrowck-lend-flow-loop.rs:45:17
+ |
+LL | let mut x = &mut v;
+ | - mutable borrow occurs here
+LL | for _ in 0..3 {
+LL | borrow(&*v); //[ast]~ ERROR cannot borrow
+ | ^^ immutable borrow occurs here
+...
+LL | }
+ | - mutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
+ --> $DIR/borrowck-lend-flow-loop.rs:57:25
+ |
+LL | borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
+ | ^^ mutable borrow occurs here
+LL | _x = &v;
+ | - immutable borrow occurs here
+LL | }
+LL | }
+ | - immutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
+ --> $DIR/borrowck-lend-flow-loop.rs:69:25
+ |
+LL | borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
+ | ^^ mutable borrow occurs here
+LL | _x = &v;
+ | - immutable borrow occurs here
+LL | }
+LL | }
+ | - immutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
+ --> $DIR/borrowck-lend-flow-loop.rs:86:21
+ |
+LL | _x = &v;
+ | - immutable borrow occurs here
+...
+LL | borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
+ | ^^ mutable borrow occurs here
+LL | }
+ | - immutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
+ --> $DIR/borrowck-lend-flow-loop.rs:100:21
+ |
+LL | _x = &v;
+ | - immutable borrow occurs here
+...
+LL | borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
+ | ^^ mutable borrow occurs here
+LL | }
+ | - immutable borrow ends here
+
+error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
+ --> $DIR/borrowck-lend-flow-loop.rs:109:17
+ |
+LL | borrow(&*v); //[ast]~ ERROR cannot borrow
+ | ^^ immutable borrow occurs here
+...
+LL | x = &mut v; //[ast]~ ERROR cannot borrow
+ | - mutable borrow occurs here
+...
+LL | }
+ | - mutable borrow ends here
+
+error[E0499]: cannot borrow `v` as mutable more than once at a time
+ --> $DIR/borrowck-lend-flow-loop.rs:112:22
+ |
+LL | x = &mut v; //[ast]~ ERROR cannot borrow
+ | ^ mutable borrow starts here in previous iteration of loop
+...
+LL | }
+ | - mutable borrow ends here
+
+error: aborting due to 8 previous errors
+
+Some errors occurred: E0499, E0502.
+For more information about an error, try `rustc --explain E0499`.
LL | let mut x = &mut v;
| ------ mutable borrow occurs here
LL | for _ in 0..3 {
-LL | borrow(&*v); //~ ERROR cannot borrow
+LL | borrow(&*v); //[ast]~ ERROR cannot borrow
| ^^^ immutable borrow occurs here
-LL | }
+...
LL | *x = box 5;
| -- mutable borrow used here, in later iteration of loop
|
LL | **x += 1;
| -------- mutable borrow used here, in later iteration of loop
-LL | borrow(&*v); //~ ERROR cannot borrow
+LL | borrow(&*v); //[ast]~ ERROR cannot borrow
| ^^^ immutable borrow occurs here
-LL | if cond2 {
-LL | x = &mut v; //~ ERROR cannot borrow
+...
+LL | x = &mut v; //[ast]~ ERROR cannot borrow
| ------ mutable borrow occurs here
error: aborting due to 2 previous errors
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Note: the borrowck analysis is currently flow-insensitive.
-// Therefore, some of these errors are marked as spurious and could be
-// corrected by a simple change to the analysis. The others are
-// either genuine or would require more advanced changes. The latter
-// cases are noted.
+// revisions: ast nll
+
+// Since we are testing nll migration explicitly as a separate
+// revision, don't worry about the --compare-mode=nll on this test.
+
+// ignore-compare-mode-nll
+
+//[ast]compile-flags: -Z borrowck=ast
+//[nll]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+
+// Note: the borrowck analysis was originally a flow-insensitive pass
+// over the AST. Therefore, some of these (AST) errors are marked as
+// spurious and are corrected by the flow-sensitive (NLL) analysis.
+// The others are either genuine or would require more advanced
+// changes. The latter cases are noted.
#![feature(box_syntax)]
let mut x = &mut v;
**x += 1;
loop {
- borrow(&*v); //~ ERROR cannot borrow
+ borrow(&*v); //[ast]~ ERROR cannot borrow
}
}
let mut v: Box<_> = box 3;
let mut x = &mut v;
for _ in 0..3 {
- borrow(&*v); //~ ERROR cannot borrow
+ borrow(&*v); //[ast]~ ERROR cannot borrow
+ //[nll]~^ ERROR cannot borrow
}
*x = box 5;
}
-
fn loop_aliased_mut() {
// In this instance, the borrow is carried through the loop.
let mut w: Box<_> = box 4;
let mut _x = &w;
loop {
- borrow_mut(&mut *v); //~ ERROR cannot borrow
+ borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
_x = &v;
}
}
let mut w: Box<_> = box 4;
let mut _x = &w;
while cond() {
- borrow_mut(&mut *v); //~ ERROR cannot borrow
+ borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
_x = &v;
}
}
_x = &v;
break;
}
- borrow_mut(&mut *v); //~ ERROR cannot borrow
+ borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
}
fn while_aliased_mut_break() {
_x = &v;
break;
}
- borrow_mut(&mut *v); //~ ERROR cannot borrow
+ borrow_mut(&mut *v); //[ast]~ ERROR cannot borrow
}
fn while_aliased_mut_cond(cond: bool, cond2: bool) {
let mut x = &mut w;
while cond {
**x += 1;
- borrow(&*v); //~ ERROR cannot borrow
+ borrow(&*v); //[ast]~ ERROR cannot borrow
+ //[nll]~^ ERROR cannot borrow
if cond2 {
- x = &mut v; //~ ERROR cannot borrow
+ x = &mut v; //[ast]~ ERROR cannot borrow
}
}
}
-
fn loop_break_pops_scopes<'r, F>(_v: &'r mut [usize], mut f: F) where
F: FnMut(&'r mut usize) -> bool,
{
+++ /dev/null
-error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
- --> $DIR/borrowck-lend-flow-loop.rs:35:17
- |
-LL | let mut x = &mut v;
- | - mutable borrow occurs here
-...
-LL | borrow(&*v); //~ ERROR cannot borrow
- | ^^ immutable borrow occurs here
-LL | }
-LL | }
- | - mutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
- --> $DIR/borrowck-lend-flow-loop.rs:45:17
- |
-LL | let mut x = &mut v;
- | - mutable borrow occurs here
-LL | for _ in 0..3 {
-LL | borrow(&*v); //~ ERROR cannot borrow
- | ^^ immutable borrow occurs here
-...
-LL | }
- | - mutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
- --> $DIR/borrowck-lend-flow-loop.rs:57:25
- |
-LL | borrow_mut(&mut *v); //~ ERROR cannot borrow
- | ^^ mutable borrow occurs here
-LL | _x = &v;
- | - immutable borrow occurs here
-LL | }
-LL | }
- | - immutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
- --> $DIR/borrowck-lend-flow-loop.rs:69:25
- |
-LL | borrow_mut(&mut *v); //~ ERROR cannot borrow
- | ^^ mutable borrow occurs here
-LL | _x = &v;
- | - immutable borrow occurs here
-LL | }
-LL | }
- | - immutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
- --> $DIR/borrowck-lend-flow-loop.rs:86:21
- |
-LL | _x = &v;
- | - immutable borrow occurs here
-...
-LL | borrow_mut(&mut *v); //~ ERROR cannot borrow
- | ^^ mutable borrow occurs here
-LL | }
- | - immutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as mutable because `v` is also borrowed as immutable
- --> $DIR/borrowck-lend-flow-loop.rs:100:21
- |
-LL | _x = &v;
- | - immutable borrow occurs here
-...
-LL | borrow_mut(&mut *v); //~ ERROR cannot borrow
- | ^^ mutable borrow occurs here
-LL | }
- | - immutable borrow ends here
-
-error[E0502]: cannot borrow `*v` as immutable because `v` is also borrowed as mutable
- --> $DIR/borrowck-lend-flow-loop.rs:109:17
- |
-LL | borrow(&*v); //~ ERROR cannot borrow
- | ^^ immutable borrow occurs here
-LL | if cond2 {
-LL | x = &mut v; //~ ERROR cannot borrow
- | - mutable borrow occurs here
-...
-LL | }
- | - mutable borrow ends here
-
-error[E0499]: cannot borrow `v` as mutable more than once at a time
- --> $DIR/borrowck-lend-flow-loop.rs:111:22
- |
-LL | x = &mut v; //~ ERROR cannot borrow
- | ^ mutable borrow starts here in previous iteration of loop
-...
-LL | }
- | - mutable borrow ends here
-
-error: aborting due to 8 previous errors
-
-Some errors occurred: E0499, E0502.
-For more information about an error, try `rustc --explain E0499`.
It represents potential unsoundness in your code.
This warning will become a hard error in the future.
-warning[E0507]: cannot move out of `foo`, as it is immutable for the pattern guard
- --> $DIR/borrowck-migrate-to-nll.rs:35:17
- |
-LL | (|| { let bar = foo; bar.take() })();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | cannot move out of `foo`, as it is immutable for the pattern guard
- | cannot move
- |
- = note: variables bound in patterns are immutable until the end of the pattern guard
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-
It represents potential unsoundness in your code.
This warning will become a hard error in the future.
-warning[E0507]: cannot move out of `foo`, as it is immutable for the pattern guard
- --> $DIR/borrowck-migrate-to-nll.rs:35:17
- |
-LL | (|| { let bar = foo; bar.take() })();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | cannot move out of `foo`, as it is immutable for the pattern guard
- | cannot move
- |
- = note: variables bound in patterns are immutable until the end of the pattern guard
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-
LL | Foo::Foo2(num) => (),
| ^^^
-error[E0507]: cannot move out of `f.0` which is behind a `&` reference
- --> $DIR/borrowck-move-error-with-note.rs:23:19
- |
-LL | let f = &Foo::Foo1(box 1, box 2);
- | ------------------------ help: consider changing this to be a mutable reference: `&mut Foo::Foo1(box 1, box 2)`
-...
-LL | Foo::Foo1(num1,
- | ^^^^
- | |
- | cannot move out of `f.0` which is behind a `&` reference
- | `f` is a `&` reference, so the data it refers to cannot be moved
-
-error[E0507]: cannot move out of `f.1` which is behind a `&` reference
- --> $DIR/borrowck-move-error-with-note.rs:24:19
- |
-LL | let f = &Foo::Foo1(box 1, box 2);
- | ------------------------ help: consider changing this to be a mutable reference: `&mut Foo::Foo1(box 1, box 2)`
-...
-LL | num2) => (),
- | ^^^^
- | |
- | cannot move out of `f.1` which is behind a `&` reference
- | `f` is a `&` reference, so the data it refers to cannot be moved
-
-error[E0507]: cannot move out of `f.0` which is behind a `&` reference
- --> $DIR/borrowck-move-error-with-note.rs:25:19
- |
-LL | let f = &Foo::Foo1(box 1, box 2);
- | ------------------------ help: consider changing this to be a mutable reference: `&mut Foo::Foo1(box 1, box 2)`
-...
-LL | Foo::Foo2(num) => (),
- | ^^^
- | |
- | cannot move out of `f.0` which is behind a `&` reference
- | `f` is a `&` reference, so the data it refers to cannot be moved
-
error[E0509]: cannot move out of type `S`, which implements the `Drop` trait
--> $DIR/borrowck-move-error-with-note.rs:39:11
|
LL | n => {
| ^
-error[E0507]: cannot move out of `a.a` which is behind a `&` reference
- --> $DIR/borrowck-move-error-with-note.rs:59:9
- |
-LL | let a = &A { a: box 1 };
- | --------------- help: consider changing this to be a mutable reference: `&mut A { a: box 1 }`
-...
-LL | n => {
- | ^
- | |
- | cannot move out of `a.a` which is behind a `&` reference
- | `a` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 7 previous errors
+error: aborting due to 3 previous errors
Some errors occurred: E0507, E0509.
For more information about an error, try `rustc --explain E0507`.
| cannot move out of dereference of raw pointer
| help: consider removing the `*`: `x`
-error[E0507]: cannot move out of `*x` which is behind a `*const` pointer
- --> $DIR/borrowck-move-from-unsafe-ptr.rs:13:13
- |
-LL | unsafe fn foo(x: *const Box<isize>) -> Box<isize> {
- | ----------------- help: consider changing this to be a mutable pointer: `*mut std::boxed::Box<isize>`
-LL | let y = *x; //~ ERROR cannot move out of dereference of raw pointer
- | ^^
- | |
- | cannot move out of `*x` which is behind a `*const` pointer
- | `x` is a `*const` pointer, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
LL | fn arg_item(&_x: &String) {}
| ^^
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-move-in-irrefut-pat.rs:16:14
- |
-LL | fn arg_item(&_x: &String) {}
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/borrowck-move-in-irrefut-pat.rs:21:11
|
LL | with(|&_x| ())
| ^^
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-move-in-irrefut-pat.rs:21:12
- |
-LL | with(|&_x| ())
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-move-in-irrefut-pat.rs:27:10
- |
-LL | let &_x = &"hi".to_string();
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/borrowck-move-in-irrefut-pat.rs:27:15
|
LL | let &_x = &"hi".to_string();
| ^^
-error: aborting due to 6 previous errors
+error: aborting due to 3 previous errors
For more information about this error, try `rustc --explain E0507`.
LL | let _x = Rc::new(vec![1, 2]).into_iter();
| ^^^^^^^^^^^^^^^^^^^ cannot move out of an `Rc`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-move-out-of-overloaded-auto-deref.rs:17:14
- |
-LL | let _x = Rc::new(vec![1, 2]).into_iter();
- | ^^^^^^^^^^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| cannot move out of an `Rc`
| help: consider removing the `*`: `Rc::new("hi".to_string())`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-move-out-of-overloaded-deref.rs:14:14
- |
-LL | let _x = *Rc::new("hi".to_string());
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
+++ /dev/null
-error[E0507]: cannot move out of static item
- --> $DIR/borrowck-move-out-of-static-item.rs:28:10
- |
-LL | test(BAR); //[ast]~ ERROR cannot move out of static item [E0507]
- | ^^^ cannot move out of static item
-
-error[E0507]: cannot move out of immutable static item `BAR`
- --> $DIR/borrowck-move-out-of-static-item.rs:28:10
- |
-LL | test(BAR); //[ast]~ ERROR cannot move out of static item [E0507]
- | ^^^
- | |
- | cannot move out of immutable static item `BAR`
- | cannot move
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
LL | Foo { string: b }] => {
|
-error[E0507]: cannot move out of `tail[..].string` which is behind a `&` reference
- --> $DIR/borrowck-move-out-of-vec-tail.rs:30:33
- |
-LL | [_, ref tail..] => {
- | -------- help: consider changing this to be a mutable reference: `ref mut tail`
-LL | match tail {
-LL | &[Foo { string: a },
- | ^
- | |
- | cannot move out of `tail[..].string` which is behind a `&` reference
- | `tail` is a `&` reference, so the data it refers to cannot be moved
-
-error[E0507]: cannot move out of `tail[..].string` which is behind a `&` reference
- --> $DIR/borrowck-move-out-of-vec-tail.rs:34:33
- |
-LL | [_, ref tail..] => {
- | -------- help: consider changing this to be a mutable reference: `ref mut tail`
-...
-LL | Foo { string: b }] => {
- | ^
- | |
- | cannot move out of `tail[..].string` which is behind a `&` reference
- | `tail` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 3 previous errors
+error: aborting due to previous error
-Some errors occurred: E0507, E0508.
-For more information about an error, try `rustc --explain E0507`.
+For more information about this error, try `rustc --explain E0508`.
+error[E0502]: cannot borrow `s` as immutable because it is also borrowed as mutable
+ --> $DIR/borrowck-overloaded-call.rs:69:5
+ |
+LL | let sp = &mut s;
+ | ------ mutable borrow occurs here
+LL | s(3); //~ ERROR cannot borrow `s` as immutable because it is also borrowed as mutable
+ | ^ immutable borrow occurs here
+LL | use_mut(sp);
+ | -- mutable borrow later used here
+
error[E0596]: cannot borrow `s` as mutable, as it is not declared as mutable
--> $DIR/borrowck-overloaded-call.rs:77:5
|
|
= note: move occurs because `s` has type `SFnOnce`, which does not implement the `Copy` trait
-error: aborting due to 2 previous errors
+error: aborting due to 3 previous errors
-Some errors occurred: E0382, E0596.
+Some errors occurred: E0382, E0502, E0596.
For more information about an error, try `rustc --explain E0382`.
};
let sp = &mut s;
s(3); //~ ERROR cannot borrow `s` as immutable because it is also borrowed as mutable
+ use_mut(sp);
}
-
fn g() {
let s = SFnMut {
x: 1,
}
fn main() {}
+
+fn use_mut<T>(_: &mut T) { }
| - mutable borrow occurs here
LL | s(3); //~ ERROR cannot borrow `s` as immutable because it is also borrowed as mutable
| ^ immutable borrow occurs here
+LL | use_mut(sp);
LL | }
| - mutable borrow ends here
| cannot move out of borrowed content
| help: consider borrowing here: `&v[0]`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/borrowck-overloaded-index-move-from-vec.rs:30:15
- |
-LL | let bad = v[0];
- | ^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
+error[E0505]: cannot move out of `s` because it is borrowed
+ --> $DIR/borrowck-overloaded-index-move-index.rs:60:22
+ |
+LL | let rs = &mut s;
+ | ------ borrow of `s` occurs here
+LL |
+LL | println!("{}", f[s]);
+ | ^ move out of `s` occurs here
+...
+LL | use_mut(rs);
+ | -- borrow later used here
+
+error[E0505]: cannot move out of `s` because it is borrowed
+ --> $DIR/borrowck-overloaded-index-move-index.rs:63:7
+ |
+LL | let rs = &mut s;
+ | ------ borrow of `s` occurs here
+...
+LL | f[s] = 10;
+ | ^ move out of `s` occurs here
+...
+LL | use_mut(rs);
+ | -- borrow later used here
+
error[E0382]: use of moved value: `s`
--> $DIR/borrowck-overloaded-index-move-index.rs:63:7
|
|
= note: move occurs because `s` has type `std::string::String`, which does not implement the `Copy` trait
-error: aborting due to previous error
+error: aborting due to 3 previous errors
-For more information about this error, try `rustc --explain E0382`.
+Some errors occurred: E0382, E0505.
+For more information about an error, try `rustc --explain E0382`.
let _j = &i;
println!("{}", s[i]); // no error, i is copy
println!("{}", s[i]);
+
+ use_mut(rs);
}
+
+fn use_mut<T>(_: &mut T) { }
+error[E0499]: cannot borrow `foo.bar1` as mutable more than once at a time
+ --> $DIR/borrowck-reborrow-from-mut.rs:23:17
+ |
+LL | let _bar1 = &mut foo.bar1;
+ | ------------- first mutable borrow occurs here
+LL | let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
+ | ----- first borrow later used here
+
+error[E0502]: cannot borrow `foo.bar1` as immutable because it is also borrowed as mutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:28:17
+ |
+LL | let _bar1 = &mut foo.bar1;
+ | ------------- mutable borrow occurs here
+LL | let _bar2 = &foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^ immutable borrow occurs here
+LL | use_mut(_bar1);
+ | ----- mutable borrow later used here
+
+error[E0502]: cannot borrow `foo.bar1` as mutable because it is also borrowed as immutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:33:17
+ |
+LL | let _bar1 = &foo.bar1;
+ | --------- immutable borrow occurs here
+LL | let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
+ | ----- immutable borrow later used here
+
+error[E0499]: cannot borrow `foo.bar1` as mutable more than once at a time
+ --> $DIR/borrowck-reborrow-from-mut.rs:55:21
+ |
+LL | let _bar1 = &mut foo.bar1;
+ | ------------- first mutable borrow occurs here
+LL | match *foo {
+LL | Foo { bar1: ref mut _bar1, bar2: _ } => {}
+ | ^^^^^^^^^^^^^ second mutable borrow occurs here
+...
+LL | use_mut(_bar1);
+ | ----- first borrow later used here
+
+error[E0502]: cannot borrow `foo.bar1` as immutable because it is also borrowed as mutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:62:17
+ |
+LL | let _bar1 = &mut foo.bar1.int1;
+ | ------------------ mutable borrow occurs here
+LL | let _foo1 = &foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^ immutable borrow occurs here
+LL | let _foo2 = &*foo; //~ ERROR cannot borrow
+LL | use_mut(_bar1);
+ | ----- mutable borrow later used here
+
+error[E0502]: cannot borrow `*foo` as immutable because it is also borrowed as mutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:63:17
+ |
+LL | let _bar1 = &mut foo.bar1.int1;
+ | ------------------ mutable borrow occurs here
+LL | let _foo1 = &foo.bar1; //~ ERROR cannot borrow
+LL | let _foo2 = &*foo; //~ ERROR cannot borrow
+ | ^^^^^ immutable borrow occurs here
+LL | use_mut(_bar1);
+ | ----- mutable borrow later used here
+
+error[E0499]: cannot borrow `foo.bar1` as mutable more than once at a time
+ --> $DIR/borrowck-reborrow-from-mut.rs:68:17
+ |
+LL | let _bar1 = &mut foo.bar1.int1;
+ | ------------------ first mutable borrow occurs here
+LL | let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
+ | ----- first borrow later used here
+
+error[E0499]: cannot borrow `*foo` as mutable more than once at a time
+ --> $DIR/borrowck-reborrow-from-mut.rs:73:17
+ |
+LL | let _bar1 = &mut foo.bar1.int1;
+ | ------------------ first mutable borrow occurs here
+LL | let _foo2 = &mut *foo; //~ ERROR cannot borrow
+ | ^^^^^^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
+ | ----- first borrow later used here
+
+error[E0502]: cannot borrow `foo.bar1` as mutable because it is also borrowed as immutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:78:17
+ |
+LL | let _bar1 = &foo.bar1.int1;
+ | -------------- immutable borrow occurs here
+LL | let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
+ | ^^^^^^^^^^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
+ | ----- immutable borrow later used here
+
+error[E0502]: cannot borrow `*foo` as mutable because it is also borrowed as immutable
+ --> $DIR/borrowck-reborrow-from-mut.rs:83:17
+ |
+LL | let _bar1 = &foo.bar1.int1;
+ | -------------- immutable borrow occurs here
+LL | let _foo2 = &mut *foo; //~ ERROR cannot borrow
+ | ^^^^^^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
+ | ----- immutable borrow later used here
+
error[E0596]: cannot borrow `foo.bar1` as mutable, as it is behind a `&` reference
--> $DIR/borrowck-reborrow-from-mut.rs:98:17
|
LL | let _bar1 = &mut foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^^^^^^ `foo` is a `&` reference, so the data it refers to cannot be borrowed as mutable
-error: aborting due to previous error
+error: aborting due to 11 previous errors
-For more information about this error, try `rustc --explain E0596`.
+Some errors occurred: E0499, E0502, E0596.
+For more information about an error, try `rustc --explain E0499`.
fn borrow_same_field_twice_mut_mut(foo: &mut Foo) {
let _bar1 = &mut foo.bar1;
let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
+ use_mut(_bar1);
}
-
fn borrow_same_field_twice_mut_imm(foo: &mut Foo) {
let _bar1 = &mut foo.bar1;
let _bar2 = &foo.bar1; //~ ERROR cannot borrow
+ use_mut(_bar1);
}
-
fn borrow_same_field_twice_imm_mut(foo: &mut Foo) {
let _bar1 = &foo.bar1;
let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
+ use_imm(_bar1);
}
-
fn borrow_same_field_twice_imm_imm(foo: &mut Foo) {
let _bar1 = &foo.bar1;
let _bar2 = &foo.bar1;
+ use_imm(_bar1);
}
-
fn borrow_both_mut(foo: &mut Foo) {
let _bar1 = &mut foo.bar1;
let _bar2 = &mut foo.bar2;
+ use_mut(_bar1);
}
-
fn borrow_both_mut_pattern(foo: &mut Foo) {
match *foo {
- Foo { bar1: ref mut _bar1, bar2: ref mut _bar2 } => {}
+ Foo { bar1: ref mut _bar1, bar2: ref mut _bar2 } =>
+ { use_mut(_bar1); use_mut(_bar2); }
}
}
-
fn borrow_var_and_pattern(foo: &mut Foo) {
let _bar1 = &mut foo.bar1;
match *foo {
Foo { bar1: ref mut _bar1, bar2: _ } => {}
//~^ ERROR cannot borrow
}
+ use_mut(_bar1);
}
-
fn borrow_mut_and_base_imm(foo: &mut Foo) {
let _bar1 = &mut foo.bar1.int1;
let _foo1 = &foo.bar1; //~ ERROR cannot borrow
let _foo2 = &*foo; //~ ERROR cannot borrow
+ use_mut(_bar1);
}
-
fn borrow_mut_and_base_mut(foo: &mut Foo) {
let _bar1 = &mut foo.bar1.int1;
let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
+ use_mut(_bar1);
}
-
fn borrow_mut_and_base_mut2(foo: &mut Foo) {
let _bar1 = &mut foo.bar1.int1;
let _foo2 = &mut *foo; //~ ERROR cannot borrow
+ use_mut(_bar1);
}
-
fn borrow_imm_and_base_mut(foo: &mut Foo) {
let _bar1 = &foo.bar1.int1;
let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
+ use_imm(_bar1);
}
-
fn borrow_imm_and_base_mut2(foo: &mut Foo) {
let _bar1 = &foo.bar1.int1;
let _foo2 = &mut *foo; //~ ERROR cannot borrow
+ use_imm(_bar1);
}
-
fn borrow_imm_and_base_imm(foo: &mut Foo) {
let _bar1 = &foo.bar1.int1;
let _foo1 = &foo.bar1;
let _foo2 = &*foo;
+ use_imm(_bar1);
}
-
fn borrow_mut_and_imm(foo: &mut Foo) {
let _bar1 = &mut foo.bar1;
let _foo1 = &foo.bar2;
+ use_mut(_bar1);
}
-
fn borrow_mut_from_imm(foo: &Foo) {
let _bar1 = &mut foo.bar1; //~ ERROR cannot borrow
}
fn borrow_long_path_both_mut(foo: &mut Foo) {
let _bar1 = &mut foo.bar1.int1;
let _foo1 = &mut foo.bar2.int2;
+ use_mut(_bar1);
}
-
fn main() {}
+
+fn use_mut<T>(_: &mut T) { }
+fn use_imm<T>(_: &T) { }
| -------- first mutable borrow occurs here
LL | let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
LL | }
| - first borrow ends here
| -------- mutable borrow occurs here
LL | let _bar2 = &foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ immutable borrow occurs here
+LL | use_mut(_bar1);
LL | }
| - mutable borrow ends here
| -------- immutable borrow occurs here
LL | let _bar2 = &mut foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
LL | }
| - immutable borrow ends here
| ------------- mutable borrow occurs here
LL | let _foo1 = &foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ immutable borrow occurs here
-LL | let _foo2 = &*foo; //~ ERROR cannot borrow
+...
LL | }
| - mutable borrow ends here
LL | let _foo1 = &foo.bar1; //~ ERROR cannot borrow
LL | let _foo2 = &*foo; //~ ERROR cannot borrow
| ^^^^ immutable borrow occurs here
+LL | use_mut(_bar1);
LL | }
| - mutable borrow ends here
| ------------- first mutable borrow occurs here
LL | let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
LL | }
| - first borrow ends here
| ------------- first mutable borrow occurs here
LL | let _foo2 = &mut *foo; //~ ERROR cannot borrow
| ^^^^ second mutable borrow occurs here
+LL | use_mut(_bar1);
LL | }
| - first borrow ends here
| ------------- immutable borrow occurs here
LL | let _foo1 = &mut foo.bar1; //~ ERROR cannot borrow
| ^^^^^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
LL | }
| - immutable borrow ends here
| ------------- immutable borrow occurs here
LL | let _foo2 = &mut *foo; //~ ERROR cannot borrow
| ^^^^ mutable borrow occurs here
+LL | use_imm(_bar1);
LL | }
| - immutable borrow ends here
+error[E0502]: cannot borrow `f` as immutable because it is also borrowed as mutable
+ --> $DIR/borrowck-unboxed-closures.rs:13:5
+ |
+LL | let g = &mut f;
+ | ------ mutable borrow occurs here
+LL | f(1, 2); //~ ERROR cannot borrow `f` as immutable
+ | ^ immutable borrow occurs here
+LL | use_mut(g);
+ | - mutable borrow later used here
+
error[E0596]: cannot borrow `f` as mutable, as it is not declared as mutable
--> $DIR/borrowck-unboxed-closures.rs:17:5
|
|
= note: move occurs because `f` has type `F`, which does not implement the `Copy` trait
-error: aborting due to 2 previous errors
+error: aborting due to 3 previous errors
-Some errors occurred: E0382, E0596.
+Some errors occurred: E0382, E0502, E0596.
For more information about an error, try `rustc --explain E0382`.
fn a<F:Fn(isize, isize) -> isize>(mut f: F) {
let g = &mut f;
f(1, 2); //~ ERROR cannot borrow `f` as immutable
+ use_mut(g);
}
-
fn b<F:FnMut(isize, isize) -> isize>(f: F) {
f(1, 2); //~ ERROR cannot borrow immutable argument
}
}
fn main() {}
+
+fn use_mut<T>(_: &mut T) { }
| - mutable borrow occurs here
LL | f(1, 2); //~ ERROR cannot borrow `f` as immutable
| ^ immutable borrow occurs here
+LL | use_mut(g);
LL | }
| - mutable borrow ends here
LL | let opt = a.iter().enumerate().find(|(_, &s)| {
| ^
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-51415.rs:16:47
- |
-LL | let opt = a.iter().enumerate().find(|(_, &s)| {
- | ^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
--- /dev/null
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:21:22
+ |
+LL | let mut c1 = |y: &'static mut isize| x = y;
+ | ^^^^^^^^^^^^^^^^^^^^^^^ cannot borrow mutably
+help: consider removing the `&mut`, as it is an immutable binding to a mutable reference
+ |
+LL | x
+ |
+
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:29:22
+ |
+LL | let mut c1 = |z: &'static mut isize| {
+ | ^^^^^^^^^^^^^^^^^^^^^^^ cannot borrow mutably
+help: consider removing the `&mut`, as it is an immutable binding to a mutable reference
+ |
+LL | x
+ |
+
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:40:9
+ |
+LL | pub fn capture_assign_whole(x: (i32,)) {
+ | - help: make this binding mutable: `mut x`
+LL | || { x = (1,); };
+ | ^^ cannot borrow mutably
+
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:43:9
+ |
+LL | pub fn capture_assign_part(x: (i32,)) {
+ | - help: make this binding mutable: `mut x`
+LL | || { x.0 = 1; };
+ | ^^ cannot borrow mutably
+
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:46:9
+ |
+LL | pub fn capture_reborrow_whole(x: (i32,)) {
+ | - help: make this binding mutable: `mut x`
+LL | || { &mut x; };
+ | ^^ cannot borrow mutably
+
+error[E0595]: closure cannot assign to immutable argument `x`
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:49:9
+ |
+LL | pub fn capture_reborrow_part(x: (i32,)) {
+ | - help: make this binding mutable: `mut x`
+LL | || { &mut x.0; };
+ | ^^ cannot borrow mutably
+
+error: aborting due to 6 previous errors
+
+For more information about this error, try `rustc --explain E0595`.
--- /dev/null
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:21:46
+ |
+LL | pub fn e(x: &'static mut isize) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | static mut Y: isize = 3;
+LL | let mut c1 = |y: &'static mut isize| x = y;
+ | ^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:30:50
+ |
+LL | pub fn ee(x: &'static mut isize) {
+ | - help: consider changing this to be mutable: `mut x`
+...
+LL | let mut c2 = |y: &'static mut isize| x = y;
+ | ^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:40:14
+ |
+LL | pub fn capture_assign_whole(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { x = (1,); };
+ | ^^^^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x.0`, as `x` is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:43:14
+ |
+LL | pub fn capture_assign_part(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { x.0 = 1; };
+ | ^^^^^^^ cannot assign
+
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:46:14
+ |
+LL | pub fn capture_reborrow_whole(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { &mut x; };
+ | ^^^^^^ cannot borrow as mutable
+
+error[E0596]: cannot borrow `x.0` as mutable, as `x` is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:49:14
+ |
+LL | pub fn capture_reborrow_part(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { &mut x.0; };
+ | ^^^^^^^^ cannot borrow as mutable
+
+error: aborting due to 6 previous errors
+
+Some errors occurred: E0594, E0596.
+For more information about an error, try `rustc --explain E0594`.
--- /dev/null
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:21:46
+ |
+LL | pub fn e(x: &'static mut isize) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | static mut Y: isize = 3;
+LL | let mut c1 = |y: &'static mut isize| x = y;
+ | ^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:30:50
+ |
+LL | pub fn ee(x: &'static mut isize) {
+ | - help: consider changing this to be mutable: `mut x`
+...
+LL | let mut c2 = |y: &'static mut isize| x = y;
+ | ^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:40:14
+ |
+LL | pub fn capture_assign_whole(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { x = (1,); };
+ | ^^^^^^^^ cannot assign
+
+error[E0594]: cannot assign to `x.0`, as `x` is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:43:14
+ |
+LL | pub fn capture_assign_part(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { x.0 = 1; };
+ | ^^^^^^^ cannot assign
+
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:46:14
+ |
+LL | pub fn capture_reborrow_whole(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { &mut x; };
+ | ^^^^^^ cannot borrow as mutable
+
+error[E0596]: cannot borrow `x.0` as mutable, as `x` is not declared as mutable
+ --> $DIR/issue-55492-borrowck-migrate-scans-parents.rs:49:14
+ |
+LL | pub fn capture_reborrow_part(x: (i32,)) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | || { &mut x.0; };
+ | ^^^^^^^^ cannot borrow as mutable
+
+error: aborting due to 6 previous errors
+
+Some errors occurred: E0594, E0596.
+For more information about an error, try `rustc --explain E0594`.
--- /dev/null
+// rust-lang/rust#55492: errors detected during MIR-borrowck's
+// analysis of a closure body may only be caught when AST-borrowck
+// looks at some parent.
+
+// revisions: ast migrate nll
+
+// Since we are testing nll (and migration) explicitly as a separate
+// revisions, don't worry about the --compare-mode=nll on this test.
+
+// ignore-compare-mode-nll
+
+//[ast]compile-flags: -Z borrowck=ast
+//[migrate]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+//[nll]compile-flags: -Z borrowck=mir -Z two-phase-borrows
+
+
+// transcribed from borrowck-closures-unique.rs
+mod borrowck_closures_unique {
+ pub fn e(x: &'static mut isize) {
+ static mut Y: isize = 3;
+ let mut c1 = |y: &'static mut isize| x = y;
+ unsafe { c1(&mut Y); }
+ }
+}
+
+mod borrowck_closures_unique_grandparent {
+ pub fn ee(x: &'static mut isize) {
+ static mut Z: isize = 3;
+ let mut c1 = |z: &'static mut isize| {
+ let mut c2 = |y: &'static mut isize| x = y;
+ c2(z);
+ };
+ unsafe { c1(&mut Z); }
+ }
+}
+
+// adapted from mutability_errors.rs
+mod mutability_errors {
+ pub fn capture_assign_whole(x: (i32,)) {
+ || { x = (1,); };
+ }
+ pub fn capture_assign_part(x: (i32,)) {
+ || { x.0 = 1; };
+ }
+ pub fn capture_reborrow_whole(x: (i32,)) {
+ || { &mut x; };
+ }
+ pub fn capture_reborrow_part(x: (i32,)) {
+ || { &mut x.0; };
+ }
+}
+
+fn main() {
+ static mut X: isize = 2;
+ unsafe { borrowck_closures_unique::e(&mut X); }
+
+ mutability_errors::capture_assign_whole((1000,));
+ mutability_errors::capture_assign_part((2000,));
+ mutability_errors::capture_reborrow_whole((3000,));
+ mutability_errors::capture_reborrow_part((4000,));
+}
+++ /dev/null
-error[E0507]: cannot move out of borrowed content
- --> $DIR/move-in-static-initializer-issue-38520.rs:25:23
- |
-LL | static Y: usize = get(*&X); //[ast]~ ERROR E0507
- | ^^^ cannot move out of borrowed content
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/move-in-static-initializer-issue-38520.rs:25:23
- |
-LL | static Y: usize = get(*&X); //[ast]~ ERROR E0507
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of borrowed content
- --> $DIR/move-in-static-initializer-issue-38520.rs:27:22
- |
-LL | const Z: usize = get(*&X); //[ast]~ ERROR E0507
- | ^^^ cannot move out of borrowed content
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/move-in-static-initializer-issue-38520.rs:27:22
- |
-LL | const Z: usize = get(*&X); //[ast]~ ERROR E0507
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 4 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
LL | &mut x.0; //~ ERROR
| ^^^^^^^^ cannot borrow as mutable
-warning[E0594]: cannot assign to `x`, as it is not declared as mutable
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
--> $DIR/mutability-errors.rs:70:9
|
LL | fn imm_capture(x: (i32,)) {
LL | || { //~ ERROR
LL | x = (1,);
| ^^^^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0594]: cannot assign to `x.0`, as `x` is not declared as mutable
+error[E0594]: cannot assign to `x.0`, as `x` is not declared as mutable
--> $DIR/mutability-errors.rs:71:9
|
LL | fn imm_capture(x: (i32,)) {
...
LL | x.0 = 1;
| ^^^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
--> $DIR/mutability-errors.rs:72:9
|
LL | fn imm_capture(x: (i32,)) {
...
LL | &mut x;
| ^^^^^^ cannot borrow as mutable
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0596]: cannot borrow `x.0` as mutable, as `x` is not declared as mutable
+error[E0596]: cannot borrow `x.0` as mutable, as `x` is not declared as mutable
--> $DIR/mutability-errors.rs:73:9
|
LL | fn imm_capture(x: (i32,)) {
...
LL | &mut x.0;
| ^^^^^^^^ cannot borrow as mutable
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
error[E0594]: cannot assign to `x`, as it is not declared as mutable
--> $DIR/mutability-errors.rs:76:9
LL | &mut X.0; //~ ERROR
| ^^^^^^^^ cannot borrow as mutable
-error: aborting due to 34 previous errors
+error: aborting due to 38 previous errors
Some errors occurred: E0594, E0596.
For more information about an error, try `rustc --explain E0594`.
LL | y.into_iter();
| ^ cannot move out of captured variable in an `Fn` closure
-error[E0507]: cannot move out of `y`, as it is a captured variable in a `Fn` closure
- --> $DIR/unboxed-closures-move-upvar-from-non-once-ref-closure.rs:21:9
- |
-LL | y.into_iter();
- | ^
- | |
- | cannot move out of `y`, as it is a captured variable in a `Fn` closure
- | cannot move
- |
-help: consider changing this to accept closures that implement `FnMut`
- --> $DIR/unboxed-closures-move-upvar-from-non-once-ref-closure.rs:20:10
- |
-LL | call(|| {
- | __________^
-LL | | y.into_iter();
-LL | | //~^ ERROR cannot move out of captured outer variable in an `Fn` closure
-LL | | });
- | |_____^
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
LL | &E::Bar(identifier) => f(identifier.clone()) //~ ERROR cannot move
| ^^^^^^^^^^
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/by-move-pattern-binding.rs:26:17
- |
-LL | &E::Bar(identifier) => f(identifier.clone()) //~ ERROR cannot move
- | ^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| cannot move out of static item
| help: consider borrowing here: `&x`
-error[E0507]: cannot move out of immutable static item `x`
- --> $DIR/check-static-values-constraints.rs:120:45
- |
-LL | let y = { static x: Box<isize> = box 3; x };
- | ^
- | |
- | cannot move out of immutable static item `x`
- | cannot move
-
error[E0010]: allocations are not allowed in statics
--> $DIR/check-static-values-constraints.rs:120:38
|
LL | let y = { static x: Box<isize> = box 3; x };
| ^^^^^ allocation not allowed in statics
-error: aborting due to 11 previous errors
+error: aborting due to 10 previous errors
Some errors occurred: E0010, E0015, E0493, E0507.
For more information about an error, try `rustc --explain E0010`.
+++ /dev/null
-error[E0509]: cannot move out of type `S`, which implements the `Drop` trait
- --> $DIR/overlapping_spans.rs:20:11
- |
-LL | match (S {f:"foo".to_string()}) {
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ cannot move out of here
-LL | S {f:_s} => {} //~ ERROR cannot move out
- | -- data moved here
- |
-note: move occurs because `_s` has type `std::string::String`, which does not implement the `Copy` trait
- --> $DIR/overlapping_spans.rs:21:14
- |
-LL | S {f:_s} => {} //~ ERROR cannot move out
- | ^^
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0509`.
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[derive(Debug)]
-struct Foo { }
-
-struct S {f:String}
-impl Drop for S {
- fn drop(&mut self) { println!("{}", self.f); }
-}
-
-fn main() {
- match (S {f:"foo".to_string()}) {
- S {f:_s} => {} //~ ERROR cannot move out
- }
-}
+++ /dev/null
-error[E0509]: cannot move out of type `S`, which implements the `Drop` trait
- --> $DIR/overlapping_spans.rs:21:9
- |
-LL | S {f:_s} => {} //~ ERROR cannot move out
- | ^^^^^--^
- | | |
- | | hint: to prevent move, use `ref _s` or `ref mut _s`
- | cannot move out of here
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0509`.
--> $DIR/two_files.rs:15:6
|
LL | impl Bar for Baz { } //~ ERROR expected trait, found type alias
- | ^^^ type aliases cannot be used for traits
+ | ^^^ type aliases cannot be used as traits
+ |
+ = note: did you mean to use a trait alias?
error: aborting due to previous error
union Union {
foo: &'static Foo,
bar: &'static Bar,
- usize: &'static usize,
+ u8: &'static u8,
}
-static BAR: usize = 42;
+static BAR: u8 = 42;
static FOO: (&Foo, &Bar) = unsafe {(
- Union { usize: &BAR }.foo,
- Union { usize: &BAR }.bar,
+ Union { u8: &BAR }.foo,
+ Union { u8: &BAR }.bar,
)};
fn main() {}
union Union {
foo: &'static Foo,
bar: &'static Bar,
- usize: &'static usize,
+ u8: &'static u8,
}
-static BAR: usize = 5;
+static BAR: u8 = 5;
static FOO: (&Foo, &Bar) = unsafe {( //~ undefined behavior
- Union { usize: &BAR }.foo,
- Union { usize: &BAR }.bar,
+ Union { u8: &BAR }.foo,
+ Union { u8: &BAR }.bar,
)};
fn main() {}
--> $DIR/double_check2.rs:25:1
|
LL | / static FOO: (&Foo, &Bar) = unsafe {( //~ undefined behavior
-LL | | Union { usize: &BAR }.foo,
-LL | | Union { usize: &BAR }.bar,
+LL | | Union { u8: &BAR }.foo,
+LL | | Union { u8: &BAR }.bar,
LL | | )};
| |___^ type validation failed: encountered invalid enum discriminant 5 at .1.<deref>
|
--- /dev/null
+// issue-49296: Unsafe shenigans in constants can result in missing errors
+
+#![feature(const_fn)]
+#![feature(const_fn_union)]
+
+const unsafe fn transmute<T: Copy, U: Copy>(t: T) -> U {
+ union Transmute<T: Copy, U: Copy> {
+ from: T,
+ to: U,
+ }
+
+ Transmute { from: t }.to
+}
+
+const fn wat(x: u64) -> &'static u64 {
+ unsafe { transmute(&x) }
+}
+const X: u64 = *wat(42);
+//~^ ERROR any use of this value will cause an error
+
+fn main() {
+ println!("{}", X);
+}
--- /dev/null
+error: any use of this value will cause an error
+ --> $DIR/issue-49296.rs:18:1
+ |
+LL | const X: u64 = *wat(42);
+ | ^^^^^^^^^^^^^^^--------^
+ | |
+ | dangling pointer was dereferenced
+ |
+ = note: #[deny(const_err)] on by default
+
+error: aborting due to previous error
+
--- /dev/null
+// https://github.com/rust-lang/rust/issues/55454
+// compile-pass
+
+struct This<T>(T);
+
+const C: This<Option<&i32>> = This(Some(&1));
+
+fn main() {
+}
--- /dev/null
+// https://github.com/rust-lang/rust/issues/55223
+
+#![feature(const_let)]
+
+union Foo<'a> {
+ y: &'a (),
+ long_live_the_unit: &'static (),
+}
+
+const FOO: &() = { //~ ERROR any use of this value will cause an error
+ let y = ();
+ unsafe { Foo { y: &y }.long_live_the_unit }
+};
+
+fn main() {}
--- /dev/null
+error: any use of this value will cause an error
+ --> $DIR/dangling-alloc-id-ice.rs:10:1
+ |
+LL | / const FOO: &() = { //~ ERROR any use of this value will cause an error
+LL | | let y = ();
+LL | | unsafe { Foo { y: &y }.long_live_the_unit }
+LL | | };
+ | |__^ type validation failed: encountered dangling pointer in final constant
+ |
+ = note: #[deny(const_err)] on by default
+
+error: aborting due to previous error
+
--- /dev/null
+#![feature(const_let)]
+
+const FOO: *const u32 = { //~ ERROR any use of this value will cause an error
+ let x = 42;
+ &x
+};
+
+fn main() {
+ let x = FOO;
+}
--- /dev/null
+error: any use of this value will cause an error
+ --> $DIR/dangling_raw_ptr.rs:3:1
+ |
+LL | / const FOO: *const u32 = { //~ ERROR any use of this value will cause an error
+LL | | let x = 42;
+LL | | &x
+LL | | };
+ | |__^ type validation failed: encountered dangling pointer in final constant
+ |
+ = note: #[deny(const_err)] on by default
+
+error: aborting due to previous error
+
--- /dev/null
+// https://github.com/rust-lang/rust/issues/55454
+// compile-pass
+
+#[derive(PartialEq)]
+struct This<T>(T);
+
+fn main() {
+ This(Some(&1)) == This(Some(&1));
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+// compile-flags: -Wunused
+
+// ensure there are no special warnings about uninhabited types
+// when deriving Debug on an empty enum
+
+#[derive(Debug)]
+enum Void {} //~ WARN never used
+
+#[derive(Debug)]
+enum Foo { //~ WARN never used
+ Bar(u8),
+ Void(Void),
+}
+
+fn main() {}
+
--- /dev/null
+warning: enum is never used: `Void`
+ --> $DIR/derive-uninhabited-enum-38885.rs:18:1
+ |
+LL | enum Void {} //~ WARN never used
+ | ^^^^^^^^^
+ |
+ = note: `-W dead-code` implied by `-W unused`
+
+warning: enum is never used: `Foo`
+ --> $DIR/derive-uninhabited-enum-38885.rs:21:1
+ |
+LL | enum Foo { //~ WARN never used
+ | ^^^^^^^^
+
LL | if not // lack of braces is [sic]
| -- this `if` statement has a condition, but no block
LL | println!("Then when?");
- | ^
+ | ^ expected `{`
error: unexpected `2` after identifier
--> $DIR/issue-46836-identifier-not-instead-of-negation.rs:36:24
--- /dev/null
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:41:20
+ |
+LL | dt = Dt("dt", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:43:20
+ |
+LL | dr = Dr("dr", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:47:20
+ |
+LL | dt = Dt("dt", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:50:20
+ |
+LL | dr = Dr("dr", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:57:29
+ |
+LL | pt = Pt("pt", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-extern-crate.rs:59:29
+ |
+LL | pr = Pr("pr", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error: aborting due to 6 previous errors
+
+For more information about this error, try `rustc --explain E0597`.
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// The behavior of AST-borrowck and NLL explcitly differ here due to
+// NLL's increased precision; so we use revisions and do not worry
+// about the --compare-mode=nll on this test.
+
+// revisions: ast nll
+//[ast]compile-flags: -Z borrowck=ast
+//[nll]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+
+// ignore-compare-mode-nll
// aux-build:dropck_eyepatch_extern_crate.rs
// Error: destructor order imprecisely modelled
dt = Dt("dt", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
dr = Dr("dr", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
// Error: `c_shortest` dies too soon for the references in dtors to be valid.
dt = Dt("dt", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
+ //[nll]~^^ ERROR `c_shortest` does not live long enough
dr = Dr("dr", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
-
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: Drop impl asserts .1 (A and &'a _) are not accessed
pt = Pt("pt", &c_shortest, &c_long);
pr = Pr("pr", &c_shortest, &c_long);
// Error: Drop impl's assertion does not apply to `B` nor `&'b _`
pt = Pt("pt", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
pr = Pr("pr", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: St and Sr have no destructor.
st = St("st", &c_shortest);
sr = Sr("sr", &c_shortest);
println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ use_imm(sr.1); use_imm(st.1); use_imm(pr.1); use_imm(pt.1); use_imm(dr.1); use_imm(dt.1);
}
+
+fn use_imm<T>(_: &T) { }
+++ /dev/null
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:41:20
- |
-LL | dt = Dt("dt", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:43:20
- |
-LL | dr = Dr("dr", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:47:20
- |
-LL | dt = Dt("dt", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:49:20
- |
-LL | dr = Dr("dr", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:57:29
- |
-LL | pt = Pt("pt", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-extern-crate.rs:59:29
- |
-LL | pr = Pr("pr", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error: aborting due to 6 previous errors
-
-For more information about this error, try `rustc --explain E0597`.
--- /dev/null
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:58:20
+ |
+LL | dt = Dt("dt", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:60:20
+ |
+LL | dr = Dr("dr", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:64:20
+ |
+LL | dt = Dt("dt", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:67:20
+ |
+LL | dr = Dr("dr", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:74:29
+ |
+LL | pt = Pt("pt", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch-reorder.rs:76:29
+ |
+LL | pr = Pr("pr", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error: aborting due to 6 previous errors
+
+For more information about this error, try `rustc --explain E0597`.
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// The behavior of AST-borrowck and NLL explcitly differ here due to
+// NLL's increased precision; so we use revisions and do not worry
+// about the --compare-mode=nll on this test.
+
+// revisions: ast nll
+//[ast]compile-flags: -Z borrowck=ast
+//[nll]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+
+// ignore-compare-mode-nll
#![feature(dropck_eyepatch, rustc_attrs)]
// Error: destructor order imprecisely modelled
dt = Dt("dt", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
dr = Dr("dr", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
// Error: `c_shortest` dies too soon for the references in dtors to be valid.
dt = Dt("dt", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
+ //[nll]~^^ ERROR `c_shortest` does not live long enough
dr = Dr("dr", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
-
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: Drop impl asserts .1 (A and &'a _) are not accessed
pt = Pt("pt", &c_shortest, &c_long);
pr = Pr("pr", &c_shortest, &c_long);
// Error: Drop impl's assertion does not apply to `B` nor `&'b _`
pt = Pt("pt", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
pr = Pr("pr", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: St and Sr have no destructor.
st = St("st", &c_shortest);
sr = Sr("sr", &c_shortest);
println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ use_imm(sr.1); use_imm(st.1); use_imm(pr.1); use_imm(pt.1); use_imm(dr.1); use_imm(dt.1);
}
+
+fn use_imm<T>(_: &T) { }
+++ /dev/null
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:58:20
- |
-LL | dt = Dt("dt", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:60:20
- |
-LL | dr = Dr("dr", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:64:20
- |
-LL | dt = Dt("dt", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:66:20
- |
-LL | dr = Dr("dr", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:74:29
- |
-LL | pt = Pt("pt", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch-reorder.rs:76:29
- |
-LL | pr = Pr("pr", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error: aborting due to 6 previous errors
-
-For more information about this error, try `rustc --explain E0597`.
--- /dev/null
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:81:20
+ |
+LL | dt = Dt("dt", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:83:20
+ |
+LL | dr = Dr("dr", &c);
+ | ^ borrowed value does not live long enough
+...
+LL | }
+ | - `c` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:87:20
+ |
+LL | dt = Dt("dt", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:90:20
+ |
+LL | dr = Dr("dr", &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:98:29
+ |
+LL | pt = Pt("pt", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error[E0597]: `c_shortest` does not live long enough
+ --> $DIR/dropck-eyepatch.rs:100:29
+ |
+LL | pr = Pr("pr", &c_long, &c_shortest);
+ | ^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | }
+ | - `c_shortest` dropped here while still borrowed
+ |
+ = note: values in a scope are dropped in the opposite order they are created
+
+error: aborting due to 6 previous errors
+
+For more information about this error, try `rustc --explain E0597`.
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// The behavior of AST-borrowck and NLL explcitly differ here due to
+// NLL's increased precision; so we use revisions and do not worry
+// about the --compare-mode=nll on this test.
+
+// revisions: ast nll
+//[ast]compile-flags: -Z borrowck=ast
+//[nll]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+
+// ignore-compare-mode-nll
#![feature(dropck_eyepatch, rustc_attrs)]
// Error: destructor order imprecisely modelled
dt = Dt("dt", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
dr = Dr("dr", &c);
- //~^ ERROR `c` does not live long enough
+ //[ast]~^ ERROR `c` does not live long enough
// Error: `c_shortest` dies too soon for the references in dtors to be valid.
dt = Dt("dt", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
+ //[nll]~^^ ERROR `c_shortest` does not live long enough
dr = Dr("dr", &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
-
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: Drop impl asserts .1 (A and &'a _) are not accessed
pt = Pt("pt", &c_shortest, &c_long);
// Error: Drop impl's assertion does not apply to `B` nor `&'b _`
pt = Pt("pt", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
pr = Pr("pr", &c_long, &c_shortest);
- //~^ ERROR `c_shortest` does not live long enough
+ //[ast]~^ ERROR `c_shortest` does not live long enough
// No error: St and Sr have no destructor.
st = St("st", &c_shortest);
sr = Sr("sr", &c_shortest);
println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ use_imm(sr.1); use_imm(st.1); use_imm(pr.1); use_imm(pt.1); use_imm(dr.1); use_imm(dt.1);
}
+
+fn use_imm<T>(_: &T) { }
+++ /dev/null
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch.rs:81:20
- |
-LL | dt = Dt("dt", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c` does not live long enough
- --> $DIR/dropck-eyepatch.rs:83:20
- |
-LL | dr = Dr("dr", &c);
- | ^ borrowed value does not live long enough
-...
-LL | }
- | - `c` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch.rs:87:20
- |
-LL | dt = Dt("dt", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch.rs:89:20
- |
-LL | dr = Dr("dr", &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch.rs:98:29
- |
-LL | pt = Pt("pt", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error[E0597]: `c_shortest` does not live long enough
- --> $DIR/dropck-eyepatch.rs:100:29
- |
-LL | pr = Pr("pr", &c_long, &c_shortest);
- | ^^^^^^^^^^ borrowed value does not live long enough
-...
-LL | }
- | - `c_shortest` dropped here while still borrowed
- |
- = note: values in a scope are dropped in the opposite order they are created
-
-error: aborting due to 6 previous errors
-
-For more information about this error, try `rustc --explain E0597`.
LL | S[0];
| ^^^^ cannot move out of borrowed content
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/dst-index.rs:41:5
- |
-LL | S[0];
- | ^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/dst-index.rs:44:5
|
LL | T[0];
| ^^^^ cannot move out of borrowed content
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/dst-index.rs:44:5
- |
-LL | T[0];
- | ^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 6 previous errors
+error: aborting due to 4 previous errors
Some errors occurred: E0161, E0507.
For more information about an error, try `rustc --explain E0161`.
LL | let _x: Box<str> = box *"hello world";
| ^^^^^^^^^^^^^^ cannot move out of borrowed content
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/dst-rvalue.rs:16:28
- |
-LL | let _x: Box<str> = box *"hello world";
- | ^^^^^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0508]: cannot move out of type `[isize]`, a non-copy slice
--> $DIR/dst-rvalue.rs:21:32
|
LL | let _x: Box<[isize]> = box *array;
| ^^^^^^ cannot move out of here
-error[E0507]: cannot move out of `*array` which is behind a `&` reference
- --> $DIR/dst-rvalue.rs:21:32
- |
-LL | let array: &[isize] = &[1, 2, 3];
- | ---------- help: consider changing this to be a mutable reference: `&mut [1, 2, 3]`
-LL | let _x: Box<[isize]> = box *array;
- | ^^^^^^
- | |
- | cannot move out of `*array` which is behind a `&` reference
- | `array` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 6 previous errors
+error: aborting due to 4 previous errors
Some errors occurred: E0161, E0507, E0508.
For more information about an error, try `rustc --explain E0161`.
LL | match x { } //~ ERROR E0004
| ^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/E0004-2.rs:14:11
|
LL | match x { } //~ ERROR E0004
| ^^^^^^^^^^^^^^^^^^^^^^^^ requires multiple coercions
|
= note: `CoerceUnsized` may only be implemented for a coercion between structures with one field being coerced
- = note: currently, 2 fields need coercions: b (T to U), c (U to T)
+ = note: currently, 2 fields need coercions: `b` (`T` to `U`), `c` (`U` to `T`)
error: aborting due to previous error
+++ /dev/null
-error[E0507]: cannot move out of borrowed content
- --> $DIR/E0507.rs:22:5
- |
-LL | x.borrow().nothing_is_true(); //~ ERROR E0507
- | ^^^^^^^^^^ cannot move out of borrowed content
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/E0507.rs:22:5
- |
-LL | x.borrow().nothing_is_true(); //~ ERROR E0507
- | ^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
-error[E0658]: allow_fail attribute is currently unstable (see issue #42219)
+error[E0658]: allow_fail attribute is currently unstable (see issue #46488)
--> $DIR/feature-gate-allow_fail.rs:13:1
|
LL | #[allow_fail] //~ ERROR allow_fail attribute is currently unstable
pub unsafe fn atomic_i64(x: *mut i64) {
atomic_xadd(x, 1);
}
+#[cfg(target_has_atomic = "128")]
+//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+pub unsafe fn atomic_u128(x: *mut u128) {
+ atomic_xadd(x, 1);
+}
+#[cfg(target_has_atomic = "128")]
+//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+pub unsafe fn atomic_i128(x: *mut i128) {
+ atomic_xadd(x, 1);
+}
#[cfg(target_has_atomic = "ptr")]
//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
pub unsafe fn atomic_usize(x: *mut usize) {
//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
cfg!(target_has_atomic = "64");
//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+ cfg!(target_has_atomic = "128");
+ //~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
cfg!(target_has_atomic = "ptr");
//~^ ERROR `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
}
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
--> $DIR/feature-gate-cfg-target-has-atomic.rs:64:7
|
-LL | #[cfg(target_has_atomic = "ptr")]
+LL | #[cfg(target_has_atomic = "128")]
| ^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
--> $DIR/feature-gate-cfg-target-has-atomic.rs:69:7
|
+LL | #[cfg(target_has_atomic = "128")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
+
+error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:74:7
+ |
+LL | #[cfg(target_has_atomic = "ptr")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
+
+error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:79:7
+ |
LL | #[cfg(target_has_atomic = "ptr")]
| ^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
- --> $DIR/feature-gate-cfg-target-has-atomic.rs:76:10
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:86:10
|
LL | cfg!(target_has_atomic = "8");
| ^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
- --> $DIR/feature-gate-cfg-target-has-atomic.rs:78:10
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:88:10
|
LL | cfg!(target_has_atomic = "16");
| ^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
- --> $DIR/feature-gate-cfg-target-has-atomic.rs:80:10
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:90:10
|
LL | cfg!(target_has_atomic = "32");
| ^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
- --> $DIR/feature-gate-cfg-target-has-atomic.rs:82:10
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:92:10
|
LL | cfg!(target_has_atomic = "64");
| ^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
- --> $DIR/feature-gate-cfg-target-has-atomic.rs:84:10
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:94:10
+ |
+LL | cfg!(target_has_atomic = "128");
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
+
+error[E0658]: `cfg(target_has_atomic)` is experimental and subject to change (see issue #32976)
+ --> $DIR/feature-gate-cfg-target-has-atomic.rs:96:10
|
LL | cfg!(target_has_atomic = "ptr");
| ^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(cfg_target_has_atomic)] to the crate attributes to enable
-error: aborting due to 15 previous errors
+error: aborting due to 18 previous errors
For more information about this error, try `rustc --explain E0658`.
-error[E0658]: `crate` visibility modifier is experimental (see issue #45388)
+error[E0658]: `crate` visibility modifier is experimental (see issue #53120)
--> $DIR/feature-gate-crate_visibility_modifier.rs:11:1
|
LL | crate struct Bender { //~ ERROR `crate` visibility modifier is experimental
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:26:9
|
LL | use alloc;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:28:9
|
LL | use alloc::boxed;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:33:11
|
LL | use ::alloc;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:35:11
|
LL | use ::alloc::boxed;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:9:17
|
LL | let v = alloc::vec![0];
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:11:18
|
LL | type A = alloc::boxed::Box<u8>;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:18:19
|
LL | let v = ::alloc::vec![0];
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:20:20
|
LL | type A = ::alloc::boxed::Box<u8>;
|
= help: add #![feature(extern_crate_item_prelude)] to the crate attributes to enable
-error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #54658)
+error[E0658]: use of extern prelude names introduced with `extern crate` items is unstable (see issue #55599)
--> $DIR/feature-gate-extern_crate_item_prelude.rs:42:14
|
LL | type A = core::boxed::Box<u8>;
-error[E0658]: `extern` in paths is experimental (see issue #44660)
+error[E0658]: `extern` in paths is experimental (see issue #55600)
--> $DIR/feature-gate-extern_in_paths.rs:14:13
|
LL | let _ = extern::std::vec::Vec::new(); //~ ERROR `extern` in paths is experimental
+++ /dev/null
-error: compilation successful
- --> $DIR/feature-gate-nll.rs:13:1
- |
-LL | / fn main() { #![rustc_error] // rust-lang/rust#49855
-LL | | let mut x = 33;
-LL | |
-LL | | let p = &x;
-LL | | x = 22; //~ ERROR cannot assign to `x` because it is borrowed [E0506]
-LL | | }
- | |_^
-
-error: aborting due to previous error
-
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-#![feature(rustc_attrs)]
+// This is a test checking that if you do not opt into NLL then you
+// should not get the effects of NLL applied to the test.
+
+// Don't use 2018 edition, since that turns on NLL (migration mode).
+// edition:2015
+
+// Don't use compare-mode=nll, since that turns on NLL.
+// ignore-compare-mode-nll
+
+
#![allow(dead_code)]
-fn main() { #![rustc_error] // rust-lang/rust#49855
+fn main() {
let mut x = 33;
let p = &x;
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags:-C panic=abort
-
-#![no_std]
-#![no_main]
-
-use core::panic::PanicInfo;
-
-#[panic_implementation] //~ ERROR this attribute was renamed to `panic_handler` (see issue #44489)
-fn panic(info: &PanicInfo) -> ! {
- loop {}
-}
+++ /dev/null
-error[E0658]: this attribute was renamed to `panic_handler` (see issue #44489)
- --> $DIR/feature-gate-panic-implementation.rs:18:1
- |
-LL | #[panic_implementation] //~ ERROR this attribute was renamed to `panic_handler` (see issue #44489)
- | ^^^^^^^^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(panic_implementation)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo = Default;
+
+fn main() {}
--- /dev/null
+error[E0658]: trait aliases are experimental (see issue #41517)
+ --> $DIR/feature-gate-trait-alias.rs:11:1
+ |
+LL | trait Foo = Default;
+ | ^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(trait_alias)] to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
}
}
//~^ ERROR expected `{`, found `}`
+//~| NOTE expected `{`
| -- this `if` statement has a condition, but no block
...
LL | }
- | ^
+ | ^ expected `{`
error: aborting due to previous error
--- /dev/null
+use NonExistent; //~ ERROR unresolved import `NonExistent`
+use non_existent::non_existent; //~ ERROR unresolved import `non_existent`
+
+#[non_existent] //~ ERROR cannot determine resolution for the attribute macro `non_existent`
+#[derive(NonExistent)] //~ ERROR cannot determine resolution for the derive macro `NonExistent`
+struct S;
+
+fn main() {}
--- /dev/null
+error[E0432]: unresolved import `NonExistent`
+ --> $DIR/issue-55457.rs:1:5
+ |
+LL | use NonExistent; //~ ERROR unresolved import `NonExistent`
+ | ^^^^^^^^^^^ no `NonExistent` in the root. Did you mean to use `non_existent`?
+
+error[E0432]: unresolved import `non_existent`
+ --> $DIR/issue-55457.rs:2:5
+ |
+LL | use non_existent::non_existent; //~ ERROR unresolved import `non_existent`
+ | ^^^^^^^^^^^^ Maybe a missing `extern crate non_existent;`?
+
+error: cannot determine resolution for the derive macro `NonExistent`
+ --> $DIR/issue-55457.rs:5:10
+ |
+LL | #[derive(NonExistent)] //~ ERROR cannot determine resolution for the derive macro `NonExistent`
+ | ^^^^^^^^^^^
+ |
+ = note: import resolution is stuck, try simplifying macro imports
+
+error: cannot determine resolution for the attribute macro `non_existent`
+ --> $DIR/issue-55457.rs:4:3
+ |
+LL | #[non_existent] //~ ERROR cannot determine resolution for the attribute macro `non_existent`
+ | ^^^^^^^^^^^^
+ |
+ = note: import resolution is stuck, try simplifying macro imports
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0432`.
issue_37437();
issue_40187();
issue_54067();
+ multiple_errors();
}
fn issue_37433() {
asm!("mov sp, $0"::"r"(addr)); //~ ERROR E0669
}
}
+
+fn multiple_errors() {
+ let addr: (u32, u32) = (1, 2);
+ unsafe {
+ asm!("mov sp, $0"::"r"(addr), //~ ERROR E0669
+ "r"("hello e0669")); //~ ERROR E0669
+ }
+}
error[E0669]: invalid value for constraint in inline assembly
- --> $DIR/inline-asm-bad-operand.rs:28:9
+ --> $DIR/inline-asm-bad-operand.rs:29:24
|
LL | asm!("" :: "r"("")); //~ ERROR E0669
- | ^^^^^^^^^^^^^^^^^^^^
+ | ^^
error[E0669]: invalid value for constraint in inline assembly
- --> $DIR/inline-asm-bad-operand.rs:33:9
+ --> $DIR/inline-asm-bad-operand.rs:34:32
|
LL | asm!("ret" : : "{rdi}"(target)); //~ ERROR E0669
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^
error[E0669]: invalid value for constraint in inline assembly
- --> $DIR/inline-asm-bad-operand.rs:40:14
+ --> $DIR/inline-asm-bad-operand.rs:41:29
|
LL | unsafe { asm!("" :: "i"(hello)) }; //~ ERROR E0669
- | ^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^
error[E0669]: invalid value for constraint in inline assembly
- --> $DIR/inline-asm-bad-operand.rs:48:9
+ --> $DIR/inline-asm-bad-operand.rs:49:38
|
LL | asm!("movups $1, %xmm0"::"m"(arr)); //~ ERROR E0669
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^
error[E0669]: invalid value for constraint in inline assembly
- --> $DIR/inline-asm-bad-operand.rs:55:9
+ --> $DIR/inline-asm-bad-operand.rs:56:32
|
LL | asm!("mov sp, $0"::"r"(addr)); //~ ERROR E0669
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^
-error: aborting due to 5 previous errors
+error[E0669]: invalid value for constraint in inline assembly
+ --> $DIR/inline-asm-bad-operand.rs:63:32
+ |
+LL | asm!("mov sp, $0"::"r"(addr), //~ ERROR E0669
+ | ^^^^
+
+error[E0669]: invalid value for constraint in inline assembly
+ --> $DIR/inline-asm-bad-operand.rs:64:32
+ |
+LL | "r"("hello e0669")); //~ ERROR E0669
+ | ^^^^^^^^^^^^^
+
+error: aborting due to 7 previous errors
For more information about this error, try `rustc --explain E0669`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unsize, dispatch_from_dyn)]
+
+use std::{
+ ops::DispatchFromDyn,
+ marker::{Unsize, PhantomData},
+};
+
+struct WrapperWithExtraField<T>(T, i32);
+
+impl<T, U> DispatchFromDyn<WrapperWithExtraField<U>> for WrapperWithExtraField<T>
+where
+ T: DispatchFromDyn<U>,
+{} //~^^^ ERROR [E0378]
+
+
+struct MultiplePointers<T: ?Sized>{
+ ptr1: *const T,
+ ptr2: *const T,
+}
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<MultiplePointers<U>> for MultiplePointers<T>
+where
+ T: Unsize<U>,
+{} //~^^^ ERROR [E0378]
+
+
+struct NothingToCoerce<T: ?Sized> {
+ data: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NothingToCoerce<T>> for NothingToCoerce<U> {}
+//~^ ERROR [E0378]
+
+#[repr(C)]
+struct HasReprC<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<HasReprC<U>> for HasReprC<T>
+where
+ T: Unsize<U>,
+{} //~^^^ ERROR [E0378]
+
+fn main() {}
--- /dev/null
+error[E0378]: the trait `DispatchFromDyn` may only be implemented for structs containing the field being coerced, `PhantomData` fields, and nothing else
+ --> $DIR/invalid_dispatch_from_dyn_impls.rs:20:1
+ |
+LL | / impl<T, U> DispatchFromDyn<WrapperWithExtraField<U>> for WrapperWithExtraField<T>
+LL | | where
+LL | | T: DispatchFromDyn<U>,
+LL | | {} //~^^^ ERROR [E0378]
+ | |__^
+ |
+ = note: extra field `1` of type `i32` is not allowed
+
+error[E0378]: implementing the `DispatchFromDyn` trait requires multiple coercions
+ --> $DIR/invalid_dispatch_from_dyn_impls.rs:31:1
+ |
+LL | / impl<T: ?Sized, U: ?Sized> DispatchFromDyn<MultiplePointers<U>> for MultiplePointers<T>
+LL | | where
+LL | | T: Unsize<U>,
+LL | | {} //~^^^ ERROR [E0378]
+ | |__^
+ |
+ = note: the trait `DispatchFromDyn` may only be implemented for a coercion between structures with a single field being coerced
+ = note: currently, 2 fields need coercions: `ptr1` (`*const T` to `*const U`), `ptr2` (`*const T` to `*const U`)
+
+error[E0378]: the trait `DispatchFromDyn` may only be implemented for a coercion between structures with a single field being coerced, none found
+ --> $DIR/invalid_dispatch_from_dyn_impls.rs:41:1
+ |
+LL | impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NothingToCoerce<T>> for NothingToCoerce<U> {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0378]: structs implementing `DispatchFromDyn` may not have `#[repr(packed)]` or `#[repr(C)]`
+ --> $DIR/invalid_dispatch_from_dyn_impls.rs:47:1
+ |
+LL | / impl<T: ?Sized, U: ?Sized> DispatchFromDyn<HasReprC<U>> for HasReprC<T>
+LL | | where
+LL | | T: Unsize<U>,
+LL | | {} //~^^^ ERROR [E0378]
+ | |__^
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0378`.
-error: expected `{`, found `in`
+error: expected `{`, found keyword `in`
--> $DIR/issue-51602.rs:12:10
|
LL | if i in 1..10 {
LL | (&[hd1, ..], &[hd2, ..])
| ^^^
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-12567.rs:16:17
- |
-LL | (&[], &[hd, ..]) | (&[hd, ..], &[])
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-12567.rs:16:31
- |
-LL | (&[], &[hd, ..]) | (&[hd, ..], &[])
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-12567.rs:20:12
- |
-LL | (&[hd1, ..], &[hd2, ..])
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-12567.rs:20:24
- |
-LL | (&[hd1, ..], &[hd2, ..])
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 6 previous errors
+error: aborting due to 2 previous errors
-Some errors occurred: E0507, E0508.
-For more information about an error, try `rustc --explain E0507`.
+For more information about this error, try `rustc --explain E0508`.
--- /dev/null
+error[E0499]: cannot borrow `x` (via `x.b`) as mutable more than once at a time
+ --> $DIR/issue-17263.rs:17:34
+ |
+LL | let (a, b) = (&mut x.a, &mut x.b);
+ | --- ^^^ second mutable borrow occurs here (via `x.b`)
+ | |
+ | first mutable borrow occurs here (via `x.a`)
+...
+LL | }
+ | - first borrow ends here
+
+error[E0502]: cannot borrow `foo` (via `foo.b`) as immutable because `foo` is also borrowed as mutable (via `foo.a`)
+ --> $DIR/issue-17263.rs:21:32
+ |
+LL | let (c, d) = (&mut foo.a, &foo.b);
+ | ----- ^^^^^ immutable borrow occurs here (via `foo.b`)
+ | |
+ | mutable borrow occurs here (via `foo.a`)
+...
+LL | }
+ | - mutable borrow ends here
+
+error: aborting due to 2 previous errors
+
+Some errors occurred: E0499, E0502.
+For more information about an error, try `rustc --explain E0499`.
error: compilation successful
--> $DIR/issue-17263.rs:15:1
|
-LL | / fn main() { #![rustc_error] // rust-lang/rust#49855
+LL | / fn main() { //[nll]~ ERROR compilation successful
LL | | let mut x: Box<_> = box Foo { a: 1, b: 2 };
LL | | let (a, b) = (&mut x.a, &mut x.b);
-LL | | //~^ ERROR cannot borrow `x` (via `x.b`) as mutable more than once at a time
+LL | | //[ast]~^ ERROR cannot borrow `x` (via `x.b`) as mutable more than once at a time
... |
-LL | | //~^ ERROR cannot borrow `foo` (via `foo.b`) as immutable
+LL | | use_mut(a);
LL | | }
| |_^
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// This checks diagnostic quality for cases where AST-borrowck treated
+// `Box<T>` as other types (see rust-lang/rfcs#130). NLL again treats
+// `Box<T>` specially. We capture the differences via revisions.
+// revisions: ast nll
+//[ast]compile-flags: -Z borrowck=ast
+//[nll]compile-flags: -Z borrowck=migrate -Z two-phase-borrows
+
+// don't worry about the --compare-mode=nll on this test.
+// ignore-compare-mode-nll
#![feature(box_syntax, rustc_attrs)]
struct Foo { a: isize, b: isize }
-
-fn main() { #![rustc_error] // rust-lang/rust#49855
+#[rustc_error] // rust-lang/rust#49855
+fn main() { //[nll]~ ERROR compilation successful
let mut x: Box<_> = box Foo { a: 1, b: 2 };
let (a, b) = (&mut x.a, &mut x.b);
- //~^ ERROR cannot borrow `x` (via `x.b`) as mutable more than once at a time
+ //[ast]~^ ERROR cannot borrow `x` (via `x.b`) as mutable more than once at a time
let mut foo: Box<_> = box Foo { a: 1, b: 2 };
let (c, d) = (&mut foo.a, &foo.b);
- //~^ ERROR cannot borrow `foo` (via `foo.b`) as immutable
+ //[ast]~^ ERROR cannot borrow `foo` (via `foo.b`) as immutable
+
+ // We explicitly use the references created above to illustrate
+ // that NLL is accepting this code *not* because of artificially
+ // short lifetimes, but rather because it understands that all the
+ // references are of disjoint parts of memory.
+ use_imm(d);
+ use_mut(c);
+ use_mut(b);
+ use_mut(a);
}
+
+fn use_mut<T>(_: &mut T) { }
+fn use_imm<T>(_: &T) { }
+++ /dev/null
-error[E0499]: cannot borrow `x` (via `x.b`) as mutable more than once at a time
- --> $DIR/issue-17263.rs:17:34
- |
-LL | let (a, b) = (&mut x.a, &mut x.b);
- | --- ^^^ second mutable borrow occurs here (via `x.b`)
- | |
- | first mutable borrow occurs here (via `x.a`)
-...
-LL | }
- | - first borrow ends here
-
-error[E0502]: cannot borrow `foo` (via `foo.b`) as immutable because `foo` is also borrowed as mutable (via `foo.a`)
- --> $DIR/issue-17263.rs:21:32
- |
-LL | let (c, d) = (&mut foo.a, &foo.b);
- | ----- ^^^^^ immutable borrow occurs here (via `foo.b`)
- | |
- | mutable borrow occurs here (via `foo.a`)
-LL | //~^ ERROR cannot borrow `foo` (via `foo.b`) as immutable
-LL | }
- | - mutable borrow ends here
-
-error: aborting due to 2 previous errors
-
-Some errors occurred: E0499, E0502.
-For more information about an error, try `rustc --explain E0499`.
| cannot move out of static item
| help: consider borrowing here: `&FOO`
-error[E0507]: cannot move out of immutable static item `FOO`
- --> $DIR/issue-17718-static-move.rs:16:14
- |
-LL | let _a = FOO; //~ ERROR: cannot move out of static item
- | ^^^
- | |
- | cannot move out of immutable static item `FOO`
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| cannot move out of borrowed content
| help: consider removing the `*`: `imm_ref()`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-20801.rs:39:22
- |
-LL | let b = unsafe { *imm_ref() };
- | ^^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of dereference of raw pointer
--> $DIR/issue-20801.rs:42:22
|
| cannot move out of dereference of raw pointer
| help: consider removing the `*`: `const_ptr()`
-error[E0507]: cannot move out of data in a `*const` pointer
- --> $DIR/issue-20801.rs:45:22
- |
-LL | let d = unsafe { *const_ptr() };
- | ^^^^^^^^^^^^
- | |
- | cannot move out of data in a `*const` pointer
- | cannot move
-
-error: aborting due to 6 previous errors
+error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0507`.
LL | self.tokens //~ ERROR cannot move out of borrowed content
| ^^^^^^^^^^^ cannot move out of borrowed content
-error[E0507]: cannot move out of `self.tokens` which is behind a `&` reference
- --> $DIR/issue-2590.rs:22:9
- |
-LL | fn parse(&self) -> Vec<isize> {
- | ----- help: consider changing this to be a mutable reference: `&mut self`
-LL | self.tokens //~ ERROR cannot move out of borrowed content
- | ^^^^^^^^^^^
- | |
- | cannot move out of `self.tokens` which is behind a `&` reference
- | `self` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| ^^^^^^^^^^^^^^^^^^^^^^ requires multiple coercions
|
= note: `CoerceUnsized` may only be implemented for a coercion between structures with one field being coerced
- = note: currently, 2 fields need coercions: _ptr (*const T to *const U), _boo (NotPhantomData<T> to NotPhantomData<U>)
+ = note: currently, 2 fields need coercions: `_ptr` (`*const T` to `*const U`), `_boo` (`NotPhantomData<T>` to `NotPhantomData<U>`)
error: aborting due to previous error
LL | &X(*Y)
| ^^ cannot move out of here
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-30355.rs:15:8
- |
-LL | &X(*Y)
- | ^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 4 previous errors
+error: aborting due to 3 previous errors
-Some errors occurred: E0161, E0507, E0508.
+Some errors occurred: E0161, E0508.
For more information about an error, try `rustc --explain E0161`.
LL | match () { } //~ ERROR non-exhaustive
| ^^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/issue-3096-1.rs:12:11
|
LL | match () { } //~ ERROR non-exhaustive
LL | match x { } //~ ERROR non-exhaustive patterns
| ^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/issue-3096-2.rs:15:11
|
LL | match x { } //~ ERROR non-exhaustive patterns
--- /dev/null
+#![feature(no_core, lang_items)]
+#![no_core]
+
+#[lang="sized"]
+trait Sized {}
+
+#[lang="add"]
+trait Add<T> {}
+
+impl Add<i32> for i32 {}
+
+fn main() {
+ let x = 5 + 6;
+ //~^ ERROR binary operation `+` cannot be applied to type `{integer}`
+ let y = 5i32 + 6i32;
+ //~^ ERROR binary operation `+` cannot be applied to type `i32`
+}
--- /dev/null
+error[E0369]: binary operation `+` cannot be applied to type `{integer}`
+ --> $DIR/issue-31076.rs:13:13
+ |
+LL | let x = 5 + 6;
+ | ^^^^^
+ |
+ = note: an implementation of `std::ops::Add` might be missing for `{integer}`
+
+error[E0369]: binary operation `+` cannot be applied to type `i32`
+ --> $DIR/issue-31076.rs:15:13
+ |
+LL | let y = 5i32 + 6i32;
+ | ^^^^^^^^^^^
+ |
+ = note: an implementation of `std::ops::Add` might be missing for `i32`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0369`.
| this `if` statement has a condition, but no block
...
LL | get_opt!(bar, foo);
- | ^^^
+ | ^^^ expected `{`
error: aborting due to previous error
| cannot move out of borrowed content
| help: consider borrowing here: `&f.v[0]`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-40402-1.rs:19:13
- |
-LL | let e = f.v[0]; //~ ERROR cannot move out of indexed content
- | ^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-40402-2.rs:15:10
- |
-LL | let (a, b) = x[0]; //~ ERROR cannot move out of indexed content
- | ^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/issue-40402-2.rs:15:13
- |
-LL | let (a, b) = x[0]; //~ ERROR cannot move out of indexed content
- | ^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/issue-40402-2.rs:15:18
|
LL | let (a, b) = x[0]; //~ ERROR cannot move out of indexed content
| ^ ^
-error: aborting due to 3 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
LL | id(Box::new(|| *v))
| ^^ cannot move out of captured variable in an `FnMut` closure
-error[E0507]: cannot move out of `*v` which is behind a `&` reference
- --> $DIR/issue-4335.rs:16:20
- |
-LL | id(Box::new(|| *v))
- | ^^
- | |
- | cannot move out of `*v` which is behind a `&` reference
- | cannot move
-
error[E0373]: closure may outlive the current function, but it borrows `v`, which is owned by the current function
--> $DIR/issue-4335.rs:16:17
|
LL | id(Box::new(move || *v))
| ^^^^^^^
-error: aborting due to 3 previous errors
+error: aborting due to 2 previous errors
Some errors occurred: E0373, E0507.
For more information about an error, try `rustc --explain E0373`.
|
LL | let v: Vec<&str> = line.split_whitespace().collect();
| ^^^^ borrowed value does not live long enough
-LL | //~^ ERROR `line` does not live long enough
-LL | println!("accumulator before add_assign {:?}", acc.map);
- | ------- borrow used here, in later iteration of loop
+...
+LL | acc += cnt2;
+ | --- borrow used here, in later iteration of loop
...
LL | }
| - `line` dropped here while still borrowed
for line in vec!["123456789".to_string(), "12345678".to_string()] {
let v: Vec<&str> = line.split_whitespace().collect();
//~^ ERROR `line` does not live long enough
- println!("accumulator before add_assign {:?}", acc.map);
+ // println!("accumulator before add_assign {:?}", acc.map);
let mut map = HashMap::new();
for str_ref in v {
let e = map.entry(str_ref);
}
let cnt2 = Counter{map};
acc += cnt2;
- println!("accumulator after add_assign {:?}", acc.map);
+ // println!("accumulator after add_assign {:?}", acc.map);
// line gets dropped here but references are kept in acc.map
}
}
LL | if true 'b: {} //~ ERROR expected `{`, found `'b`
| -- ^^----
| | |
+ | | expected `{`
| | help: try placing this code inside a block: `{ 'b: { } }`
| this `if` statement has a condition, but no block
LL | if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
| ^^----
| |
+ | expected `{`
| help: try placing this code inside a block: `{ 'b: { } }`
error: expected one of `.`, `?`, `{`, or an operator, found `'b`
--> $DIR/label_break_value_illegal_uses.rs:28:17
|
LL | match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
- | ^^ expected one of `.`, `?`, `{`, or an operator here
+ | ----- ^^ expected one of `.`, `?`, `{`, or an operator here
+ | |
+ | while parsing this match expression
error: aborting due to 4 previous errors
// We want to suggest the properly-balanced expression `1 / (2 + 3)`, not
// the malformed `1 / (2 + 3`
let _a = (1 / (2 + 3));
+ f();
+}
+
+fn f() -> bool {
+ loop {
+ if (break { return true }) {
+ }
+ }
+ false
}
|
= note: expected type `for<'r, 's> fn(&'r u8, &'s u8)`
found type `for<'a> fn(&'a u8, &'a u8)`
- = note: this was previously accepted by the compiler but has been phased out
- = note: for more information, see https://github.com/rust-lang/rust/issues/45852
error: aborting due to previous error
|
= note: expected type `&dyn for<'a, 'b> Foo<&'a u8, &'b u8>`
found type `&dyn for<'a> Foo<&'a u8, &'a u8>`
- = note: this was previously accepted by the compiler but has been phased out
- = note: for more information, see https://github.com/rust-lang/rust/issues/45852
error: aborting due to previous error
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+// compile-flags: -Wunused
+
+// make sure write!() can't hide its unused Result
+
+fn main() {
+ use std::fmt::Write;
+ let mut example = String::new();
+ write!(&mut example, "{}", 42); //~WARN must be used
+}
+
--- /dev/null
+warning: unused `std::result::Result` that must be used
+ --> $DIR/must-use-in-macro-55516.rs:19:5
+ |
+LL | write!(&mut example, "{}", 42); //~WARN must be used
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-W unused-must-use` implied by `-W unused`
+ = note: this `Result` may be an `Err` variant, which should be handled
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
--> $DIR/missing-block-hint.rs:13:18
|
LL | if (foo) => {} //~ ERROR expected `{`, found `=>`
- | -- ^^
+ | -- ^^ expected `{`
| |
| this `if` statement has a condition, but no block
LL | bar; //~ ERROR expected `{`, found `bar`
| ^^^-
| |
+ | expected `{`
| help: try placing this code inside a block: `{ bar; }`
error: aborting due to 2 previous errors
-warning: expected `;`, found `let`
+warning: expected `;`, found keyword `let`
--> $DIR/missing-semicolon-warning.rs:16:12
|
LL | $( let x = $e1 )*; //~ WARN expected `;`
LL | box E::Bar(x) => println!("{}", x.to_string()),
| ^
-error[E0507]: cannot move out of `hellothere.x.0` which is behind a `&` reference
- --> $DIR/moves-based-on-type-block-bad.rs:37:28
- |
-LL | f(&s, |hellothere| {
- | ---------- help: consider changing this to be a mutable reference: `&mut S`
-...
-LL | box E::Bar(x) => println!("{}", x.to_string()),
- | ^
- | |
- | cannot move out of `hellothere.x.0` which is behind a `&` reference
- | `hellothere` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
LL | let _f = to_fn(|| test(i)); //~ ERROR cannot move out
| ^ cannot move out of captured variable in an `Fn` closure
-error[E0507]: cannot move out of `i`, as it is a captured variable in a `Fn` closure
- --> $DIR/moves-based-on-type-move-out-of-closure-env-issue-1965.rs:21:28
- |
-LL | let _f = to_fn(|| test(i)); //~ ERROR cannot move out
- | ^
- | |
- | cannot move out of `i`, as it is a captured variable in a `Fn` closure
- | cannot move
- |
-help: consider changing this to accept closures that implement `FnMut`
- --> $DIR/moves-based-on-type-move-out-of-closure-env-issue-1965.rs:21:20
- |
-LL | let _f = to_fn(|| test(i)); //~ ERROR cannot move out
- | ^^^^^^^^^^
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
For more information about this error, try `rustc --explain E0507`.
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:15:15
- |
-LL | pub fn deref(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | let x = { *r }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
error[E0507]: cannot move out of borrowed content
--> $DIR/cannot-move-block-spans.rs:16:22
|
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:16:22
- |
-LL | pub fn deref(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | let x = { *r }; //~ ERROR
-LL | let y = unsafe { *r }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
error[E0507]: cannot move out of borrowed content
--> $DIR/cannot-move-block-spans.rs:17:26
|
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:17:26
- |
-LL | pub fn deref(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-...
-LL | let z = loop { break *r; }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
error[E0508]: cannot move out of type `[std::string::String; 2]`, a non-copy array
--> $DIR/cannot-move-block-spans.rs:21:15
|
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:27:38
- |
-LL | pub fn additional_statement_cases(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | let x = { let mut u = 0; u += 1; *r }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
error[E0507]: cannot move out of borrowed content
--> $DIR/cannot-move-block-spans.rs:28:45
|
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:28:45
- |
-LL | pub fn additional_statement_cases(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | let x = { let mut u = 0; u += 1; *r }; //~ ERROR
-LL | let y = unsafe { let mut u = 0; u += 1; *r }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
error[E0507]: cannot move out of borrowed content
--> $DIR/cannot-move-block-spans.rs:29:49
|
| cannot move out of borrowed content
| help: consider removing the `*`: `r`
-error[E0507]: cannot move out of `*r` which is behind a `&` reference
- --> $DIR/cannot-move-block-spans.rs:29:49
- |
-LL | pub fn additional_statement_cases(r: &String) {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-...
-LL | let z = loop { let mut u = 0; u += 1; break *r; u += 2; }; //~ ERROR
- | ^^
- | |
- | cannot move out of `*r` which is behind a `&` reference
- | `r` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 15 previous errors
+error: aborting due to 9 previous errors
Some errors occurred: E0507, E0508.
For more information about an error, try `rustc --explain E0507`.
It represents potential unsoundness in your code.
This warning will become a hard error in the future.
-warning[E0507]: cannot move out of `foo`, as it is immutable for the pattern guard
- --> $DIR/match-guards-always-borrow.rs:23:13
- |
-LL | (|| { let bar = foo; bar.take() })();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | cannot move out of `foo`, as it is immutable for the pattern guard
- | cannot move
- |
- = note: variables bound in patterns are immutable until the end of the pattern guard
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-
error: compilation successful
--> $DIR/match-guards-always-borrow.rs:57:1
|
-error: user substs: Canonical { variables: [], value: UserSubsts { substs: [u32], user_self_ty: None } }
+error: user substs: Canonical { max_universe: U0, variables: [], value: UserSubsts { substs: [u32], user_self_ty: None } }
--> $DIR/dump-adt-brace-struct.rs:28:5
|
LL | SomeStruct::<u32> { t: 22 }; //~ ERROR [u32]
-error: user substs: Canonical { variables: [], value: UserSubsts { substs: [u32], user_self_ty: None } }
+error: user substs: Canonical { max_universe: U0, variables: [], value: UserSubsts { substs: [u32], user_self_ty: None } }
--> $DIR/dump-fn-method.rs:36:13
|
LL | let x = foo::<u32>; //~ ERROR [u32]
| ^^^^^^^^^^
-error: user substs: Canonical { variables: [CanonicalVarInfo { kind: Ty(General) }, CanonicalVarInfo { kind: Ty(General) }], value: UserSubsts { substs: [?0, u32, ?1], user_self_ty: None } }
+error: user substs: Canonical { max_universe: U0, variables: [CanonicalVarInfo { kind: Ty(General) }, CanonicalVarInfo { kind: Ty(General) }], value: UserSubsts { substs: [?0, u32, ?1], user_self_ty: None } }
--> $DIR/dump-fn-method.rs:42:13
|
LL | let x = <_ as Bazoom<u32>>::method::<_>; //~ ERROR [?0, u32, ?1]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: user substs: Canonical { variables: [], value: UserSubsts { substs: [u8, u16, u32], user_self_ty: None } }
+error: user substs: Canonical { max_universe: U0, variables: [], value: UserSubsts { substs: [u8, u16, u32], user_self_ty: None } }
--> $DIR/dump-fn-method.rs:46:13
|
LL | let x = <u8 as Bazoom<u16>>::method::<u32>; //~ ERROR [u8, u16, u32]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: user substs: Canonical { variables: [CanonicalVarInfo { kind: Ty(General) }, CanonicalVarInfo { kind: Ty(General) }], value: UserSubsts { substs: [?0, ?1, u32], user_self_ty: None } }
+error: user substs: Canonical { max_universe: U0, variables: [CanonicalVarInfo { kind: Ty(General) }, CanonicalVarInfo { kind: Ty(General) }], value: UserSubsts { substs: [?0, ?1, u32], user_self_ty: None } }
--> $DIR/dump-fn-method.rs:54:5
|
LL | y.method::<u32>(44, 66); //~ ERROR [?0, ?1, u32]
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags:-C panic=abort
-
-#![deny(deprecated)]
-#![feature(panic_implementation)]
-#![no_std]
-
-use core::panic::PanicInfo;
-
-#[panic_implementation]
-fn panic(info: &PanicInfo) -> ! {
- loop {}
-}
-
-fn main() {}
+++ /dev/null
-error: use of deprecated attribute `panic_implementation`: this attribute was renamed to `panic_handler`. See https://github.com/rust-lang/rust/issues/44489#issuecomment-415140224
- --> $DIR/panic-implementation-deprecated.rs:19:1
- |
-LL | #[panic_implementation]
- | ^^^^^^^^^^^^^^^^^^^^^^^ help: replace this attribute with `#[panic_handler]`
- |
-note: lint level defined here
- --> $DIR/panic-implementation-deprecated.rs:13:9
- |
-LL | #![deny(deprecated)]
- | ^^^^^^^^^^
-
-error: aborting due to previous error
-
fn /// document
foo() {}
-//~^^ ERROR expected identifier, found `/// document`
+//~^^ ERROR expected identifier, found doc comment `/// document`
fn main() {
foo();
-error: expected identifier, found `/// document`
+error: expected identifier, found doc comment `/// document`
--> $DIR/doc-before-identifier.rs:13:4
|
LL | fn /// document
- | ^^^^^^^^^^^^ expected identifier
+ | ^^^^^^^^^^^^ expected identifier, found doc comment
error: aborting due to previous error
--- /dev/null
+fn main() {
+ if true /*!*/ {}
+ //~^ ERROR expected `{`, found doc comment `/*!*/`
+}
--- /dev/null
+error: expected `{`, found doc comment `/*!*/`
+ --> $DIR/doc-comment-in-if-statement.rs:2:13
+ |
+LL | if true /*!*/ {}
+ | -- ^^^^^ expected `{`
+ | |
+ | this `if` statement has a condition, but no block
+
+error: aborting due to previous error
+
--- /dev/null
+pub fn f(
+ /// Comment
+ //~^ ERROR documentation comments cannot be applied to method arguments
+ //~| NOTE doc comments are not allowed here
+ id: u8,
+ /// Other
+ //~^ ERROR documentation comments cannot be applied to method arguments
+ //~| NOTE doc comments are not allowed here
+ a: u8,
+) {}
+
+fn foo(#[allow(dead_code)] id: i32) {}
+//~^ ERROR attributes cannot be applied to method arguments
+//~| NOTE attributes are not allowed here
+
+fn bar(id: #[allow(dead_code)] i32) {}
+//~^ ERROR attributes cannot be applied to a method argument's type
+//~| NOTE attributes are not allowed here
+
+fn main() {
+ // verify that the parser recovered and properly typechecked the args
+ f("", "");
+ //~^ ERROR mismatched types
+ //~| NOTE expected u8, found reference
+ //~| NOTE expected
+ //~| ERROR mismatched types
+ //~| NOTE expected u8, found reference
+ //~| NOTE expected
+ foo("");
+ //~^ ERROR mismatched types
+ //~| NOTE expected i32, found reference
+ //~| NOTE expected
+ bar("");
+ //~^ ERROR mismatched types
+ //~| NOTE expected i32, found reference
+ //~| NOTE expected
+}
--- /dev/null
+error: documentation comments cannot be applied to method arguments
+ --> $DIR/fn-arg-doc-comment.rs:2:5
+ |
+LL | /// Comment
+ | ^^^^^^^^^^^ doc comments are not allowed here
+
+error: documentation comments cannot be applied to method arguments
+ --> $DIR/fn-arg-doc-comment.rs:6:5
+ |
+LL | /// Other
+ | ^^^^^^^^^ doc comments are not allowed here
+
+error: attributes cannot be applied to method arguments
+ --> $DIR/fn-arg-doc-comment.rs:12:8
+ |
+LL | fn foo(#[allow(dead_code)] id: i32) {}
+ | ^^^^^^^^^^^^^^^^^^^ attributes are not allowed here
+
+error: attributes cannot be applied to a method argument's type
+ --> $DIR/fn-arg-doc-comment.rs:16:12
+ |
+LL | fn bar(id: #[allow(dead_code)] i32) {}
+ | ^^^^^^^^^^^^^^^^^^^ attributes are not allowed here
+
+error[E0308]: mismatched types
+ --> $DIR/fn-arg-doc-comment.rs:22:7
+ |
+LL | f("", "");
+ | ^^ expected u8, found reference
+ |
+ = note: expected type `u8`
+ found type `&'static str`
+
+error[E0308]: mismatched types
+ --> $DIR/fn-arg-doc-comment.rs:22:11
+ |
+LL | f("", "");
+ | ^^ expected u8, found reference
+ |
+ = note: expected type `u8`
+ found type `&'static str`
+
+error[E0308]: mismatched types
+ --> $DIR/fn-arg-doc-comment.rs:29:9
+ |
+LL | foo("");
+ | ^^ expected i32, found reference
+ |
+ = note: expected type `i32`
+ found type `&'static str`
+
+error[E0308]: mismatched types
+ --> $DIR/fn-arg-doc-comment.rs:33:9
+ |
+LL | bar("");
+ | ^^ expected i32, found reference
+ |
+ = note: expected type `i32`
+ found type `&'static str`
+
+error: aborting due to 8 previous errors
+
+For more information about this error, try `rustc --explain E0308`.
-error: expected `;`, found `as`
+error: expected `;`, found keyword `as`
--> $DIR/import-from-rename.rs:15:16
|
LL | use foo::{bar} as baz;
-error: expected `;`, found `as`
+error: expected `;`, found keyword `as`
--> $DIR/import-glob-rename.rs:15:12
|
LL | use foo::* as baz;
// compile-flags: -Z parse-only -Z continue-parse-after-error
-struct Bar<T> { x: T } where T: Copy //~ ERROR expected item, found `where`
+struct Bar<T> { x: T } where T: Copy //~ ERROR expected item, found keyword `where`
fn main() {}
-error: expected item, found `where`
+error: expected item, found keyword `where`
--> $DIR/issue-17904-2.rs:13:24
|
-LL | struct Bar<T> { x: T } where T: Copy //~ ERROR expected item, found `where`
+LL | struct Bar<T> { x: T } where T: Copy //~ ERROR expected item, found keyword `where`
| ^^^^^ expected item
error: aborting due to previous error
// compile-flags: -Z parse-only
impl S {
- fn f(*, a: u8) -> u8 {} //~ ERROR expected pattern, found `*`
+ fn f(*, a: u8) -> u8 {}
+ //~^ ERROR expected argument name, found `*`
}
-error: expected pattern, found `*`
+error: expected argument name, found `*`
--> $DIR/issue-33413.rs:14:10
|
-LL | fn f(*, a: u8) -> u8 {} //~ ERROR expected pattern, found `*`
- | ^ expected pattern
+LL | fn f(*, a: u8) -> u8 {}
+ | ^ expected argument name
error: aborting due to previous error
fn main() {
let foo =
- match
+ match //~ NOTE while parsing this match expression
Some(4).unwrap_or_else(5)
//~^ NOTE expected one of `.`, `?`, `{`, or an operator here
; //~ NOTE unexpected token
error: expected one of `.`, `?`, `{`, or an operator, found `;`
--> $DIR/match-refactor-to-expr.rs:18:9
|
-LL | match
- | ----- help: try removing this `match`
+LL | match //~ NOTE while parsing this match expression
+ | -----
+ | |
+ | while parsing this match expression
+ | help: try removing this `match`
LL | Some(4).unwrap_or_else(5)
| - expected one of `.`, `?`, `{`, or an operator here
LL | //~^ NOTE expected one of `.`, `?`, `{`, or an operator here
// compile-flags: -Z parse-only
-fn f(+x: isize) {} //~ ERROR expected pattern, found `+`
+fn f(+x: isize) {}
+//~^ ERROR expected argument name, found `+`
-error: expected pattern, found `+`
+error: expected argument name, found `+`
--> $DIR/removed-syntax-mode.rs:13:6
|
-LL | fn f(+x: isize) {} //~ ERROR expected pattern, found `+`
- | ^ expected pattern
+LL | fn f(+x: isize) {}
+ | ^ expected argument name
error: aborting due to previous error
// Test syntax checks for `type` keyword.
-struct S1 for type; //~ ERROR expected `where`, `{`, `(`, or `;` after struct name, found `for`
+struct S1 for type;
+//~^ ERROR expected `where`, `{`, `(`, or `;` after struct name, found keyword `for`
pub fn main() {
}
-error: expected `where`, `{`, `(`, or `;` after struct name, found `for`
+error: expected `where`, `{`, `(`, or `;` after struct name, found keyword `for`
--> $DIR/unsized.rs:15:11
|
-LL | struct S1 for type; //~ ERROR expected `where`, `{`, `(`, or `;` after struct name, found `for`
+LL | struct S1 for type;
| ^^^ expected `where`, `{`, `(`, or `;` after struct name
error: aborting due to previous error
// Test diagnostics for the removed struct inheritance feature.
-virtual struct SuperStruct { //~ ERROR expected item, found `virtual`
+virtual struct SuperStruct {
+//~^ ERROR expected item, found reserved keyword `virtual`
f1: isize,
}
-error: expected item, found `virtual`
+error: expected item, found reserved keyword `virtual`
--> $DIR/virtual-structs.rs:15:1
|
-LL | virtual struct SuperStruct { //~ ERROR expected item, found `virtual`
+LL | virtual struct SuperStruct {
| ^^^^^^^ expected item
error: aborting due to previous error
#![crate_name="foo"]
#![allow(dead_code)]
-// compile-flags: -Z print-fuel=foo
+// (#55495: The --error-format is to sidestep an issue in our test harness)
+// compile-flags: --error-format human -Z print-fuel=foo
// compile-pass
struct S1(u8, u16, u8);
--- /dev/null
+Fuel used by foo: 3
+++ /dev/null
-Fuel used by foo: 3
--> $DIR/issue-3907.rs:20:6
|
LL | impl Foo for S { //~ ERROR expected trait, found type alias `Foo`
- | ^^^ type aliases cannot be used for traits
+ | ^^^ type aliases cannot be used as traits
+ |
+ = note: did you mean to use a trait alias?
help: possible better candidate is found in another module, you can import it into scope
|
LL | use issue_3907::Foo;
| ^
| |
| did you mean `I`?
- | type aliases cannot be used for traits
+ | type aliases cannot be used as traits
+ |
+ = note: did you mean to use a trait alias?
error: aborting due to 2 previous errors
--> $DIR/unboxed-closure-sugar-nonexistent-trait.rs:16:8
|
LL | fn g<F:Typedef(isize) -> isize>(x: F) {}
- | ^^^^^^^^^^^^^^^^^^^^^^^ type aliases cannot be used for traits
+ | ^^^^^^^^^^^^^^^^^^^^^^^ type aliases cannot be used as traits
+ |
+ = note: did you mean to use a trait alias?
error: aborting due to 2 previous errors
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// run-rustfix
+
+#![feature(in_band_lifetimes)]
+#![deny(single_use_lifetimes)]
+#![allow(dead_code)]
+#![allow(unused_variables)]
+
+// Test that we DO warn when lifetime name is used only
+// once in a fn argument, even with in band lifetimes.
+
+fn a(x: &u32, y: &u32) {
+ //~^ ERROR `'a` only used once
+ //~| ERROR `'b` only used once
+ //~| HELP elide the single-use lifetime
+ //~| HELP elide the single-use lifetime
+}
+
+fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// run-rustfix
+
#![feature(in_band_lifetimes)]
#![deny(single_use_lifetimes)]
#![allow(dead_code)]
fn a(x: &'a u32, y: &'b u32) {
//~^ ERROR `'a` only used once
//~| ERROR `'b` only used once
+ //~| HELP elide the single-use lifetime
+ //~| HELP elide the single-use lifetime
}
fn main() { }
error: lifetime parameter `'a` only used once
- --> $DIR/one-use-in-fn-argument-in-band.rs:19:10
+ --> $DIR/one-use-in-fn-argument-in-band.rs:21:10
|
LL | fn a(x: &'a u32, y: &'b u32) {
- | ^^
+ | ^^-
| |
- | this lifetime...
- | ...is used only here
+ | this lifetime is only used here
+ | help: elide the single-use lifetime
|
note: lint level defined here
- --> $DIR/one-use-in-fn-argument-in-band.rs:12:9
+ --> $DIR/one-use-in-fn-argument-in-band.rs:14:9
|
LL | #![deny(single_use_lifetimes)]
| ^^^^^^^^^^^^^^^^^^^^
error: lifetime parameter `'b` only used once
- --> $DIR/one-use-in-fn-argument-in-band.rs:19:22
+ --> $DIR/one-use-in-fn-argument-in-band.rs:21:22
|
LL | fn a(x: &'a u32, y: &'b u32) {
- | ^^
+ | ^^-
| |
- | this lifetime...
- | ...is used only here
+ | this lifetime is only used here
+ | help: elide the single-use lifetime
error: aborting due to 2 previous errors
// once in a fn argument.
fn a<'a>(x: &'a u32) { //~ ERROR `'a` only used once
+ //~^ HELP elide the single-use lifetime
}
fn main() { }
|
LL | #![deny(single_use_lifetimes)]
| ^^^^^^^^^^^^^^^^^^^^
+help: elide the single-use lifetime
+ |
+LL | fn a(x: &u32) { //~ ERROR `'a` only used once
+ | -- --
error: aborting due to previous error
#![allow(dead_code)]
#![allow(unused_variables)]
-// Test that we DO warn for a lifetime used only once in an impl.
-//
-// (Actually, until #15872 is fixed, you can't use `'_` here, but
-// hopefully that will come soon.)
+// Test that we DO warn for a lifetime used only once in an impl, and that we
+// don't warn for the anonymous lifetime.
struct Foo<'f> {
data: &'f u32
}
}
+impl Foo<'_> {
+ fn inherent_b(&self) {}
+}
+
+
fn main() { }
error: lifetime parameter `'f` only used once
- --> $DIR/one-use-in-inherent-impl-header.rs:24:6
+ --> $DIR/one-use-in-inherent-impl-header.rs:22:6
|
LL | impl<'f> Foo<'f> { //~ ERROR `'f` only used once
| ^^ -- ...is used only here
impl<'f> Foo<'f> { //~ ERROR `'f` only used once
fn inherent_a<'a>(&self, data: &'a u32) { //~ ERROR `'a` only used once
+ //~^ HELP elide the single-use lifetime
}
}
|
LL | #![deny(single_use_lifetimes)]
| ^^^^^^^^^^^^^^^^^^^^
+help: elide the single-use lifetime
+ |
+LL | fn inherent_a(&self, data: &u32) { //~ ERROR `'a` only used once
+ | -- --
error: lifetime parameter `'f` only used once
--> $DIR/one-use-in-inherent-method-argument.rs:21:6
type Item = &'f u32;
fn next<'g>(&'g mut self) -> Option<Self::Item> { //~ ERROR `'g` only used once
+ //~^ HELP elide the single-use lifetime
None
}
}
|
LL | #![deny(single_use_lifetimes)]
| ^^^^^^^^^^^^^^^^^^^^
+help: elide the single-use lifetime
+ |
+LL | fn next(&mut self) -> Option<Self::Item> { //~ ERROR `'g` only used once
+ | ----
error: aborting due to previous error
fn september() {}
//~^ ERROR lifetime parameter `'a` never used
-//~| HELP remove it
+//~| HELP elide the unused lifetime
fn october<'b, T>(s: &'b T) -> &'b T {
//~^ ERROR lifetime parameter `'a` never used
- //~| HELP remove it
+ //~| HELP elide the unused lifetime
s
}
fn november<'a>(s: &'a str) -> (&'a str) {
//~^ ERROR lifetime parameter `'b` never used
- //~| HELP remove it
+ //~| HELP elide the unused lifetime
s
}
fn september<'a>() {}
//~^ ERROR lifetime parameter `'a` never used
-//~| HELP remove it
+//~| HELP elide the unused lifetime
fn october<'a, 'b, T>(s: &'b T) -> &'b T {
//~^ ERROR lifetime parameter `'a` never used
- //~| HELP remove it
+ //~| HELP elide the unused lifetime
s
}
fn november<'a, 'b>(s: &'a str) -> (&'a str) {
//~^ ERROR lifetime parameter `'b` never used
- //~| HELP remove it
+ //~| HELP elide the unused lifetime
s
}
--> $DIR/zero-uses-in-fn.rs:8:14
|
LL | fn september<'a>() {}
- | -^^- help: remove it
+ | -^^- help: elide the unused lifetime
|
note: lint level defined here
--> $DIR/zero-uses-in-fn.rs:5:9
LL | fn october<'a, 'b, T>(s: &'b T) -> &'b T {
| ^^--
| |
- | help: remove it
+ | help: elide the unused lifetime
error: lifetime parameter `'b` never used
--> $DIR/zero-uses-in-fn.rs:18:17
LL | fn november<'a, 'b>(s: &'a str) -> (&'a str) {
| --^^
| |
- | help: remove it
+ | help: elide the unused lifetime
error: aborting due to 3 previous errors
--> $DIR/zero-uses-in-impl.rs:8:6
|
LL | impl<'a> Foo {} //~ ERROR `'a` never used
- | -^^- help: remove it
+ | -^^- help: elide the unused lifetime
|
note: lint level defined here
--> $DIR/zero-uses-in-impl.rs:3:9
LL | &mut x.y //~ ERROR cannot borrow
| ^ `x` is a `&` reference, so the data it refers to cannot be borrowed as mutable
+error[E0499]: cannot borrow `*x` as mutable more than once at a time
+ --> $DIR/borrowck-borrow-overloaded-auto-deref-mut.rs:88:19
+ |
+LL | let _x = &mut x.x;
+ | - first mutable borrow occurs here
+LL | let _y = &mut x.y; //~ ERROR cannot borrow
+ | ^ second mutable borrow occurs here
+LL | use_mut(_x);
+ | -- first borrow later used here
+
error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
--> $DIR/borrowck-borrow-overloaded-auto-deref-mut.rs:98:5
|
LL | x.y = 3; //~ ERROR cannot borrow
| ^ `x` is a `&` reference, so the data it refers to cannot be borrowed as mutable
+error[E0499]: cannot borrow `*x` as mutable more than once at a time
+ --> $DIR/borrowck-borrow-overloaded-auto-deref-mut.rs:111:5
+ |
+LL | let _p: &mut Point = &mut **x;
+ | -- first mutable borrow occurs here
+LL | x.y = 3; //~ ERROR cannot borrow
+ | ^ second mutable borrow occurs here
+LL | use_mut(_p);
+ | -- first borrow later used here
+
error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
--> $DIR/borrowck-borrow-overloaded-auto-deref-mut.rs:119:5
|
LL | *x.y_mut() = 3; //~ ERROR cannot borrow
| ^ `x` is a `&` reference, so the data it refers to cannot be borrowed as mutable
-error: aborting due to 8 previous errors
+error: aborting due to 10 previous errors
-For more information about this error, try `rustc --explain E0596`.
+Some errors occurred: E0499, E0596.
+For more information about an error, try `rustc --explain E0499`.
let _x = &mut x.x;
let _y = &mut x.y; //~ ERROR cannot borrow
+ use_mut(_x);
}
-
fn deref_extend_mut_field4<'a>(x: &'a mut Own<Point>) {
let p = &mut **x;
let _x = &mut p.x;
fn assign_field4<'a>(x: &'a mut Own<Point>) {
let _p: &mut Point = &mut **x;
x.y = 3; //~ ERROR cannot borrow
+ use_mut(_p);
}
-
fn deref_imm_method(x: Own<Point>) {
let __isize = x.get();
}
}
pub fn main() {}
+
+fn use_mut<T>(_: &mut T) {}
| - first mutable borrow occurs here
LL | let _y = &mut x.y; //~ ERROR cannot borrow
| ^ second mutable borrow occurs here
+LL | use_mut(_x);
LL | }
| - first borrow ends here
| -- first mutable borrow occurs here
LL | x.y = 3; //~ ERROR cannot borrow
| ^ second mutable borrow occurs here
+LL | use_mut(_p);
LL | }
| - first borrow ends here
+++ /dev/null
-error[E0507]: cannot move out of static item
- --> $DIR/static-items-cant-move.rs:28:10
- |
-LL | test(BAR); //~ ERROR cannot move out of static item
- | ^^^ cannot move out of static item
-
-error[E0507]: cannot move out of immutable static item `BAR`
- --> $DIR/static-items-cant-move.rs:28:10
- |
-LL | test(BAR); //~ ERROR cannot move out of static item
- | ^^^
- | |
- | cannot move out of immutable static item `BAR`
- | cannot move
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
| cannot move out of borrowed content
| help: consider removing the `*`: `&x`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/std-uncopyable-atomics.rs:19:13
- |
-LL | let x = *&x; //~ ERROR: cannot move out of borrowed content
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/std-uncopyable-atomics.rs:21:13
|
| cannot move out of borrowed content
| help: consider removing the `*`: `&x`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/std-uncopyable-atomics.rs:21:13
- |
-LL | let x = *&x; //~ ERROR: cannot move out of borrowed content
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/std-uncopyable-atomics.rs:23:13
|
| cannot move out of borrowed content
| help: consider removing the `*`: `&x`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/std-uncopyable-atomics.rs:23:13
- |
-LL | let x = *&x; //~ ERROR: cannot move out of borrowed content
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
error[E0507]: cannot move out of borrowed content
--> $DIR/std-uncopyable-atomics.rs:25:13
|
| cannot move out of borrowed content
| help: consider removing the `*`: `&x`
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/std-uncopyable-atomics.rs:25:13
- |
-LL | let x = *&x; //~ ERROR: cannot move out of borrowed content
- | ^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 8 previous errors
+error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0507`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// gate-test-trait_alias
-
-trait Alias1<T> = Default where T: Clone; // ok
- //~^ERROR trait aliases are not yet fully implemented
-trait Alias2<T: Clone = ()> = Default;
- //~^ERROR type parameters on the left side of a trait alias cannot be bounded
- //~^^ERROR type parameters on the left side of a trait alias cannot have defaults
- //~^^^ERROR trait aliases are not yet fully implemented
-
-impl Alias1 { //~ERROR expected type, found trait alias
-}
-
-impl Alias1 for () { //~ERROR expected trait, found trait alias
-}
-
-fn main() {}
-
+++ /dev/null
-error: type parameters on the left side of a trait alias cannot be bounded
- --> $DIR/trait-alias-fail.rs:15:14
- |
-LL | trait Alias2<T: Clone = ()> = Default;
- | ^
-
-error: type parameters on the left side of a trait alias cannot have defaults
- --> $DIR/trait-alias-fail.rs:15:14
- |
-LL | trait Alias2<T: Clone = ()> = Default;
- | ^
-
-error[E0573]: expected type, found trait alias `Alias1`
- --> $DIR/trait-alias-fail.rs:20:6
- |
-LL | impl Alias1 { //~ERROR expected type, found trait alias
- | ^^^^^^ not a type
-
-error[E0404]: expected trait, found trait alias `Alias1`
- --> $DIR/trait-alias-fail.rs:23:6
- |
-LL | impl Alias1 for () { //~ERROR expected trait, found trait alias
- | ^^^^^^ not a trait
-
-error[E0658]: trait aliases are not yet fully implemented (see issue #41517)
- --> $DIR/trait-alias-fail.rs:13:1
- |
-LL | trait Alias1<T> = Default where T: Clone; // ok
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(trait_alias)] to the crate attributes to enable
-
-error[E0658]: trait aliases are not yet fully implemented (see issue #41517)
- --> $DIR/trait-alias-fail.rs:15:1
- |
-LL | trait Alias2<T: Clone = ()> = Default;
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(trait_alias)] to the crate attributes to enable
-
-error: aborting due to 6 previous errors
-
-Some errors occurred: E0404, E0573, E0658.
-For more information about an error, try `rustc --explain E0404`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+trait DefaultAlias = Default;
+
+impl DefaultAlias for () {}
+
+fn main() {}
--- /dev/null
+error[E0404]: expected trait, found trait alias `DefaultAlias`
+ --> $DIR/trait-alias-impl.rs:15:6
+ |
+LL | impl DefaultAlias for () {}
+ | ^^^^^^^^^^^^ not a trait
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0404`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+trait EqAlias = Eq;
+trait IteratorAlias = Iterator;
+
+fn main() {
+ let _: &dyn EqAlias = &123;
+ let _: &dyn IteratorAlias = &vec![123].into_iter();
+}
--- /dev/null
+error[E0038]: the trait `EqAlias` cannot be made into an object
+ --> $DIR/trait-alias-objects.rs:17:13
+ |
+LL | let _: &dyn EqAlias = &123;
+ | ^^^^^^^^^^^ the trait `EqAlias` cannot be made into an object
+ |
+ = note: the trait cannot use `Self` as a type parameter in the supertraits or where-clauses
+
+error[E0191]: the value of the associated type `Item` (from the trait `std::iter::Iterator`) must be specified
+ --> $DIR/trait-alias-objects.rs:18:13
+ |
+LL | let _: &dyn IteratorAlias = &vec![123].into_iter();
+ | ^^^^^^^^^^^^^^^^^ missing associated type `Item` value
+
+error: aborting due to 2 previous errors
+
+Some errors occurred: E0038, E0191.
+For more information about an error, try `rustc --explain E0038`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(trait_alias)]
+
+trait Foo {}
+trait A<T: Foo> {}
+trait B<T> = A<T>; // T cannot be unbounded
+
+fn main() {}
--- /dev/null
+error[E0277]: the trait bound `T: Foo` is not satisfied
+ --> $DIR/trait-alias-wf.rs:15:1
+ |
+LL | trait B<T> = A<T>; // T cannot be unbounded
+ | ^^^^^^^^^^^^^^^^^^ the trait `Foo` is not implemented for `T`
+ |
+ = help: consider adding a `where T: Foo` bound
+note: required by `A`
+ --> $DIR/trait-alias-wf.rs:14:1
+ |
+LL | trait A<T: Foo> {}
+ | ^^^^^^^^^^^^^^^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0277`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(trait_alias)]
-
-trait SimpleAlias = Default; //~ERROR E0645
-trait GenericAlias<T> = Iterator<Item=T>; //~ERROR E0645
-trait Partial<T> = IntoIterator<Item=T>; //~ERROR E0645
-
-trait Things<T> {}
-trait Romeo {}
-struct The<T>(T);
-struct Fore<T>(T);
-impl<T, U> Things<T> for The<U> {}
-impl<T> Romeo for Fore<T> {}
-
-trait WithWhere<Art, Thou> = Romeo + Romeo where Fore<(Art, Thou)>: Romeo; //~ERROR E0645
-trait BareWhere<Wild, Are> = where The<Wild>: Things<Are>; //~ERROR E0645
-
-trait CD = Clone + Default; //~ERROR E0645
-
-fn foo<T: CD>() -> (T, T) {
- let one = T::default();
- let two = one.clone();
- (one, two)
-}
-
-fn main() {
- let both = foo();
- assert_eq!(both.0, 0);
- assert_eq!(both.1, 0);
- let both: (i32, i32) = foo();
- assert_eq!(both.0, 0);
- assert_eq!(both.1, 0);
-}
-
+++ /dev/null
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:13:1
- |
-LL | trait SimpleAlias = Default; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:14:1
- |
-LL | trait GenericAlias<T> = Iterator<Item=T>; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:15:1
- |
-LL | trait Partial<T> = IntoIterator<Item=T>; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:24:1
- |
-LL | trait WithWhere<Art, Thou> = Romeo + Romeo where Fore<(Art, Thou)>: Romeo; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:25:1
- |
-LL | trait BareWhere<Wild, Are> = where The<Wild>: Things<Are>; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0645]: trait aliases are not yet implemented (see issue #41517)
- --> $DIR/trait-alias.rs:27:1
- |
-LL | trait CD = Clone + Default; //~ERROR E0645
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error: aborting due to 6 previous errors
-
-For more information about this error, try `rustc --explain E0645`.
// A, B, C are resolved as inherent items, their traits don't need to be in scope
C::A; //~ ERROR associated constant `A` is private
//~^ ERROR the trait `assoc_const::C` cannot be made into an object
- //~| ERROR the trait bound `dyn assoc_const::C: assoc_const::A` is not satisfied
C::B; // ERROR the trait `assoc_const::C` cannot be made into an object
- //~^ ERROR the trait bound `dyn assoc_const::C: assoc_const::B` is not satisfied
C::C; // OK
}
LL | C::A; //~ ERROR associated constant `A` is private
| ^^^^
-error[E0277]: the trait bound `dyn assoc_const::C: assoc_const::A` is not satisfied
- --> $DIR/trait-item-privacy.rs:111:5
- |
-LL | C::A; //~ ERROR associated constant `A` is private
- | ^^^^ the trait `assoc_const::A` is not implemented for `dyn assoc_const::C`
- |
-note: required by `assoc_const::A::A`
- --> $DIR/trait-item-privacy.rs:35:9
- |
-LL | const A: u8 = 0;
- | ^^^^^^^^^^^^^^^^
-
-error[E0277]: the trait bound `dyn assoc_const::C: assoc_const::B` is not satisfied
- --> $DIR/trait-item-privacy.rs:114:5
- |
-LL | C::B; // ERROR the trait `assoc_const::C` cannot be made into an object
- | ^^^^ the trait `assoc_const::B` is not implemented for `dyn assoc_const::C`
- |
-note: required by `assoc_const::B::B`
- --> $DIR/trait-item-privacy.rs:39:9
- |
-LL | const B: u8 = 0;
- | ^^^^^^^^^^^^^^^^
-
error[E0038]: the trait `assoc_const::C` cannot be made into an object
--> $DIR/trait-item-privacy.rs:111:5
|
= note: the trait cannot contain associated consts like `A`
error[E0223]: ambiguous associated type
- --> $DIR/trait-item-privacy.rs:127:12
+ --> $DIR/trait-item-privacy.rs:125:12
|
LL | let _: S::A; //~ ERROR ambiguous associated type
| ^^^^ help: use fully-qualified syntax: `<S as Trait>::A`
error[E0223]: ambiguous associated type
- --> $DIR/trait-item-privacy.rs:128:12
+ --> $DIR/trait-item-privacy.rs:126:12
|
LL | let _: S::B; //~ ERROR ambiguous associated type
| ^^^^ help: use fully-qualified syntax: `<S as Trait>::B`
error[E0223]: ambiguous associated type
- --> $DIR/trait-item-privacy.rs:129:12
+ --> $DIR/trait-item-privacy.rs:127:12
|
LL | let _: S::C; //~ ERROR ambiguous associated type
| ^^^^ help: use fully-qualified syntax: `<S as Trait>::C`
error: associated type `A` is private
- --> $DIR/trait-item-privacy.rs:131:12
+ --> $DIR/trait-item-privacy.rs:129:12
|
LL | let _: T::A; //~ ERROR associated type `A` is private
| ^^^^
error: associated type `A` is private
- --> $DIR/trait-item-privacy.rs:140:9
+ --> $DIR/trait-item-privacy.rs:138:9
|
LL | A = u8, //~ ERROR associated type `A` is private
| ^^^^^^
-error: aborting due to 17 previous errors
+error: aborting due to 15 previous errors
-Some errors occurred: E0038, E0223, E0277, E0599, E0624.
+Some errors occurred: E0038, E0223, E0599, E0624.
For more information about an error, try `rustc --explain E0038`.
(box 10 as Box<bar>).dup();
//~^ ERROR E0038
//~| ERROR E0038
- //~| ERROR E0277
}
LL | 10.blah::<i32, i32>(); //~ ERROR wrong number of type arguments: expected 1, found 2
| ^^^ unexpected type argument
-error[E0277]: the trait bound `dyn bar: bar` is not satisfied
- --> $DIR/trait-test-2.rs:20:26
- |
-LL | (box 10 as Box<bar>).dup();
- | ^^^ the trait `bar` is not implemented for `dyn bar`
-
error[E0038]: the trait `bar` cannot be made into an object
--> $DIR/trait-test-2.rs:20:16
|
= note: method `blah` has generic type parameters
= note: required because of the requirements on the impl of `std::ops::CoerceUnsized<std::boxed::Box<dyn bar>>` for `std::boxed::Box<{integer}>`
-error: aborting due to 5 previous errors
+error: aborting due to 4 previous errors
-Some errors occurred: E0038, E0107, E0277.
+Some errors occurred: E0038, E0107.
For more information about an error, try `rustc --explain E0038`.
+++ /dev/null
-error[E0507]: cannot move out of borrowed content
- --> $DIR/trivial-bounds-leak-copy.rs:19:5
- |
-LL | *t //~ ERROR
- | ^^ cannot move out of borrowed content
-
-error[E0507]: cannot move out of `*t` which is behind a `&` reference
- --> $DIR/trivial-bounds-leak-copy.rs:19:5
- |
-LL | fn move_out_string(t: &String) -> String {
- | ------- help: consider changing this to be a mutable reference: `&mut std::string::String`
-LL | *t //~ ERROR
- | ^^
- | |
- | cannot move out of `*t` which is behind a `&` reference
- | `t` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0507`.
--> $DIR/try-block-in-match.rs:16:11
|
LL | match try { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `try`
- | ^^^ expected expression
+ | ----- ^^^ expected expression
+ | |
+ | while parsing this match expression
error: aborting due to previous error
LL | let f = to_fn(|| drop(x)); //~ ERROR cannot move
| ^ cannot move out of captured variable in an `Fn` closure
-error[E0507]: cannot move out of `x`, as it is a captured variable in a `Fn` closure
- --> $DIR/unboxed-closure-illegal-move.rs:25:31
- |
-LL | let f = to_fn(|| drop(x)); //~ ERROR cannot move
- | ^
- | |
- | cannot move out of `x`, as it is a captured variable in a `Fn` closure
- | cannot move
- |
-help: consider changing this to accept closures that implement `FnMut`
- --> $DIR/unboxed-closure-illegal-move.rs:25:23
- |
-LL | let f = to_fn(|| drop(x)); //~ ERROR cannot move
- | ^^^^^^^^^^
-
error[E0507]: cannot move out of captured variable in an `FnMut` closure
--> $DIR/unboxed-closure-illegal-move.rs:29:35
|
LL | let f = to_fn(move || drop(x)); //~ ERROR cannot move
| ^ cannot move out of captured variable in an `Fn` closure
-error[E0507]: cannot move out of `x`, as it is a captured variable in a `Fn` closure
- --> $DIR/unboxed-closure-illegal-move.rs:38:36
- |
-LL | let f = to_fn(move || drop(x)); //~ ERROR cannot move
- | ^
- | |
- | cannot move out of `x`, as it is a captured variable in a `Fn` closure
- | cannot move
- |
-help: consider changing this to accept closures that implement `FnMut`
- --> $DIR/unboxed-closure-illegal-move.rs:38:23
- |
-LL | let f = to_fn(move || drop(x)); //~ ERROR cannot move
- | ^^^^^^^^^^^^^^^
-
error[E0507]: cannot move out of captured variable in an `FnMut` closure
--> $DIR/unboxed-closure-illegal-move.rs:42:40
|
LL | let f = to_fn_mut(move || drop(x)); //~ ERROR cannot move
| ^ cannot move out of captured variable in an `FnMut` closure
-error: aborting due to 6 previous errors
+error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0507`.
LL | move || set(&mut x); //~ ERROR cannot borrow
| ^^^^^^ cannot borrow as mutable
-warning[E0594]: cannot assign to `x`, as it is not declared as mutable
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
--> $DIR/unboxed-closure-immutable-capture.rs:23:8
|
LL | let x = 0;
...
LL | || x = 1; //~ ERROR cannot assign
| ^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
--> $DIR/unboxed-closure-immutable-capture.rs:25:12
|
LL | let x = 0;
...
LL | || set(&mut x); //~ ERROR cannot assign
| ^^^^^^ cannot borrow as mutable
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0594]: cannot assign to `x`, as it is not declared as mutable
+error[E0594]: cannot assign to `x`, as it is not declared as mutable
--> $DIR/unboxed-closure-immutable-capture.rs:26:8
|
LL | let x = 0;
...
LL | || x = 1; //~ ERROR cannot assign
| ^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-warning[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
--> $DIR/unboxed-closure-immutable-capture.rs:28:12
|
LL | let x = 0;
...
LL | || set(&mut x); //~ ERROR cannot assign
| ^^^^^^ cannot borrow as mutable
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
-error: aborting due to 4 previous errors
+error: aborting due to 8 previous errors
Some errors occurred: E0594, E0596.
For more information about an error, try `rustc --explain E0594`.
-warning[E0596]: cannot borrow `tick1` as mutable, as it is not declared as mutable
+error[E0596]: cannot borrow `tick1` as mutable, as it is not declared as mutable
--> $DIR/unboxed-closures-infer-fnmut-calling-fnmut-no-mut.rs:27:9
|
LL | let tick1 = || {
...
LL | tick1();
| ^^^^^ cannot borrow as mutable
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
error[E0596]: cannot borrow `tick2` as mutable, as it is not declared as mutable
--> $DIR/unboxed-closures-infer-fnmut-calling-fnmut-no-mut.rs:30:5
LL | tick2(); //~ ERROR cannot borrow
| ^^^^^ cannot borrow as mutable
-error: aborting due to previous error
+error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0596`.
-warning[E0594]: cannot assign to `n`, as it is not declared as mutable
+error[E0594]: cannot assign to `n`, as it is not declared as mutable
--> $DIR/unboxed-closures-mutate-upvar.rs:25:9
|
LL | let n = 0;
LL | let mut f = to_fn_mut(|| { //~ ERROR closure cannot assign
LL | n += 1;
| ^^^^^^ cannot assign
- |
- = warning: This error has been downgraded to a warning for backwards compatibility with previous releases.
- It represents potential unsoundness in your code.
- This warning will become a hard error in the future.
error[E0594]: cannot assign to `n`, as it is not declared as mutable
--> $DIR/unboxed-closures-mutate-upvar.rs:42:9
LL | | });
| |_____^
-error: aborting due to 3 previous errors
+error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0594`.
LL | let _ = match x {}; //~ ERROR non-exhaustive
| ^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/uninhabited-matches-feature-gated.rs:20:19
|
LL | let _ = match x {}; //~ ERROR non-exhaustive
LL | let _ = match x {}; //~ ERROR non-exhaustive
| ^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/uninhabited-matches-feature-gated.rs:23:19
|
LL | let _ = match x {}; //~ ERROR non-exhaustive
LL | let _ = match x {}; //~ ERROR non-exhaustive
| ^
|
-help: Please ensure that all possible cases are being handled; possibly adding wildcards or more match arms.
+help: ensure that all possible cases are being handled, possibly by adding wildcards or more match arms
--> $DIR/uninhabited-matches-feature-gated.rs:26:19
|
LL | let _ = match x {}; //~ ERROR non-exhaustive
+error[E0502]: cannot borrow `u.y` as immutable because it is also borrowed as mutable
+ --> $DIR/union-borrow-move-parent-sibling.rs:25:13
+ |
+LL | let a = &mut u.x.0;
+ | ---------- mutable borrow occurs here
+LL | let b = &u.y; //~ ERROR cannot borrow `u.y`
+ | ^^^^ immutable borrow occurs here
+LL | use_borrow(a);
+ | - mutable borrow later used here
+
error[E0382]: use of moved value: `u`
- --> $DIR/union-borrow-move-parent-sibling.rs:29:13
+ --> $DIR/union-borrow-move-parent-sibling.rs:32:13
|
LL | let a = u.x.0;
| ----- value moved here
-LL | let a = u.y; //~ ERROR use of moved value: `u.y`
+LL | let b = u.y; //~ ERROR use of moved value: `u.y`
| ^^^ value used here after move
|
= note: move occurs because `u` has type `U`, which does not implement the `Copy` trait
+error[E0502]: cannot borrow `u.y` as immutable because it is also borrowed as mutable
+ --> $DIR/union-borrow-move-parent-sibling.rs:38:13
+ |
+LL | let a = &mut (u.x.0).0;
+ | -------------- mutable borrow occurs here
+LL | let b = &u.y; //~ ERROR cannot borrow `u.y`
+ | ^^^^ immutable borrow occurs here
+LL | use_borrow(a);
+ | - mutable borrow later used here
+
error[E0382]: use of moved value: `u`
- --> $DIR/union-borrow-move-parent-sibling.rs:41:13
+ --> $DIR/union-borrow-move-parent-sibling.rs:45:13
|
LL | let a = (u.x.0).0;
| --------- value moved here
-LL | let a = u.y; //~ ERROR use of moved value: `u.y`
+LL | let b = u.y; //~ ERROR use of moved value: `u.y`
| ^^^ value used here after move
|
= note: move occurs because `u` has type `U`, which does not implement the `Copy` trait
+error[E0502]: cannot borrow `u.x` as immutable because it is also borrowed as mutable
+ --> $DIR/union-borrow-move-parent-sibling.rs:51:13
+ |
+LL | let a = &mut *u.y;
+ | --------- mutable borrow occurs here
+LL | let b = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
+ | ^^^^ immutable borrow occurs here
+LL | use_borrow(a);
+ | - mutable borrow later used here
+
error[E0382]: use of moved value: `u`
- --> $DIR/union-borrow-move-parent-sibling.rs:53:13
+ --> $DIR/union-borrow-move-parent-sibling.rs:58:13
|
LL | let a = *u.y;
| ---- value moved here
-LL | let a = u.x; //~ ERROR use of moved value: `u.x`
+LL | let b = u.x; //~ ERROR use of moved value: `u.x`
| ^^^ value used here after move
|
= note: move occurs because `u` has type `U`, which does not implement the `Copy` trait
-error: aborting due to 3 previous errors
+error: aborting due to 6 previous errors
-For more information about this error, try `rustc --explain E0382`.
+Some errors occurred: E0382, E0502.
+For more information about an error, try `rustc --explain E0382`.
y: Box<Vec<u8>>,
}
+fn use_borrow<T>(_: &T) {}
+
unsafe fn parent_sibling_borrow() {
let mut u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
let a = &mut u.x.0;
- let a = &u.y; //~ ERROR cannot borrow `u.y`
+ let b = &u.y; //~ ERROR cannot borrow `u.y`
+ use_borrow(a);
}
unsafe fn parent_sibling_move() {
let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
let a = u.x.0;
- let a = u.y; //~ ERROR use of moved value: `u.y`
+ let b = u.y; //~ ERROR use of moved value: `u.y`
}
unsafe fn grandparent_sibling_borrow() {
let mut u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
let a = &mut (u.x.0).0;
- let a = &u.y; //~ ERROR cannot borrow `u.y`
+ let b = &u.y; //~ ERROR cannot borrow `u.y`
+ use_borrow(a);
}
unsafe fn grandparent_sibling_move() {
let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
let a = (u.x.0).0;
- let a = u.y; //~ ERROR use of moved value: `u.y`
+ let b = u.y; //~ ERROR use of moved value: `u.y`
}
unsafe fn deref_sibling_borrow() {
let mut u = U { y: Box::default() };
let a = &mut *u.y;
- let a = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
+ let b = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
+ use_borrow(a);
}
unsafe fn deref_sibling_move() {
let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
let a = *u.y;
- let a = u.x; //~ ERROR use of moved value: `u.x`
+ let b = u.x; //~ ERROR use of moved value: `u.x`
}
error[E0502]: cannot borrow `u.y` as immutable because `u.x.0` is also borrowed as mutable
- --> $DIR/union-borrow-move-parent-sibling.rs:23:14
+ --> $DIR/union-borrow-move-parent-sibling.rs:25:14
|
LL | let a = &mut u.x.0;
| ----- mutable borrow occurs here
-LL | let a = &u.y; //~ ERROR cannot borrow `u.y`
+LL | let b = &u.y; //~ ERROR cannot borrow `u.y`
| ^^^ immutable borrow occurs here
+LL | use_borrow(a);
LL | }
| - mutable borrow ends here
error[E0382]: use of moved value: `u.y`
- --> $DIR/union-borrow-move-parent-sibling.rs:29:9
+ --> $DIR/union-borrow-move-parent-sibling.rs:32:9
|
LL | let a = u.x.0;
| - value moved here
-LL | let a = u.y; //~ ERROR use of moved value: `u.y`
+LL | let b = u.y; //~ ERROR use of moved value: `u.y`
| ^ value used here after move
|
= note: move occurs because `u.y` has type `[type error]`, which does not implement the `Copy` trait
error[E0502]: cannot borrow `u.y` as immutable because `u.x.0.0` is also borrowed as mutable
- --> $DIR/union-borrow-move-parent-sibling.rs:35:14
+ --> $DIR/union-borrow-move-parent-sibling.rs:38:14
|
LL | let a = &mut (u.x.0).0;
| --------- mutable borrow occurs here
-LL | let a = &u.y; //~ ERROR cannot borrow `u.y`
+LL | let b = &u.y; //~ ERROR cannot borrow `u.y`
| ^^^ immutable borrow occurs here
+LL | use_borrow(a);
LL | }
| - mutable borrow ends here
error[E0382]: use of moved value: `u.y`
- --> $DIR/union-borrow-move-parent-sibling.rs:41:9
+ --> $DIR/union-borrow-move-parent-sibling.rs:45:9
|
LL | let a = (u.x.0).0;
| - value moved here
-LL | let a = u.y; //~ ERROR use of moved value: `u.y`
+LL | let b = u.y; //~ ERROR use of moved value: `u.y`
| ^ value used here after move
|
= note: move occurs because `u.y` has type `[type error]`, which does not implement the `Copy` trait
error[E0502]: cannot borrow `u` (via `u.x`) as immutable because `u` is also borrowed as mutable (via `*u.y`)
- --> $DIR/union-borrow-move-parent-sibling.rs:47:14
+ --> $DIR/union-borrow-move-parent-sibling.rs:51:14
|
LL | let a = &mut *u.y;
| ---- mutable borrow occurs here (via `*u.y`)
-LL | let a = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
+LL | let b = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
| ^^^ immutable borrow occurs here (via `u.x`)
+LL | use_borrow(a);
LL | }
| - mutable borrow ends here
error[E0382]: use of moved value: `u.x`
- --> $DIR/union-borrow-move-parent-sibling.rs:53:9
+ --> $DIR/union-borrow-move-parent-sibling.rs:58:9
|
LL | let a = *u.y;
| - value moved here
-LL | let a = u.x; //~ ERROR use of moved value: `u.x`
+LL | let b = u.x; //~ ERROR use of moved value: `u.x`
| ^ value used here after move
|
= note: move occurs because `u.x` has type `[type error]`, which does not implement the `Copy` trait
|
= note: move occurs because `x` has type `T`, which does not implement the `Copy` trait
+error[E0505]: cannot move out of `x` because it is borrowed
+ --> $DIR/unop-move-semantics.rs:25:6
+ |
+LL | let m = &x;
+ | -- borrow of `x` occurs here
+...
+LL | !x; //~ ERROR: cannot move out of `x` because it is borrowed
+ | ^ move out of `x` occurs here
+...
+LL | use_mut(n); use_imm(m);
+ | - borrow later used here
+
+error[E0505]: cannot move out of `y` because it is borrowed
+ --> $DIR/unop-move-semantics.rs:27:6
+ |
+LL | let n = &mut y;
+ | ------ borrow of `y` occurs here
+...
+LL | !y; //~ ERROR: cannot move out of `y` because it is borrowed
+ | ^ move out of `y` occurs here
+LL | use_mut(n); use_imm(m);
+ | - borrow later used here
+
error[E0507]: cannot move out of borrowed content
--> $DIR/unop-move-semantics.rs:34:6
|
LL | !*n; //~ ERROR: cannot move out of borrowed content
| ^^ cannot move out of borrowed content
-error[E0507]: cannot move out of `*n` which is behind a `&` reference
- --> $DIR/unop-move-semantics.rs:36:6
- |
-LL | let n = &y;
- | -- help: consider changing this to be a mutable reference: `&mut y`
-...
-LL | !*n; //~ ERROR: cannot move out of borrowed content
- | ^^
- | |
- | cannot move out of `*n` which is behind a `&` reference
- | `n` is a `&` reference, so the data it refers to cannot be moved
-
-error: aborting due to 4 previous errors
+error: aborting due to 5 previous errors
-Some errors occurred: E0382, E0507.
+Some errors occurred: E0382, E0505, E0507.
For more information about an error, try `rustc --explain E0382`.
!x; //~ ERROR: cannot move out of `x` because it is borrowed
!y; //~ ERROR: cannot move out of `y` because it is borrowed
+ use_mut(n); use_imm(m);
}
-
fn illegal_dereference<T: Not<Output=T>>(mut x: T, y: T) {
let m = &mut x;
let n = &y;
!*m; //~ ERROR: cannot move out of borrowed content
!*n; //~ ERROR: cannot move out of borrowed content
+ use_imm(n); use_mut(m);
}
-
fn main() {}
+
+fn use_mut<T>(_: &mut T) { }
+fn use_imm<T>(_: &T) { }
LL | udrop::<[u8]>(foo()[..]);
| ^^^^^^^^^ cannot move out of here
-error[E0507]: cannot move out of data in a `&` reference
- --> $DIR/unsized-exprs2.rs:22:19
- |
-LL | udrop::<[u8]>(foo()[..]);
- | ^^^^^^^^^
- | |
- | cannot move out of data in a `&` reference
- | cannot move
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
-Some errors occurred: E0507, E0508.
-For more information about an error, try `rustc --explain E0507`.
+For more information about this error, try `rustc --explain E0508`.
-Subproject commit 2d0863f657e6f45159fc7412267eee3e659185e5
+Subproject commit 1fa30882067703202d13ad0bd53d630bc2c1de66
-Subproject commit b1d0343749bdc87e5cbbe7f1aeaa9d2a2c9dbc5b
+Subproject commit d8b426901a75b1eb975f52b4537f2736f2b94436
// Ignore if using system LLVM and actual version
// is smaller the minimum required version
config.system_llvm && &actual_version[..] < min_version
+ } else if line.starts_with("ignore-llvm-version") {
+ // Syntax is: "ignore-llvm-version <version1> [- <version2>]"
+ let range_components = line.split(' ')
+ .skip(1) // Skip the directive.
+ .map(|s| s.trim())
+ .filter(|word| !word.is_empty() && word != &"-")
+ .take(3) // 3 or more = invalid, so take at most 3.
+ .collect::<Vec<&str>>();
+ match range_components.len() {
+ 1 => {
+ &actual_version[..] == range_components[0]
+ }
+ 2 => {
+ let v_min = range_components[0];
+ let v_max = range_components[1];
+ if v_max < v_min {
+ panic!("Malformed LLVM version range: max < min")
+ }
+ // Ignore if version lies inside of range.
+ &actual_version[..] >= v_min && &actual_version[..] <= v_max
+ }
+ _ => panic!("Malformed LLVM version directive"),
+ }
} else {
false
}
.join(&testpaths.file.file_name().unwrap());
let mode_suffix = match config.compare_mode {
Some(ref mode) => format!(" ({})", mode.to_str()),
- None => format!(""),
+ None => String::new(),
};
test::DynTestName(format!(
"[{}{}] {}{}",
#![feature(rustc_private)]
+extern crate env_logger;
extern crate syntax;
extern crate rustdoc;
extern crate serialize as rustc_serialize;
}
fn main() {
+ env_logger::init();
PLAYGROUND.with(|slot| {
*slot.borrow_mut() = Some((None, String::from("https://play.rust-lang.org/")));
});
// whitelists to get this past `make check` today.
// FIXME(#32129)
if file.ends_with("std/string/struct.String.html") ||
- file.ends_with("interpret/struct.ValTy.html") ||
+ file.ends_with("interpret/struct.ImmTy.html") ||
file.ends_with("symbol/struct.InternedString.html") ||
file.ends_with("ast/struct.ThinVec.html") ||
file.ends_with("util/struct.ThinVec.html") ||
-Subproject commit 7728fa22bebea288abfea3b70cf795c60b93df3a
+Subproject commit 29bf48582812212450f4caf7da1af3f18c52bfef
import sys
import re
import json
-import copy
import datetime
import collections
import textwrap
MAINTAINERS = {
'miri': '@oli-obk @RalfJung @eddyb',
'clippy-driver': '@Manishearth @llogiq @mcarton @oli-obk',
- 'rls': '@nrc',
+ 'rls': '@nrc @Xanewok',
'rustfmt': '@nrc',
'book': '@carols10cents @steveklabnik',
'nomicon': '@frewsxcv @Gankro',
status[os] = new
if new > old:
changed = True
- message += '🎉 {} on {}: {} → {}.\n' \
- .format(tool, os, old, new)
+ message += '🎉 {} on {}: {} → {} (cc {}, @rust-lang/infra).\n' \
+ .format(tool, os, old, new, MAINTAINERS.get(tool))
elif new < old:
changed = True
message += '💔 {} on {}: {} → {} (cc {}, @rust-lang/infra).\n' \
// This is intentional, this dependency just makes the crate available
// for others later on. Cover cases
- let whitelisted = krate == "alloc_jemalloc";
- let whitelisted = whitelisted || krate.starts_with("panic");
+ let whitelisted = krate.starts_with("panic");
if toml.contains("name = \"std\"") && whitelisted {
continue
}
fn filter_dirs(path: &Path) -> bool {
let skip = [
"src/dlmalloc",
- "src/jemalloc",
"src/llvm",
"src/llvm-emscripten",
"src/libbacktrace",
//! - core may not have platform-specific code
//! - libcompiler_builtins may have platform-specific code
//! - liballoc_system may have platform-specific code
-//! - liballoc_jemalloc may have platform-specific code
//! - libpanic_abort may have platform-specific code
//! - libpanic_unwind may have platform-specific code
//! - libunwind may have platform-specific code
// Paths that may contain platform-specific code
const EXCEPTION_PATHS: &[&str] = &[
// std crates
- "src/liballoc_jemalloc",
"src/liballoc_system",
"src/libcompiler_builtins",
"src/liblibc",